
Contents of /src/lisp/gencgc.c



Revision 1.19
Thu Jun 22 12:55:19 2000 UTC by dtc
Branch: MAIN
Changes since 1.18: +3 -1 lines
File MIME type: text/plain
Flush a verbose debug message that occurred rarely during code fixups.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 dtc 1.19 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.19 2000/06/22 12:55:19 dtc Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17     #include "internals.h"
18     #include "os.h"
19     #include "globals.h"
20     #include "interrupt.h"
21     #include "validate.h"
22     #include "lispregs.h"
23    
24     #include "gencgc.h"
25    
26     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
27     __FILE__, __LINE__)
28    
29 dtc 1.3 #if 0
30 dtc 1.1 #define gc_assert(ex) do { \
31     if (!(ex)) gc_abort(); \
32     } while (0)
33     #else
34     #define gc_assert(ex)
35     #endif
36    
37    
38 dtc 1.14 /*
39     * The number of generations, an extra is added to this for use as a temp.
40     */
41 dtc 1.1 #define NUM_GENERATIONS 6
42    
43     /* Debugging variables. */
44    
45 dtc 1.14 /*
46     * The verbose level. All non-error messages are disabled at level 0;
47     * and only a few rare messages are printed at level 1.
48     */
49 dtc 1.6 unsigned gencgc_verbose = 0;
50 dtc 1.1
51 dtc 1.14 /*
52     * To enable the use of page protection to help avoid the scavenging
53     * of pages that don't have pointers to younger generations.
54     */
55 dtc 1.1 boolean enable_page_protection = TRUE;
56    
57 dtc 1.14 /*
58     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
59     * Set to NUM_GENERATIONS to disable.
60     */
61 dtc 1.3 int verify_gens = NUM_GENERATIONS;
62 dtc 1.1
63 dtc 1.14 /*
64     * Enable a pre-scan verify of generation 0 before it's GCed.
65     */
66 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
67    
68 dtc 1.3 /*
69 dtc 1.14 * Enable checking for bad pointers after gc_free_heap is called from purify.
70 dtc 1.3 */
71     boolean verify_after_free_heap = FALSE;
72    
73 dtc 1.14 /*
74     * Enable the printing of a note when code objects are found in the
75     * dynamic space during a heap verify.
76     */
77 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
78    
79 dtc 1.14 /*
80     * Enable the checking of code objects for fixup errors after they are
81     * transported.
82     */
83 dtc 1.3 boolean check_code_fixups = FALSE;
84 dtc 1.1
85 dtc 1.14 /*
86     * To enable unmapping of a page and re-mmapping it to have it zero filled.
87     * Note: this can waste a lot of swap on FreeBSD so don't unmap.
88     */
89 dtc 1.1 #if defined(__FreeBSD__)
90     boolean gencgc_unmap_zero = FALSE;
91     #else
92     boolean gencgc_unmap_zero = TRUE;
93     #endif
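/*
 * Illustration only (a sketch, not the code CMUCL actually uses): with
 * mmap/munmap, a page can be "zero filled" by replacing it with a fresh
 * anonymous mapping, so the kernel supplies zeroed memory lazily instead
 * of the collector writing zeros itself.  MAP_ANONYMOUS may be spelled
 * MAP_ANON on some systems.
 *
 *   #include <sys/mman.h>
 *
 *   void *addr = page_address(page);
 *   munmap(addr, PAGE_SIZE);
 *   mmap(addr, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 *
 * Whether this pays off is system dependent, hence the FreeBSD-specific
 * default above.
 */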
94    
95 dtc 1.14 /*
96     * Enable checking that newly allocated regions are zero filled.
97     */
98 dtc 1.1 boolean gencgc_zero_check = FALSE;
99    
100 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
101    
102 dtc 1.3 /*
103     * Enable checking that free pages are zero filled during gc_free_heap
104     * called after purify.
105     */
106     boolean gencgc_zero_check_during_free_heap = FALSE;
107    
108 dtc 1.14 /*
109     * The minimum size for a large object.
110     */
111 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
112 dtc 1.1
113 dtc 1.14 /*
114     * Enable the filtering of stack/register pointers. This could reduce
115     * the number of invalid pointers accepted. It will probably degrade
116     * interrupt safety during object initialisation.
117     */
118 dtc 1.1 boolean enable_pointer_filter = TRUE;
119    
120    
121 dtc 1.14 /*
122     * The total bytes allocated. Seen by (dynamic-usage)
123     */
124 dtc 1.1 unsigned long bytes_allocated = 0;
125     static unsigned long auto_gc_trigger = 0;
126    
127 dtc 1.14 /*
128     * The src. and dest. generations. Set before a GC starts scavenging.
129     */
130 dtc 1.1 static int from_space;
131     static int new_space;
132    
133    
134 dtc 1.14 /*
135     * GC structures and variables.
136     */
137 dtc 1.1
138 dtc 1.14 /*
139     * An array of page structures is statically allocated.
140     * This helps quickly map between an address and its page structure.
141     * NUM_PAGES is set from the size of the dynamic space.
142     */
143 dtc 1.1 struct page page_table[NUM_PAGES];
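/*
 * The definition of struct page lives in gencgc.h.  Judging only from the
 * fields referenced in this file, it looks roughly like the sketch below;
 * the exact layout, the field types, and the PAGE_* accessor macros
 * (PAGE_FLAGS, PAGE_ALLOCATED, PAGE_GENERATION, ...) are assumptions
 * inferred from their uses here, not the authoritative definition:
 *
 *   struct page {
 *       unsigned flags;           // allocated / unboxed / write-protected /
 *                                 // large-object / dont-move bits plus the
 *                                 // generation number, tested through the
 *                                 // PAGE_*_MASK masks and PAGE_FLAGS()
 *       int bytes_used;           // bytes in use on this page, <= PAGE_SIZE
 *       int first_object_offset;  // offset from the page start to the start
 *                                 // of the object/region extending onto this
 *                                 // page; negative when that object begins
 *                                 // on an earlier page
 *   };
 */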
144    
145 dtc 1.14 /*
146     * Heap base, needed for mapping addresses to page structures.
147     */
148 dtc 1.1 static void *heap_base = NULL;
149    
150 dtc 1.14 /*
151     * Calculate the start address for the given page number.
152     */
153     inline void *page_address(int page_num)
154 dtc 1.1 {
155 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
156 dtc 1.1 }
157    
158 dtc 1.14 /*
159     * Find the page index within the page_table for the given address.
160     * Returns -1 on failure.
161     */
162     inline int find_page_index(void *addr)
163 dtc 1.1 {
164     int index = addr-heap_base;
165    
166     if (index >= 0) {
167 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
168 dtc 1.1 if (index < NUM_PAGES)
169 dtc 1.14 return index;
170 dtc 1.1 }
171    
172 dtc 1.14 return -1;
173 dtc 1.1 }
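/*
 * Taken together, the two helpers above give the usual round trip: for
 * any address a inside the dynamic space,
 *
 *   int p = find_page_index(a);            // p >= 0
 *   page_address(p) <= a < page_address(p) + PAGE_SIZE
 *
 * while addresses outside the space yield -1.
 */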
174    
175    
176 dtc 1.14 /*
177     * A structure to hold the state of a generation.
178     */
179 dtc 1.1 struct generation {
180    
181     /* The first page that gc_alloc checks on its next call. */
182     int alloc_start_page;
183    
184     /* The first page that gc_alloc_unboxed checks on its next call. */
185     int alloc_unboxed_start_page;
186    
187 dtc 1.14 /*
188     * The first page that gc_alloc_large (boxed) considers on its next call.
189     * Although it always allocates after the boxed_region.
190     */
191 dtc 1.1 int alloc_large_start_page;
192    
193 dtc 1.14 /*
194     * The first page that gc_alloc_large (unboxed) considers on its next call.
195     * Although it always allocates after the current_unboxed_region.
196     */
197 dtc 1.1 int alloc_large_unboxed_start_page;
198    
199     /* The bytes allocated to this generation. */
200     int bytes_allocated;
201    
202     /* The number of bytes at which to trigger a GC */
203     int gc_trigger;
204    
205     /* To calculate a new level for gc_trigger */
206     int bytes_consed_between_gc;
207    
208     /* The number of GCs since the last raise. */
209     int num_gc;
210    
211 dtc 1.14 /*
212     * The average age after which a GC will raise objects to the
213     * next generation.
214     */
215 dtc 1.1 int trigger_age;
216    
217 dtc 1.14 /*
218     * The cumulative sum of the bytes allocated to this generation. It
219     * is cleared after a GC on this generation, and updated before new
220     * objects are added from a GC of a younger generation. Dividing by
221     * the bytes_allocated will give the average age of the memory in
222     * this generation since its last GC.
223     */
224 dtc 1.1 int cum_sum_bytes_allocated;
225    
226 dtc 1.14 /*
227     * A minimum average memory age before a GC will occur helps prevent
228     * a GC when a large number of new live objects have been added, in
229     * which case a GC could be a waste of time.
230     */
231 dtc 1.1 double min_av_mem_age;
232     };
233    
234 dtc 1.14 /*
235     * An array of generation structures. There needs to be one more
236     * generation structure than actual generations as the oldest
237     * generation is temporarily raised then lowered.
238     */
239 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
240 dtc 1.1
241 dtc 1.14 /*
242     * The oldest generation that will currently be GCed by default.
243     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
244     *
245     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
246     *
247     * Setting this to 0 effectively disables the generational nature of
248     * the GC. In some applications generational GC may not be useful
249     * because there are no long-lived objects.
250     *
251     * An intermediate value could be handy after moving long-lived data
252     * into an older generation so an unnecessary GC of this long-lived
253     * data can be avoided.
254     */
255     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
256 dtc 1.1
257    
258 dtc 1.14 /*
259     * The maximum free page in the heap is maintained and used to update
260     * ALLOCATION_POINTER which is used by the room function to limit its
261     * search of the heap. XX Gencgc obviously needs to be better
262     * integrated with the lisp code.
263     */
264 dtc 1.1 static int last_free_page;
265    
266    
267    
268 dtc 1.14 /*
269     * Misc. heap functions.
270     */
271 dtc 1.1
272 dtc 1.14 /*
273     * Count the number of write protected pages within the given generation.
274     */
275     static int count_write_protect_generation_pages(int generation)
276 dtc 1.1 {
277     int i;
278     int cnt = 0;
279 dtc 1.15 int mmask, mflags;
280    
281     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
282     | PAGE_GENERATION_MASK;
283     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
284 dtc 1.14
285 dtc 1.1 for (i = 0; i < last_free_page; i++)
286 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
287 dtc 1.1 cnt++;
288 dtc 1.14 return cnt;
289 dtc 1.1 }
290    
291 dtc 1.14 /*
292     * Count the number of pages within the given generation.
293     */
294     static int count_generation_pages(int generation)
295 dtc 1.1 {
296     int i;
297     int cnt = 0;
298 dtc 1.15 int mmask, mflags;
299    
300     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
301     mflags = PAGE_ALLOCATED_MASK | generation;
302 dtc 1.14
303 dtc 1.1 for (i = 0; i < last_free_page; i++)
304 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
305 dtc 1.1 cnt++;
306 dtc 1.14 return cnt;
307 dtc 1.1 }
308    
309 dtc 1.14 /*
310     * Count the number of dont_move pages.
311     */
312     static int count_dont_move_pages(void)
313 dtc 1.1 {
314     int i;
315     int cnt = 0;
316 dtc 1.15 int mmask;
317    
318     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
319 dtc 1.14
320 dtc 1.1 for (i = 0; i < last_free_page; i++)
321 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
322 dtc 1.1 cnt++;
323 dtc 1.14 return cnt;
324 dtc 1.1 }
325    
326 dtc 1.14 /*
327     * Work through the pages and add up the number of bytes used for the
328     * given generation.
329     */
330     static int generation_bytes_allocated (int generation)
331 dtc 1.1 {
332     int i;
333     int bytes_allocated = 0;
334 dtc 1.15 int mmask, mflags;
335    
336     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
337     mflags = PAGE_ALLOCATED_MASK | generation;
338 dtc 1.14
339 dtc 1.1 for (i = 0; i < last_free_page; i++) {
340 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
341 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
342     }
343 dtc 1.14 return bytes_allocated;
344 dtc 1.1 }
345    
346 dtc 1.14 /*
347     * Return the average age of the memory in a generation.
348     */
349     static double gen_av_mem_age(int gen)
350 dtc 1.1 {
351     if (generations[gen].bytes_allocated == 0)
352 dtc 1.14 return 0.0;
353    
354     return (double) generations[gen].cum_sum_bytes_allocated /
355     (double) generations[gen].bytes_allocated;
356 dtc 1.1 }
357    
358 dtc 1.14 /*
359     * The verbose argument controls how much to print out:
360     * 0 for normal level of detail; 1 for debugging.
361     */
362     static void print_generation_stats(int verbose)
363 dtc 1.1 {
364     int i, gens;
365 dtc 1.11 int fpu_state[27];
366    
367 dtc 1.14 /*
368     * This code uses the FP instructions which may be set up for Lisp so
369     * they need to be saved and reset for C.
370     */
371 dtc 1.11 fpu_save(fpu_state);
372 dtc 1.1
373     /* Number of generations to print out. */
374     if (verbose)
375 dtc 1.14 gens = NUM_GENERATIONS + 1;
376 dtc 1.1 else
377     gens = NUM_GENERATIONS;
378    
379     /* Print the heap stats */
380 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
381 dtc 1.1
382     for (i = 0; i < gens; i++) {
383     int j;
384     int boxed_cnt = 0;
385     int unboxed_cnt = 0;
386     int large_boxed_cnt = 0;
387     int large_unboxed_cnt = 0;
388 dtc 1.14
389 dtc 1.15 for (j = 0; j < last_free_page; j++) {
390     int flags = page_table[j].flags;
391     if ((flags & PAGE_GENERATION_MASK) == i) {
392     if (flags & PAGE_ALLOCATED_MASK) {
393     /*
394     * Count the number of boxed and unboxed pages within the
395     * given generation.
396     */
397     if (flags & PAGE_UNBOXED_MASK)
398     if (flags & PAGE_LARGE_OBJECT_MASK)
399     large_unboxed_cnt++;
400     else
401     unboxed_cnt++;
402 dtc 1.1 else
403 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
404     large_boxed_cnt++;
405     else
406     boxed_cnt++;
407     }
408 dtc 1.1 }
409 dtc 1.15 }
410 dtc 1.14
411 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
412 dtc 1.14 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
413     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
414 dtc 1.1 generations[i].bytes_allocated,
415 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
416     generations[i].bytes_allocated,
417 dtc 1.1 generations[i].gc_trigger,
418     count_write_protect_generation_pages(i),
419     generations[i].num_gc,
420     gen_av_mem_age(i));
421     }
422 dtc 1.14 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
423 dtc 1.11
424     fpu_restore(fpu_state);
425 dtc 1.1 }
426    
427    
428    
429 dtc 1.14 /*
430     * Allocation routines.
431     *
432     *
433     * To support quick and inline allocation, regions of memory can be
434     * allocated and then allocated from with just a free pointer and a
435     * check against an end address.
436     *
437     * Since objects can be allocated to spaces with different properties
438     * e.g. boxed/unboxed, generation, ages; there may need to be many
439     * allocation regions.
440     *
441     * Each allocation region may start within a partly used page.
442     * Many features of memory use are noted on a page-wise basis,
443     * E.g. the generation; so if a region starts within an existing
444     * allocated page it must be consistent with this page.
445     *
446     * During the scavenging of the newspace, objects will be transported
447     * into an allocation region, and pointers updated to point to this
448     * allocation region. It is possible that these pointers will be
449     * scavenged again before the allocation region is closed, E.g. due to
450     * trans_list which jumps all over the place to cleanup the list. It
451     * is important to be able to determine properties of all objects
452     * pointed to when scavenging, E.g. to detect pointers to the
453     * oldspace. Thus it's important that the allocation regions have the
454     * correct properties set when allocated, and not just set when
455     * closed. The region allocation routines return regions with the
456     * specified properties, and grab all the pages, setting their
457     * properties appropriately, except that the amount used is not known.
458     *
459     * These regions are used to support quicker allocation using just a
460     * free pointer. The actual space used by the region is not reflected
461     * in the page tables until it is closed. It can't be scavenged until
462     * closed.
463     *
464     * When finished with the region it should be closed, which will
465     * update the page tables for the actual space used, returning unused
466     * space. Further it may be noted in the new areas, which is
467     * necessary when scavenging the newspace.
468     *
469     * Large objects may be allocated directly without an allocation
470     * region, the page tables are updated immediately.
471     *
472     * Unboxed objects don't contain pointers to other objects so don't need
473     * scavenging. Further they can't contain pointers to younger
474     * generations so WP is not needed. By allocating pages to unboxed
475     * objects the whole page never needs scavenging or write protecting.
476     */
477 dtc 1.1
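/*
 * A rough usage sketch of the region lifecycle described above
 * (illustration only; the real entry points are gc_alloc_new_region,
 * the gc_alloc / gc_quick_alloc family, and gc_alloc_update_page_tables
 * defined below):
 *
 *   struct alloc_region region;
 *   // a reset region has first_page 0, last_page -1 and
 *   // free_pointer == end_addr, so the first bump allocation fails
 *   gc_alloc_new_region(nbytes, 0, &region);   // grab pages, set properties
 *
 *   // fast path: bump allocation with only an end-address check
 *   if (region.free_pointer + nbytes <= region.end_addr) {
 *       void *obj = region.free_pointer;
 *       region.free_pointer += nbytes;
 *   }
 *
 *   gc_alloc_update_page_tables(0, &region);   // close: record actual usage
 *                                              // and release the unused tail
 */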
478 dtc 1.14 /*
479     * Only using two regions at present, both are for the current
480     * newspace generation.
481     */
482 dtc 1.1 struct alloc_region boxed_region;
483     struct alloc_region unboxed_region;
484    
485 dtc 1.14 /*
486     * X hack. Current lisp code uses the following. Need copying in/out.
487     */
488 dtc 1.1 void *current_region_free_pointer;
489     void *current_region_end_addr;
490    
491     /* The generation currently being allocated to. X */
492     static int gc_alloc_generation;
493    
494 dtc 1.14 /*
495     * Find a new region with room for at least the given number of bytes.
496     *
497     * It starts looking at the current generation's alloc_start_page. So
498     * may pick up from the previous region if there is enough space. This
499     * keeps the allocation contiguous when scavenging the newspace.
500     *
501     * The alloc_region should have been closed by a call to
502     * gc_alloc_update_page_tables, and will thus be in an empty state.
503     *
504     * To assist the scavenging functions, write protected pages are not
505     * used. Free pages should not be write protected.
506     *
507     * It is critical to the conservative GC that the start of regions be
508     * known. To help achieve this only small regions are allocated at a
509     * time.
510     *
511     * During scavenging, pointers may be found that point within the
512     * current region and the page generation must be set so pointers to
513     * the from space can be recognised. So the generation of pages in
514     * the region are set to gc_alloc_generation. To prevent another
515     * allocation call using the same pages, all the pages in the region
516     * are allocated, although they will initially be empty.
517     */
518     static void gc_alloc_new_region(int nbytes, int unboxed,
519     struct alloc_region *alloc_region)
520 dtc 1.1 {
521     int first_page;
522     int last_page;
523     int region_size;
524     int restart_page;
525     int bytes_found;
526     int num_pages;
527     int i;
528 dtc 1.15 int mmask, mflags;
529 dtc 1.1
530 dtc 1.14 #if 0
531     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
532     nbytes, gc_alloc_generation);
533     #endif
534 dtc 1.1
535     /* Check that the region is in a reset state. */
536 dtc 1.14 gc_assert(alloc_region->first_page == 0
537     && alloc_region->last_page == -1
538     && alloc_region->free_pointer == alloc_region->end_addr);
539 dtc 1.1
540     if (unboxed)
541     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
542     else
543     restart_page = generations[gc_alloc_generation].alloc_start_page;
544    
545 dtc 1.14 /*
546     * Search for a contiguous free region of at least nbytes with the
547 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
548     * mask and matching flags.
549 dtc 1.14 */
550 dtc 1.15
551     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
552     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
553     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
554     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
555     | gc_alloc_generation;
556    
557 dtc 1.1 do {
558     first_page = restart_page;
559 dtc 1.14
560     /*
561     * First search for a page with at least 32 bytes free, that is
562     * not write protected, or marked dont_move.
563     */
564 dtc 1.15
565     while (first_page < NUM_PAGES) {
566     int flags = page_table[first_page].flags;
567     if (!(flags & PAGE_ALLOCATED_MASK)
568     || ((flags & mmask) == mflags &&
569 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
570 dtc 1.15 break;
571 dtc 1.1 first_page++;
572 dtc 1.15 }
573    
574 dtc 1.1 /* Check for a failure */
575     if (first_page >= NUM_PAGES) {
576 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
577 dtc 1.1 print_generation_stats(1);
578     exit(1);
579     }
580 dtc 1.14
581 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
582 dtc 1.14
583     #if 0
584     fprintf(stderr, " first_page=%d bytes_used=%d\n",
585     first_page, page_table[first_page].bytes_used);
586     #endif
587    
588     /*
589     * Now search forward to calculate the available region size. It
590     * tries to keep going until nbytes are found and the number of
591     * pages is greater than some level. This helps keep down the
592     * number of pages in a region.
593     */
594 dtc 1.1 last_page = first_page;
595 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
596 dtc 1.1 num_pages = 1;
597 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
598     && last_page < NUM_PAGES - 1
599 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
600 dtc 1.1 last_page++;
601     num_pages++;
602 dtc 1.17 bytes_found += PAGE_SIZE;
603 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
604 dtc 1.1 }
605 dtc 1.14
606 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
607     + PAGE_SIZE * (last_page - first_page);
608 dtc 1.14
609 dtc 1.1 gc_assert(bytes_found == region_size);
610 dtc 1.14
611     #if 0
612     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
613     last_page, bytes_found, num_pages);
614     #endif
615    
616 dtc 1.1 restart_page = last_page + 1;
617     }
618 dtc 1.14 while (restart_page < NUM_PAGES && bytes_found < nbytes);
619    
620 dtc 1.1 /* Check for a failure */
621 dtc 1.14 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
622     fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
623 dtc 1.1 print_generation_stats(1);
624     exit(1);
625     }
626 dtc 1.14
627     #if 0
628     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
629     gc_alloc_generation, bytes_found, first_page, last_page,
630     page_address(first_page));
631     #endif
632    
633 dtc 1.1 /* Setup the alloc_region. */
634     alloc_region->first_page = first_page;
635     alloc_region->last_page = last_page;
636     alloc_region->start_addr = page_table[first_page].bytes_used
637     + page_address(first_page);
638     alloc_region->free_pointer = alloc_region->start_addr;
639     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
640    
641     if (gencgc_zero_check) {
642     int *p;
643     for(p = (int *)alloc_region->start_addr;
644     p < (int *)alloc_region->end_addr; p++)
645     if (*p != 0)
646 dtc 1.14 fprintf(stderr, "** new region not zero @ %x\n",p);
647 dtc 1.1 }
648    
649     /* Setup the pages. */
650    
651     /* The first page may have already been in use. */
652     if (page_table[first_page].bytes_used == 0) {
653 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
654 dtc 1.1 page_table[first_page].first_object_offset = 0;
655     }
656 dtc 1.14
657 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
658     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
659     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
660     gc_assert(!PAGE_LARGE_OBJECT(first_page));
661    
662     for (i = first_page + 1; i <= last_page; i++) {
663     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
664     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
665     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
666     | gc_alloc_generation);
667 dtc 1.14 /*
668     * This may not be necessary for unboxed regions (think it was
669     * broken before!)
670     */
671 dtc 1.1 page_table[i].first_object_offset =
672     alloc_region->start_addr - page_address(i);
673     }
674    
675 dtc 1.17 /* Bump up the last_free_page */
676 dtc 1.14 if (last_page + 1 > last_free_page) {
677     last_free_page = last_page + 1;
678 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
679 dtc 1.17 (lispobj) ((char *) heap_base +
680     PAGE_SIZE * last_free_page));
681 dtc 1.1 }
682     }
683    
684    
685    
686 dtc 1.14 /*
687     * If the record_new_objects flag is 2 then all new regions created
688     * are recorded.
689     *
690     * If it's 1 then it is only recorded if the first page of the
691     * current region is <= new_areas_ignore_page. This helps avoid
692     * unnecessary recording when doing full scavenge pass.
693     *
694     * The new_object structure holds the page, byte offset, and size of
695     * new regions of objects. Each new area is placed in the array of
696     * these structures pointed to by new_areas; new_areas_index holds the
697     * offset into new_areas.
698     *
699     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
700     * later code must detect this and handle it, probably by doing a full
701     * scavenge of a generation.
702     */
703 dtc 1.1
704     #define NUM_NEW_AREAS 512
705     static int record_new_objects = 0;
706     static int new_areas_ignore_page;
707     struct new_area {
708     int page;
709     int offset;
710     int size;
711     };
712     static struct new_area (*new_areas)[];
713     static int new_areas_index;
714     int max_new_areas;
715    
716     /* Add a new area to new_areas. */
717 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
718 dtc 1.1 {
719     unsigned new_area_start,c;
720     int i;
721    
722     /* Ignore if full */
723     if (new_areas_index >= NUM_NEW_AREAS)
724     return;
725    
726     switch (record_new_objects) {
727     case 0:
728     return;
729     case 1:
730     if (first_page > new_areas_ignore_page)
731     return;
732     break;
733     case 2:
734     break;
735     default:
736     gc_abort();
737     }
738    
739 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
740 dtc 1.14
741     /*
742     * Search backwards for a prior area that this follows from. If
743     * found this will save adding a new area.
744     */
745     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
746 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
747 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
748 dtc 1.14 #if 0
749     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
750     #endif
751 dtc 1.1 if (new_area_start == area_end) {
752 dtc 1.14 #if 0
753     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
754 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
755 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
756     #endif
757 dtc 1.1 (*new_areas)[i].size += size;
758     return;
759     }
760     }
761 dtc 1.14 #if 0
762     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
763     #endif
764 dtc 1.1
765     (*new_areas)[new_areas_index].page = first_page;
766     (*new_areas)[new_areas_index].offset = offset;
767     (*new_areas)[new_areas_index].size = size;
768 dtc 1.14 #if 0
769     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
770     new_areas_index, first_page, offset, size);
771     #endif
772 dtc 1.1 new_areas_index++;
773 dtc 1.14
774 dtc 1.1 /* Note the max new_areas used. */
775     if (new_areas_index > max_new_areas)
776     max_new_areas = new_areas_index;
777     }
778    
779    
780 dtc 1.14 /*
781     * Update the tables for the alloc_region. The region may be added to
782     * the new_areas.
783     *
784     * When done the alloc_region is set up so that the next quick alloc
785     * will fail safely and thus a new region will be allocated. Further
786     * it is safe to try and re-update the page table of this reset
787     * alloc_region.
788     */
789     void gc_alloc_update_page_tables(int unboxed,
790     struct alloc_region *alloc_region)
791 dtc 1.1 {
792     int more;
793     int first_page;
794     int next_page;
795     int bytes_used;
796     int orig_first_page_bytes_used;
797     int region_size;
798     int byte_cnt;
799    
800 dtc 1.14 #if 0
801     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
802     gc_alloc_generation);
803     #endif
804 dtc 1.1
805     first_page = alloc_region->first_page;
806    
807     /* Catch an unused alloc_region. */
808 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
809 dtc 1.1 return;
810    
811 dtc 1.14 next_page = first_page + 1;
812 dtc 1.1
813     /* Skip if no bytes were allocated */
814     if (alloc_region->free_pointer != alloc_region->start_addr) {
815     orig_first_page_bytes_used = page_table[first_page].bytes_used;
816 dtc 1.14
817     gc_assert(alloc_region->start_addr == page_address(first_page) +
818     page_table[first_page].bytes_used);
819    
820 dtc 1.1 /* All the pages used need to be updated */
821 dtc 1.14
822 dtc 1.1 /* Update the first page. */
823 dtc 1.14
824     #if 0
825     fprintf(stderr, "0");
826     #endif
827    
828     /* If the page was free then setup the gen, and first_object_offset. */
829 dtc 1.1 if (page_table[first_page].bytes_used == 0)
830     gc_assert(page_table[first_page].first_object_offset == 0);
831 dtc 1.14
832 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
833     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
834     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
835     gc_assert(!PAGE_LARGE_OBJECT(first_page));
836 dtc 1.14
837 dtc 1.1 byte_cnt = 0;
838 dtc 1.14
839     /*
840     * Calc. the number of bytes used in this page. This is not always
841     * the number of new bytes, unless it was free.
842     */
843 dtc 1.1 more = 0;
844 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
845 dtc 1.17 if (bytes_used > PAGE_SIZE) {
846     bytes_used = PAGE_SIZE;
847 dtc 1.1 more = 1;
848     }
849     page_table[first_page].bytes_used = bytes_used;
850     byte_cnt += bytes_used;
851 dtc 1.14
852     /*
853     * All the rest of the pages should be free. Need to set their
854     * first_object_offset pointer to the start of the region, and set
855     * the bytes_used.
856     */
857 dtc 1.1 while (more) {
858 dtc 1.14 #if 0
859     fprintf(stderr, "+");
860     #endif
861 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
862     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
863 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
864 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
865     gc_assert(!PAGE_LARGE_OBJECT(next_page));
866 dtc 1.14
867 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
868     alloc_region->start_addr - page_address(next_page));
869 dtc 1.14
870 dtc 1.1 /* Calc. the number of bytes used in this page. */
871     more = 0;
872 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
873 dtc 1.17 if (bytes_used > PAGE_SIZE) {
874     bytes_used = PAGE_SIZE;
875 dtc 1.1 more = 1;
876     }
877     page_table[next_page].bytes_used = bytes_used;
878     byte_cnt += bytes_used;
879 dtc 1.14
880 dtc 1.1 next_page++;
881     }
882 dtc 1.14
883 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
884     bytes_allocated += region_size;
885     generations[gc_alloc_generation].bytes_allocated += region_size;
886 dtc 1.14
887     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
888    
889     /*
890     * Set the generations alloc restart page to the last page of
891     * the region.
892     */
893 dtc 1.1 if (unboxed)
894 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
895 dtc 1.1 else
896 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
897    
898 dtc 1.1 /* Add the region to the new_areas if requested. */
899     if (!unboxed)
900 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
901    
902     #if 0
903     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
904     region_size, gc_alloc_generation);
905     #endif
906 dtc 1.1 }
907     else
908 dtc 1.14 /*
909     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
910     */
911 dtc 1.1 if (page_table[first_page].bytes_used == 0)
912 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
913 dtc 1.14
914 dtc 1.1 /* Unallocate any unused pages. */
915     while (next_page <= alloc_region->last_page) {
916     gc_assert(page_table[next_page].bytes_used == 0);
917 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
918 dtc 1.1 next_page++;
919     }
920    
921     /* Reset the alloc_region. */
922     alloc_region->first_page = 0;
923     alloc_region->last_page = -1;
924     alloc_region->start_addr = page_address(0);
925     alloc_region->free_pointer = page_address(0);
926     alloc_region->end_addr = page_address(0);
927    
928 dtc 1.14 #if 0
929     fprintf(stderr, "\n");
930     #endif
931 dtc 1.1 }
932    
933    
934    
935     static inline void *gc_quick_alloc(int nbytes);
936    
937 dtc 1.14 /*
938     * Allocate a possibly large object.
939     */
940     static void *gc_alloc_large(int nbytes, int unboxed,
941     struct alloc_region *alloc_region)
942 dtc 1.1 {
943     int first_page;
944     int last_page;
945     int region_size;
946     int restart_page;
947     int bytes_found;
948     int num_pages;
949     int orig_first_page_bytes_used;
950     int byte_cnt;
951     int more;
952     int bytes_used;
953     int next_page;
954     int large = (nbytes >= large_object_size);
955 dtc 1.15 int mmask, mflags;
956 dtc 1.1
957 dtc 1.14 #if 0
958     if (nbytes > 200000)
959     fprintf(stderr, "*** alloc_large %d\n", nbytes);
960     #endif
961 dtc 1.1
962 dtc 1.14 #if 0
963     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
964     nbytes, gc_alloc_generation);
965     #endif
966 dtc 1.1
967 dtc 1.14 /*
968     * If the object is small, and there is room in the current region
969     * then allocate it in the current region.
970     */
971     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
972 dtc 1.1 return gc_quick_alloc(nbytes);
973 dtc 1.14
974     /*
975     * Search for a contiguous free region of at least nbytes. If it's a
976     * large object then align it on a page boundary by searching for a
977     * free page.
978     */
979    
980     /*
981     * To allow the allocation of small objects without the danger of
982     * using a page in the current boxed region, the search starts after
983     * the current boxed free region. XX could probably keep a page
984     * index ahead of the current region and bump it up here to save a
985     * lot of re-scanning.
986     */
987 dtc 1.1 if (unboxed)
988     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
989     else
990     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
991     if (restart_page <= alloc_region->last_page)
992 dtc 1.14 restart_page = alloc_region->last_page + 1;
993 dtc 1.1
994 dtc 1.15 /* Setup the mask and matching flags. */
995    
996     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
997     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
998     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
999     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1000     | gc_alloc_generation;
1001    
1002 dtc 1.1 do {
1003     first_page = restart_page;
1004 dtc 1.14
1005 dtc 1.1 if (large)
1006 dtc 1.15 while (first_page < NUM_PAGES && PAGE_ALLOCATED(first_page))
1007 dtc 1.1 first_page++;
1008     else
1009 dtc 1.15 while (first_page < NUM_PAGES) {
1010     int flags = page_table[first_page].flags;
1011     if (!(flags & PAGE_ALLOCATED_MASK)
1012     || ((flags & mmask) == mflags &&
1013 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1014 dtc 1.15 break;
1015 dtc 1.1 first_page++;
1016 dtc 1.15 }
1017 dtc 1.14
1018 dtc 1.1 /* Check for a failure */
1019     if (first_page >= NUM_PAGES) {
1020 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1021 dtc 1.1 print_generation_stats(1);
1022     exit(1);
1023     }
1024 dtc 1.14
1025 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1026 dtc 1.14
1027     #if 0
1028     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1029     first_page, page_table[first_page].bytes_used);
1030     #endif
1031    
1032 dtc 1.1 last_page = first_page;
1033 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1034 dtc 1.1 num_pages = 1;
1035 dtc 1.14 while (bytes_found < nbytes
1036     && last_page < NUM_PAGES - 1
1037 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1038 dtc 1.1 last_page++;
1039     num_pages++;
1040 dtc 1.17 bytes_found += PAGE_SIZE;
1041 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1042 dtc 1.1 }
1043 dtc 1.14
1044 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1045     + PAGE_SIZE * (last_page - first_page);
1046 dtc 1.14
1047 dtc 1.1 gc_assert(bytes_found == region_size);
1048 dtc 1.14
1049     #if 0
1050     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1051     last_page, bytes_found, num_pages);
1052     #endif
1053    
1054 dtc 1.1 restart_page = last_page + 1;
1055     }
1056     while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
1057 dtc 1.14
1058 dtc 1.1 /* Check for a failure */
1059 dtc 1.14 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
1060     fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1061 dtc 1.1 print_generation_stats(1);
1062     exit(1);
1063     }
1064 dtc 1.14
1065     #if 0
1066     if (large)
1067     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1068 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1069 dtc 1.14 first_page, last_page, page_address(first_page));
1070     #endif
1071 dtc 1.1
1072     gc_assert(first_page > alloc_region->last_page);
1073     if (unboxed)
1074     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1075     last_page;
1076     else
1077     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1078    
1079     /* Setup the pages. */
1080     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1081 dtc 1.14
1082     /*
1083     * If the first page was free then setup the gen, and
1084     * first_object_offset.
1085     */
1086 dtc 1.15
1087     if (large)
1088     mflags |= PAGE_LARGE_OBJECT_MASK;
1089 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1090 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1091 dtc 1.1 page_table[first_page].first_object_offset = 0;
1092     }
1093    
1094 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1095     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1096     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1097     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1098 dtc 1.14
1099 dtc 1.1 byte_cnt = 0;
1100 dtc 1.14
1101     /*
1102     * Calc. the number of bytes used in this page. This is not
1103     * always the number of new bytes, unless it was free.
1104     */
1105 dtc 1.1 more = 0;
1106 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1107 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1108     bytes_used = PAGE_SIZE;
1109 dtc 1.1 more = 1;
1110     }
1111     page_table[first_page].bytes_used = bytes_used;
1112     byte_cnt += bytes_used;
1113    
1114 dtc 1.14 next_page = first_page + 1;
1115 dtc 1.1
1116 dtc 1.14 /*
1117     * All the rest of the pages should be free. Need to set their
1118     * first_object_offset pointer to the start of the region, and set
1119     * the bytes_used.
1120     */
1121 dtc 1.1 while (more) {
1122 dtc 1.14 #if 0
1123     fprintf(stderr, "+");
1124     #endif
1125    
1126 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1127 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1128 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1129 dtc 1.14
1130 dtc 1.1 page_table[next_page].first_object_offset =
1131 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1132 dtc 1.14
1133 dtc 1.1 /* Calc. the number of bytes used in this page. */
1134     more = 0;
1135 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1136 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1137     bytes_used = PAGE_SIZE;
1138 dtc 1.1 more = 1;
1139     }
1140     page_table[next_page].bytes_used = bytes_used;
1141     byte_cnt += bytes_used;
1142 dtc 1.14
1143 dtc 1.1 next_page++;
1144     }
1145 dtc 1.14
1146     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1147 dtc 1.1
1148     bytes_allocated += nbytes;
1149     generations[gc_alloc_generation].bytes_allocated += nbytes;
1150    
1151     /* Add the region to the new_areas if requested. */
1152     if (!unboxed)
1153 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1154    
1155 dtc 1.17 /* Bump up the last_free_page */
1156 dtc 1.14 if (last_page + 1 > last_free_page) {
1157     last_free_page = last_page + 1;
1158 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
1159 dtc 1.17 (lispobj) ((char *) heap_base +
1160     PAGE_SIZE * last_free_page));
1161 dtc 1.14 }
1162    
1163     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1164 dtc 1.1 }
1165    
1166 dtc 1.14 /*
1167     * Allocate bytes from the boxed_region. It first checks if there is
1168     * room, if not then it calls gc_alloc_new_region to find a new region
1169     * with enough space. A pointer to the start of the region is returned.
1170     */
1171     static void *gc_alloc(int nbytes)
1172 dtc 1.1 {
1173     void *new_free_pointer;
1174    
1175 dtc 1.14 #if 0
1176     fprintf(stderr, "gc_alloc %d\n",nbytes);
1177     #endif
1178 dtc 1.1
1179     /* Check if there is room in the current alloc region. */
1180     new_free_pointer = boxed_region.free_pointer + nbytes;
1181 dtc 1.14
1182 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1183     /* If so then allocate from the current alloc region. */
1184     void *new_obj = boxed_region.free_pointer;
1185     boxed_region.free_pointer = new_free_pointer;
1186 dtc 1.14
1187 dtc 1.1 /* Check if the alloc region is almost empty. */
1188 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1189 dtc 1.1 /* If so finished with the current region. */
1190 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1191 dtc 1.1 /* Setup a new region. */
1192 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1193 dtc 1.1 }
1194 dtc 1.14 return (void *) new_obj;
1195 dtc 1.1 }
1196 dtc 1.14
1197 dtc 1.1 /* Else not enough free space in the current region. */
1198    
1199 dtc 1.14 /*
1200     * If there is a bit of room left in the current region then
1201     * allocate a large object.
1202     */
1203     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1204     return gc_alloc_large(nbytes, 0, &boxed_region);
1205 dtc 1.1
1206     /* Else find a new region. */
1207    
1208     /* Finished with the current region. */
1209 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1210    
1211 dtc 1.1 /* Setup a new region. */
1212 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1213    
1214 dtc 1.1 /* Should now be enough room. */
1215 dtc 1.14
1216 dtc 1.1 /* Check if there is room in the current region. */
1217     new_free_pointer = boxed_region.free_pointer + nbytes;
1218 dtc 1.14
1219 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1220     /* If so then allocate from the current region. */
1221     void *new_obj = boxed_region.free_pointer;
1222     boxed_region.free_pointer = new_free_pointer;
1223    
1224     /* Check if the current region is almost empty. */
1225 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1226 dtc 1.1 /* If so, finished with the current region. */
1227 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1228    
1229 dtc 1.1 /* Setup a new region. */
1230 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1231 dtc 1.1 }
1232 dtc 1.14
1233     return (void *) new_obj;
1234 dtc 1.1 }
1235 dtc 1.14
1236 dtc 1.1 /* Shouldn't happen? */
1237     gc_assert(0);
1238     }
1239    
1240 dtc 1.14 /*
1241     * Allocate space from the boxed_region. If there is not enough free
1242     * space then call gc_alloc to do the job. A pointer to the start of
1243     * the region is returned.
1244     */
1245     static inline void *gc_quick_alloc(int nbytes)
1246 dtc 1.1 {
1247     void *new_free_pointer;
1248    
1249     /* Check if there is room in the current region. */
1250     new_free_pointer = boxed_region.free_pointer + nbytes;
1251 dtc 1.14
1252 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1253     /* If so then allocate from the current region. */
1254     void *new_obj = boxed_region.free_pointer;
1255     boxed_region.free_pointer = new_free_pointer;
1256 dtc 1.14 return (void *) new_obj;
1257 dtc 1.1 }
1258 dtc 1.14
1259 dtc 1.1 /* Else call gc_alloc */
1260 dtc 1.14 return gc_alloc(nbytes);
1261 dtc 1.1 }
1262    
1263 dtc 1.14 /*
1264     * Allocate space for the boxed object. If it is a large object then
1265     * do a large alloc else allocate from the current region. If there is
1266     * not enough free space then call gc_alloc to do the job. A pointer
1267     * to the start of the region is returned.
1268     */
1269     static inline void *gc_quick_alloc_large(int nbytes)
1270 dtc 1.1 {
1271     void *new_free_pointer;
1272    
1273     if (nbytes >= large_object_size)
1274     return gc_alloc_large(nbytes,0,&boxed_region);
1275    
1276     /* Check if there is room in the current region. */
1277     new_free_pointer = boxed_region.free_pointer + nbytes;
1278 dtc 1.14
1279 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1280     /* If so then allocate from the current region. */
1281     void *new_obj = boxed_region.free_pointer;
1282     boxed_region.free_pointer = new_free_pointer;
1283 dtc 1.14 return (void *) new_obj;
1284 dtc 1.1 }
1285 dtc 1.14
1286 dtc 1.1 /* Else call gc_alloc */
1287 dtc 1.14 return gc_alloc(nbytes);
1288 dtc 1.1 }
1289    
1290    
1291    
1292    
1293 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1294 dtc 1.1 {
1295     void *new_free_pointer;
1296    
1297 dtc 1.14 #if 0
1298     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1299     #endif
1300 dtc 1.1
1301     /* Check if there is room in the current region. */
1302     new_free_pointer = unboxed_region.free_pointer + nbytes;
1303 dtc 1.14
1304 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1305     /* If so then allocate from the current region. */
1306     void *new_obj = unboxed_region.free_pointer;
1307     unboxed_region.free_pointer = new_free_pointer;
1308 dtc 1.14
1309 dtc 1.1 /* Check if the current region is almost empty. */
1310     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1311     /* If so finished with the current region. */
1312 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1313    
1314 dtc 1.1 /* Setup a new region. */
1315 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1316 dtc 1.1 }
1317 dtc 1.14
1318     return (void *) new_obj;
1319 dtc 1.1 }
1320 dtc 1.14
1321 dtc 1.1 /* Else not enough free space in the current region. */
1322 dtc 1.14
1323     /*
1324     * If there is a bit of room left in the current region then
1325     * allocate a large object.
1326     */
1327     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1328     return gc_alloc_large(nbytes, 1, &unboxed_region);
1329 dtc 1.1
1330     /* Else find a new region. */
1331    
1332     /* Finished with the current region. */
1333     gc_alloc_update_page_tables(1,&unboxed_region);
1334 dtc 1.14
1335 dtc 1.1 /* Setup a new region. */
1336     gc_alloc_new_region(nbytes,1,&unboxed_region);
1337 dtc 1.14
1338 dtc 1.1 /* Should now be enough room. */
1339 dtc 1.14
1340 dtc 1.1 /* Check if there is room in the current region. */
1341     new_free_pointer = unboxed_region.free_pointer + nbytes;
1342 dtc 1.14
1343 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1344     /* If so then allocate from the current region. */
1345     void *new_obj = unboxed_region.free_pointer;
1346     unboxed_region.free_pointer = new_free_pointer;
1347 dtc 1.14
1348 dtc 1.1 /* Check if the current region is almost empty. */
1349     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1350     /* If so find, finished with the current region. */
1351 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1352    
1353 dtc 1.1 /* Setup a new region. */
1354 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1355 dtc 1.1 }
1356 dtc 1.14
1357     return (void *) new_obj;
1358 dtc 1.1 }
1359 dtc 1.14
1360 dtc 1.1 /* Shouldn't happen? */
1361     gc_assert(0);
1362     }
1363    
1364 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1365 dtc 1.1 {
1366     void *new_free_pointer;
1367    
1368     /* Check if there is room in the current region. */
1369     new_free_pointer = unboxed_region.free_pointer + nbytes;
1370 dtc 1.14
1371 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1372     /* If so then allocate from the current region. */
1373     void *new_obj = unboxed_region.free_pointer;
1374     unboxed_region.free_pointer = new_free_pointer;
1375 dtc 1.14
1376     return (void *) new_obj;
1377 dtc 1.1 }
1378 dtc 1.14
1379 dtc 1.1 /* Else call gc_alloc */
1380 dtc 1.14 return gc_alloc_unboxed(nbytes);
1381 dtc 1.1 }
1382    
1383 dtc 1.14 /*
1384     * Allocate space for the object. If it is a large object then do a
1385     * large alloc else allocate from the current region. If there is not
1386     * enough free space then call gc_alloc to do the job.
1387     *
1388     * A pointer to the start of the region is returned.
1389     */
1390     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1391 dtc 1.1 {
1392     void *new_free_pointer;
1393    
1394     if (nbytes >= large_object_size)
1395     return gc_alloc_large(nbytes,1,&unboxed_region);
1396    
1397     /* Check if there is room in the current region. */
1398     new_free_pointer = unboxed_region.free_pointer + nbytes;
1399 dtc 1.14
1400 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1401     /* If so then allocate from the current region. */
1402     void *new_obj = unboxed_region.free_pointer;
1403     unboxed_region.free_pointer = new_free_pointer;
1404 dtc 1.14
1405     return (void *) new_obj;
1406 dtc 1.1 }
1407 dtc 1.14
1408 dtc 1.1 /* Else call gc_alloc */
1409 dtc 1.14 return gc_alloc_unboxed(nbytes);
1410 dtc 1.1 }
1411    
1412     /***************************************************************************/
1413    
1414    
1415     /* Scavenging/transporting routines derived from gc.c */
1416    
1417     static int (*scavtab[256])(lispobj *where, lispobj object);
1418     static lispobj (*transother[256])(lispobj object);
1419     static int (*sizetab[256])(lispobj *where);
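/*
 * Presumably, following the gc.c design these routines derive from,
 * scavtab, transother and sizetab are dispatch tables indexed by an
 * object's eight-bit type code, giving each heap object type its own
 * scavenge, transport and size routine (an assumption inferred from
 * their declarations; they are filled in further down the file).
 */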
1420    
1421     static struct weak_pointer *weak_pointers;
1422 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1423 dtc 1.1
1424     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
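/*
 * CEILING(x,y) rounds x up to the next multiple of y, where y must be
 * a power of two; e.g. CEILING(13, 4) == 16 and CEILING(16, 4) == 16.
 */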
1425    
1426    
1427     /* Predicates */
1428    
1429 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1430 dtc 1.1 {
1431 dtc 1.14 int page_index = (void*) obj - heap_base;
1432     return page_index >= 0
1433 dtc 1.17 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1434 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1435 dtc 1.14 }
1436    
1437     static inline boolean new_space_p(lispobj obj)
1438     {
1439     int page_index = (void*) obj - heap_base;
1440     return page_index >= 0
1441 dtc 1.17 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1442 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1443 dtc 1.1 }
1444    
1445    
1446     /* Copying Objects */
1447    
1448    
1449     /* Copying Boxed Objects */
1450 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1451 dtc 1.1 {
1452     int tag;
1453     lispobj *new;
1454     lispobj *source, *dest;
1455 dtc 1.14
1456 dtc 1.1 gc_assert(Pointerp(object));
1457     gc_assert(from_space_p(object));
1458     gc_assert((nwords & 0x01) == 0);
1459 dtc 1.14
1460 dtc 1.1 /* get tag of object */
1461     tag = LowtagOf(object);
1462    
1463     /* allocate space */
1464     new = gc_quick_alloc(nwords*4);
1465 dtc 1.14
1466 dtc 1.1 dest = new;
1467     source = (lispobj *) PTR(object);
1468 dtc 1.14
1469 dtc 1.1 /* copy the object */
1470     while (nwords > 0) {
1471     dest[0] = source[0];
1472     dest[1] = source[1];
1473     dest += 2;
1474     source += 2;
1475     nwords -= 2;
1476     }
1477 dtc 1.14
1478 dtc 1.1 /* return lisp pointer of new object */
1479 dtc 1.14 return (lispobj) new | tag;
1480 dtc 1.1 }
1481    
1482 dtc 1.14 /*
1483     * Copying Large Boxed Objects. If the object is in a large object
1484     * region then it is simply promoted, else it is copied. If it's large
1485     * enough then it's copied to a large object region.
1486     *
1487     * Vectors may have shrunk. If the object is not copied the space
1488     * needs to be reclaimed, and the page_tables corrected.
1489     */
1490     static lispobj copy_large_object(lispobj object, int nwords)
1491 dtc 1.1 {
1492     int tag;
1493     lispobj *new;
1494     lispobj *source, *dest;
1495     int first_page;
1496 dtc 1.14
1497 dtc 1.1 gc_assert(Pointerp(object));
1498     gc_assert(from_space_p(object));
1499     gc_assert((nwords & 0x01) == 0);
1500    
1501 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1502     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1503 dtc 1.1
1504     /* Check if it's a large object. */
1505 dtc 1.14 first_page = find_page_index((void *) object);
1506 dtc 1.1 gc_assert(first_page >= 0);
1507    
1508 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1509 dtc 1.1 /* Promote the object. */
1510     int remaining_bytes;
1511     int next_page;
1512     int bytes_freed;
1513     int old_bytes_used;
1514 dtc 1.15 int mmask, mflags;
1515 dtc 1.14
1516     /*
1517     * Note: Any page write protection must be removed, else a later
1518     * scavenge_newspace may incorrectly not scavenge these pages.
1519     * This would not be necessary if they are added to the new areas,
1520     * but lets do it for them all (they'll probably be written
1521     * anyway?).
1522     */
1523 dtc 1.1
1524     gc_assert(page_table[first_page].first_object_offset == 0);
1525    
1526     next_page = first_page;
1527 dtc 1.14 remaining_bytes = nwords * 4;
1528 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1529 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1530     gc_assert(PAGE_ALLOCATED(next_page));
1531     gc_assert(!PAGE_UNBOXED(next_page));
1532     gc_assert(PAGE_LARGE_OBJECT(next_page));
1533 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1534 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1535     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1536 dtc 1.14
1537 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1538 dtc 1.14
1539     /*
1540     * Remove any write protection. Should be able to rely on the
1541     * WP flag to avoid redundant calls.
1542     */
1543 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1544 dtc 1.17 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1545 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1546 dtc 1.1 }
1547 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1548 dtc 1.1 next_page++;
1549     }
1550    
1551 dtc 1.14 /*
1552     * Now only one page remains, but the object may have shrunk so
1553     * there may be more unused pages which will be freed.
1554     */
1555    
1556 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1557     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1558 dtc 1.14
1559 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1560     gc_assert(PAGE_ALLOCATED(next_page));
1561     gc_assert(!PAGE_UNBOXED(next_page));
1562 dtc 1.14
1563 dtc 1.1 /* Adjust the bytes_used. */
1564     old_bytes_used = page_table[next_page].bytes_used;
1565     page_table[next_page].bytes_used = remaining_bytes;
1566 dtc 1.14
1567 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1568 dtc 1.14
1569 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1570     | PAGE_GENERATION_MASK;
1571     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1572    
1573 dtc 1.1 /* Free any remaining pages; needs care. */
1574     next_page++;
1575 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1576 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1577 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1578     - next_page)) {
1579 dtc 1.14 /*
1580     * Checks out OK, free the page. Don't need to bother zeroing
1581     * pages as this should have been done before shrinking the
1582     * object. These pages shouldn't be write protected as they
1583     * should be zero filled.
1584     */
1585 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1586 dtc 1.14
1587 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1588 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1589 dtc 1.1 page_table[next_page].bytes_used = 0;
1590     bytes_freed += old_bytes_used;
1591     next_page++;
1592     }
1593 dtc 1.14
1594     if (gencgc_verbose && bytes_freed > 0)
1595     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1596    
1597     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1598     generations[new_space].bytes_allocated += 4 * nwords;
1599 dtc 1.1 bytes_allocated -= bytes_freed;
1600 dtc 1.14
1601 dtc 1.1 /* Add the region to the new_areas if requested. */
1602 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1603 dtc 1.1
1604 dtc 1.14 return object;
1605 dtc 1.1 }
1606     else {
1607     /* get tag of object */
1608     tag = LowtagOf(object);
1609 dtc 1.14
1610 dtc 1.1 /* allocate space */
1611 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1612    
1613 dtc 1.1 dest = new;
1614     source = (lispobj *) PTR(object);
1615 dtc 1.14
1616 dtc 1.1 /* copy the object */
1617     while (nwords > 0) {
1618     dest[0] = source[0];
1619     dest[1] = source[1];
1620     dest += 2;
1621     source += 2;
1622     nwords -= 2;
1623     }
1624 dtc 1.14
1625 dtc 1.1 /* return lisp pointer of new object */
1626 dtc 1.14 return (lispobj) new | tag;
1627 dtc 1.1 }
1628     }
1629    
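/*
 * A rough worked illustration of the promotion path above, assuming
 * PAGE_SIZE is 4096 as on x86 (an assumption, not taken from this
 * file): promoting a large boxed object of nwords * 4 = 9000 bytes
 * runs the full-page loop twice (9000 -> 4904 -> 808), moving two
 * whole pages to new_space and recording 808 bytes_used on the final
 * page; any trailing pages still attributed to the object are then
 * freed and accumulated in bytes_freed.
 */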
1630     /* Copying UnBoxed Objects. */
1631 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1632 dtc 1.1 {
1633     int tag;
1634     lispobj *new;
1635     lispobj *source, *dest;
1636 dtc 1.14
1637 dtc 1.1 gc_assert(Pointerp(object));
1638     gc_assert(from_space_p(object));
1639     gc_assert((nwords & 0x01) == 0);
1640 dtc 1.14
1641 dtc 1.1 /* get tag of object */
1642     tag = LowtagOf(object);
1643    
1644     /* allocate space */
1645     new = gc_quick_alloc_unboxed(nwords*4);
1646 dtc 1.14
1647 dtc 1.1 dest = new;
1648     source = (lispobj *) PTR(object);
1649 dtc 1.14
1650     /* Copy the object */
1651 dtc 1.1 while (nwords > 0) {
1652     dest[0] = source[0];
1653     dest[1] = source[1];
1654     dest += 2;
1655     source += 2;
1656     nwords -= 2;
1657     }
1658 dtc 1.14
1659     /* Return lisp pointer of new object. */
1660     return (lispobj) new | tag;
1661 dtc 1.1 }
1662    
1663    
1664 dtc 1.14 /*
1665     * Copying Large Unboxed Objects. If the object is in a large object
1666     * region then it is simply promoted, else it is copied. If it's large
1667     * enough then it's copied to a large object region.
1668     *
1669     * Bignums and vectors may have shrunk. If the object is not copied
1670     * the space needs to be reclaimed, and the page_tables corrected.
1671     */
1672     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1673 dtc 1.1 {
1674     int tag;
1675     lispobj *new;
1676     lispobj *source, *dest;
1677     int first_page;
1678 dtc 1.14
1679 dtc 1.1 gc_assert(Pointerp(object));
1680     gc_assert(from_space_p(object));
1681     gc_assert((nwords & 0x01) == 0);
1682    
1683 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1684     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1685 dtc 1.1
1686     /* Check if it's a large object. */
1687 dtc 1.14 first_page = find_page_index((void *) object);
1688 dtc 1.1 gc_assert(first_page >= 0);
1689    
1690 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1691 dtc 1.14 /*
1692     * Promote the object. Note: Unboxed objects may have been
1693     * allocated to a BOXED region so it may be necessary to change
1694     * the region to UNBOXED.
1695     */
1696 dtc 1.1 int remaining_bytes;
1697     int next_page;
1698     int bytes_freed;
1699     int old_bytes_used;
1700 dtc 1.15 int mmask, mflags;
1701 dtc 1.14
1702 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1703 dtc 1.14
1704 dtc 1.1 next_page = first_page;
1705 dtc 1.14 remaining_bytes = nwords * 4;
1706 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1707 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1708     gc_assert(PAGE_ALLOCATED(next_page));
1709     gc_assert(PAGE_LARGE_OBJECT(next_page));
1710 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1711 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1712     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1713 dtc 1.14
1714 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1715     PAGE_UNBOXED_MASK | new_space);
1716 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1717 dtc 1.1 next_page++;
1718     }
1719 dtc 1.14
1720     /*
1721     * Now only one page remains, but the object may have shrunk so
1722     * there may be more unused pages which will be freed.
1723     */
1724    
1725 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1726     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1727 dtc 1.14
1728 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1729     | PAGE_GENERATION_MASK,
1730     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1731 dtc 1.14
1732 dtc 1.1 /* Adjust the bytes_used. */
1733     old_bytes_used = page_table[next_page].bytes_used;
1734     page_table[next_page].bytes_used = remaining_bytes;
1735 dtc 1.14
1736 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1737 dtc 1.14
1738 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1739     | PAGE_GENERATION_MASK;
1740     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1741    
1742 dtc 1.1 /* Free any remaining pages; needs care. */
1743     next_page++;
1744 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1745 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1746 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1747     - next_page)) {
1748 dtc 1.14 /*
1749     * Checks out OK, free the page. Don't need to bother zeroing
1750     * pages as this should have been done before shrinking the
1751     * object. These pages shouldn't be write protected; even if
1752     * boxed they should be zero filled.
1753     */
1754 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1755 dtc 1.14
1756 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1757 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1758 dtc 1.1 page_table[next_page].bytes_used = 0;
1759     bytes_freed += old_bytes_used;
1760     next_page++;
1761     }
1762 dtc 1.14
1763     if (gencgc_verbose && bytes_freed > 0)
1764     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1765    
1766     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1767     generations[new_space].bytes_allocated += 4 * nwords;
1768 dtc 1.1 bytes_allocated -= bytes_freed;
1769 dtc 1.14
1770     return object;
1771 dtc 1.1 }
1772     else {
1773     /* get tag of object */
1774     tag = LowtagOf(object);
1775 dtc 1.14
1776 dtc 1.1 /* allocate space */
1777 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
1778    
1779 dtc 1.1 dest = new;
1780     source = (lispobj *) PTR(object);
1781 dtc 1.14
1782 dtc 1.1 /* copy the object */
1783     while (nwords > 0) {
1784     dest[0] = source[0];
1785     dest[1] = source[1];
1786     dest += 2;
1787     source += 2;
1788     nwords -= 2;
1789     }
1790 dtc 1.14
1791 dtc 1.1 /* return lisp pointer of new object */
1792 dtc 1.14 return (lispobj) new | tag;
1793 dtc 1.1 }
1794     }
1795    
1796    
1797     /* Scavenging */
1798    
1799     #define DIRECT_SCAV 0
1800    
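/*
 * A brief note on the two dispatch modes below (a summary, not taken
 * verbatim from the original comments): with DIRECT_SCAV set to 1
 * every word is dispatched through the scavtab table; with it set to
 * 0 the common cases - fixnums, pointers outside from-space, and
 * pointers to already forwarded objects - are handled inline, and
 * only the remaining cases go through the table.
 */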
1801 dtc 1.14 static void scavenge(lispobj *start, long nwords)
1802 dtc 1.1 {
1803     while (nwords > 0) {
1804     lispobj object;
1805     int type, words_scavenged;
1806 dtc 1.14
1807 dtc 1.1 object = *start;
1808 dtc 1.14
1809 dtc 1.1 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1810 dtc 1.14
1811 dtc 1.1 #if DIRECT_SCAV
1812     type = TypeOf(object);
1813     words_scavenged = (scavtab[type])(start, object);
1814     #else
1815     if (Pointerp(object))
1816     /* It be a pointer. */
1817     if (from_space_p(object)) {
1818 dtc 1.14 /*
1819     * It currently points to old space. Check for a forwarding
1820     * pointer.
1821     */
1822     lispobj *ptr = (lispobj *) PTR(object);
1823 dtc 1.1 lispobj first_word = *ptr;
1824 dtc 1.14
1825 dtc 1.1 if(first_word == 0x01) {
1826     /* Yep, there be a forwarding pointer. */
1827     *start = ptr[1];
1828     words_scavenged = 1;
1829     }
1830     else
1831     /* Scavenge that pointer. */
1832     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1833     }
1834     else
1835 dtc 1.14 /* It points somewhere other than oldspace. Leave it alone. */
1836 dtc 1.1 words_scavenged = 1;
1837     else
1838     if ((object & 3) == 0)
1839     /* It's a fixnum. Real easy. */
1840     words_scavenged = 1;
1841     else
1842     /* It's some random header object. */
1843     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1844     #endif
1845 dtc 1.14
1846 dtc 1.1 start += words_scavenged;
1847     nwords -= words_scavenged;
1848     }
1849     gc_assert(nwords == 0);
1850     }
1851    
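/*
 * An illustrative sketch, not part of the collector: the forwarding
 * convention relied on above is that the first word of a transported
 * object is overwritten with 0x01 and the second word with the new
 * tagged pointer.  A transport function obeying that convention
 * would look roughly like the following; the nwords parameter is
 * hypothetical, the real transporters compute it from the header.
 */
#if 0
static lispobj trans_sketch(lispobj object, int nwords)
{
  lispobj *old = (lispobj *) PTR(object);
  lispobj copy = copy_object(object, nwords);

  old[0] = 0x01;   /* Mark the old object as forwarded. */
  old[1] = copy;   /* Record the new location. */
  return copy;
}
#endif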
1852    
1853     /* Code and Code-Related Objects */
1854    
1855 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
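/*
 * A hedged note on the offset above: RAW_ADDR_OFFSET appears to
 * convert between a tagged function pointer and the raw instruction
 * address stored in closures and fdefns, on the assumption that the
 * code of a function starts 6 words past its (untagged) header, e.g.
 *
 *   raw = fun + RAW_ADDR_OFFSET;     where fun is a type_FunctionPointer lispobj
 *   fun = raw - RAW_ADDR_OFFSET;
 *
 * which is how scav_closure_header and scav_fdefn below use it.
 */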
1856 dtc 1.1
1857     static lispobj trans_function_header(lispobj object);
1858     static lispobj trans_boxed(lispobj object);
1859    
1860     #if DIRECT_SCAV
1861 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
1862 dtc 1.1 {
1863     gc_assert(Pointerp(object));
1864    
1865     if (from_space_p(object)) {
1866     lispobj first, *first_pointer;
1867 dtc 1.14
1868     /*
1869     * Object is a pointer into from space - check to see if it has
1870     * been forwarded.
1871     */
1872 dtc 1.1 first_pointer = (lispobj *) PTR(object);
1873     first = *first_pointer;
1874 dtc 1.14
1875 dtc 1.1 if (first == 0x01) {
1876     /* Forwarded */
1877     *where = first_pointer[1];
1878     return 1;
1879     }
1880     else {
1881     int type;
1882     lispobj copy;
1883 dtc 1.14
1884     /*
1885     * Must transport object -- object may point to either a
1886     * function header, a closure function header, or to a closure
1887     * header.
1888     */
1889    
1890 dtc 1.1 type = TypeOf(first);
1891     switch (type) {
1892     case type_FunctionHeader:
1893     case type_ClosureFunctionHeader:
1894     copy = trans_function_header(object);
1895     break;
1896     default:
1897     copy = trans_boxed(object);
1898     break;
1899     }
1900 dtc 1.14
1901 dtc 1.1 if (copy != object) {
1902     /* Set forwarding pointer. */
1903     first_pointer[0] = 0x01;
1904     first_pointer[1] = copy;
1905     }
1906 dtc 1.14
1907 dtc 1.1 first = copy;
1908     }
1909 dtc 1.14
1910 dtc 1.1 gc_assert(Pointerp(first));
1911     gc_assert(!from_space_p(first));
1912 dtc 1.14
1913 dtc 1.1 *where = first;
1914     }
1915     return 1;
1916     }
1917     #else
1918 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
1919 dtc 1.1 {
1920     lispobj *first_pointer;
1921     lispobj copy;
1922    
1923     gc_assert(Pointerp(object));
1924 dtc 1.14
1925 dtc 1.1 /* Object is a pointer into from space - not a FP. */
1926     first_pointer = (lispobj *) PTR(object);
1927 dtc 1.14
1928     /*
1929     * Must transport object -- object may point to either a function
1930     * header, a closure function header, or to a closure header.
1931     */
1932    
1933 dtc 1.1 switch (TypeOf(*first_pointer)) {
1934     case type_FunctionHeader:
1935     case type_ClosureFunctionHeader:
1936     copy = trans_function_header(object);
1937     break;
1938     default:
1939     copy = trans_boxed(object);
1940     break;
1941     }
1942 dtc 1.14
1943 dtc 1.1 if (copy != object) {
1944     /* Set forwarding pointer */
1945     first_pointer[0] = 0x01;
1946     first_pointer[1] = copy;
1947     }
1948 dtc 1.14
1949 dtc 1.1 gc_assert(Pointerp(copy));
1950     gc_assert(!from_space_p(copy));
1951    
1952     *where = copy;
1953 dtc 1.14
1954 dtc 1.1 return 1;
1955     }
1956     #endif
1957    
1958 dtc 1.14 /*
1959     * Scan an x86 compiled code object, looking for possible fixups that
1960     * have been missed after a move.
1961     *
1962     * Two types of fixups are needed:
1963     * 1. Absolute fixups to within the code object.
1964     * 2. Relative fixups to outside the code object.
1965     *
1966     * Currently only absolute fixups to the constant vector or to the
1967     * code area are checked.
1968     */
1969     void sniff_code_object(struct code *code, unsigned displacement)
1970 dtc 1.1 {
1971     int nheader_words, ncode_words, nwords;
1972     lispobj fheaderl;
1973     struct function *fheaderp;
1974     void *p;
1975     void *constants_start_addr, *constants_end_addr;
1976     void *code_start_addr, *code_end_addr;
1977     int fixup_found = 0;
1978 dtc 1.14
1979 dtc 1.3 if (!check_code_fixups)
1980     return;
1981    
1982 dtc 1.14 /*
1983     * It's ok if it's byte compiled code. The trace table offset will
1984     * be a fixnum if it's x86 compiled code - check.
1985     */
1986 dtc 1.1 if (code->trace_table_offset & 0x3) {
1987 dtc 1.14 #if 0
1988     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1989     #endif
1990 dtc 1.1 return;
1991     }
1992    
1993     /* Else it's x86 machine code. */
1994    
1995     ncode_words = fixnum_value(code->code_size);
1996 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
1997 dtc 1.1 nwords = ncode_words + nheader_words;
1998    
1999 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2000     constants_end_addr = (void *) code + nheader_words * 4;
2001     code_start_addr = (void *) code + nheader_words * 4;
2002     code_end_addr = (void *) code + nwords * 4;
2003 dtc 1.1
2004     /* Work through the unboxed code. */
2005     for (p = code_start_addr; p < code_end_addr; p++) {
2006 dtc 1.14 void *data = *(void **) p;
2007     unsigned d1 = *((unsigned char *) p - 1);
2008     unsigned d2 = *((unsigned char *) p - 2);
2009     unsigned d3 = *((unsigned char *) p - 3);
2010     unsigned d4 = *((unsigned char *) p - 4);
2011     unsigned d5 = *((unsigned char *) p - 5);
2012     unsigned d6 = *((unsigned char *) p - 6);
2013    
2014     /*
2015     * Check for code references.
2016     *
2017     * Check for a 32 bit word that looks like an absolute reference
2018     * to within the code area of the code object.
2019     */
2020     if (data >= code_start_addr - displacement
2021     && data < code_end_addr - displacement) {
2022 dtc 1.1 /* Function header */
2023 dtc 1.14 if (d4 == 0x5e
2024     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2025 dtc 1.1 /* Skip the function header */
2026 dtc 1.14 p += 6 * 4 - 4 - 1;
2027 dtc 1.1 continue;
2028     }
2029     /* Push imm32 */
2030     if (d1 == 0x68) {
2031     fixup_found = 1;
2032 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2033 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2034 dtc 1.14 fprintf(stderr, "*** Push $0x%.8x\n", data);
2035 dtc 1.1 }
2036     /* Mov [reg-8],imm32 */
2037 dtc 1.14 if (d3 == 0xc7
2038     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2039     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2040     && d1 == 0xf8) {
2041 dtc 1.1 fixup_found = 1;
2042 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2043 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2044 dtc 1.14 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2045 dtc 1.1 }
2046     /* Lea reg, [disp32] */
2047 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2048 dtc 1.1 fixup_found = 1;
2049 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2050 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2051 dtc 1.14 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2052 dtc 1.1 }
2053     }
2054    
2055 dtc 1.14 /*
2056     * Check for constant references.
2057     *
2058     * Check for a 32 bit word that looks like an absolute reference
2059     * to within the constant vector. Constant references will be
2060     * aligned.
2061     */
2062     if (data >= constants_start_addr - displacement
2063     && data < constants_end_addr - displacement
2064     && ((unsigned) data & 0x3) == 0) {
2065 dtc 1.1 /* Mov eax,m32 */
2066     if (d1 == 0xa1) {
2067     fixup_found = 1;
2068 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2069     p, d6, d5, d4, d3, d2, d1, data);
2070     fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2071 dtc 1.1 }
2072 dtc 1.14
2073 dtc 1.1 /* Mov m32,eax */
2074     if (d1 == 0xa3) {
2075     fixup_found = 1;
2076 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2077     p, d6, d5, d4, d3, d2, d1, data);
2078     fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2079 dtc 1.1 }
2080 dtc 1.14
2081 dtc 1.1 /* Cmp m32,imm32 */
2082 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2083 dtc 1.1 fixup_found = 1;
2084 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2085     p, d6, d5, d4, d3, d2, d1, data);
2086 dtc 1.1 /* XX Check this */
2087 dtc 1.14 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2088 dtc 1.1 }
2089 dtc 1.14
2090 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2091     if ((d1 & 0xc7) == 5) {
2092     /* Cmp m32,reg */
2093     if (d2 == 0x39) {
2094     fixup_found = 1;
2095 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2096     p, d6, d5, d4, d3, d2, d1, data);
2097     fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2098 dtc 1.1 }
2099     /* Cmp reg32,m32 */
2100     if (d2 == 0x3b) {
2101     fixup_found = 1;
2102 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2103     p, d6, d5, d4, d3, d2, d1, data);
2104     fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2105 dtc 1.1 }
2106     /* Mov m32,reg32 */
2107     if (d2 == 0x89) {
2108     fixup_found = 1;
2109 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2110     p, d6, d5, d4, d3, d2, d1, data);
2111     fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2112 dtc 1.1 }
2113     /* Mov reg32,m32 */
2114     if (d2 == 0x8b) {
2115     fixup_found = 1;
2116 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2117     p, d6, d5, d4, d3, d2, d1, data);
2118     fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2119 dtc 1.1 }
2120     /* Lea reg32,m32 */
2121     if (d2 == 0x8d) {
2122     fixup_found = 1;
2123 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2124     p, d6, d5, d4, d3, d2, d1, data);
2125     fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2126 dtc 1.1 }
2127     }
2128     }
2129     }
2130 dtc 1.14
2131 dtc 1.1 /* If anything was found print out some info. on the code object. */
2132     if (fixup_found) {
2133 dtc 1.14 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2134 dtc 1.1 code, nheader_words, ncode_words);
2135 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2136     constants_start_addr, constants_end_addr,
2137     code_start_addr, code_end_addr);
2138 dtc 1.1 }
2139     }
2140    
2141 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2142 dtc 1.1 {
2143     int nheader_words, ncode_words, nwords;
2144     void *constants_start_addr, *constants_end_addr;
2145     void *code_start_addr, *code_end_addr;
2146     lispobj p;
2147     lispobj fixups = NIL;
2148 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2149     struct vector *fixups_vector;
2150    
2151     /*
2152     * It's ok if it's byte compiled code. The trace table offset will
2153     * be a fixnum if it's x86 compiled code - check.
2154     */
2155 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2156 dtc 1.14 #if 0
2157     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2158     #endif
2159 dtc 1.1 return;
2160     }
2161    
2162     /* Else it's x86 machine code. */
2163     ncode_words = fixnum_value(new_code->code_size);
2164 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2165 dtc 1.1 nwords = ncode_words + nheader_words;
2166 dtc 1.14 #if 0
2167     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2168     new_code, nheader_words, ncode_words);
2169     #endif
2170     constants_start_addr = (void *) new_code + 5 * 4;
2171     constants_end_addr = (void *) new_code + nheader_words * 4;
2172     code_start_addr = (void *) new_code + nheader_words * 4;
2173 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2174 dtc 1.14 #if 0
2175     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2176     constants_start_addr, constants_end_addr,
2177     code_start_addr, code_end_addr);
2178     #endif
2179 dtc 1.1
2180 dtc 1.14 /*
2181     * The first constant should be a pointer to the fixups for this
2182     * code object - check.
2183     */
2184     fixups = new_code->constants[0];
2185 dtc 1.1
2186 dtc 1.14 /*
2187     * It will be 0 or the unbound-marker if there are no fixups, and
2188     * will be an other pointer if it is valid.
2189     */
2190     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2191 dtc 1.1 /* Check for possible errors. */
2192     if (check_code_fixups)
2193     sniff_code_object(new_code, displacement);
2194 dtc 1.14
2195     #if 0
2196     fprintf(stderr, "Fixups for code object not found!?\n");
2197     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2198 dtc 1.1 new_code, nheader_words, ncode_words);
2199 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2200     constants_start_addr, constants_end_addr,
2201     code_start_addr, code_end_addr);
2202     #endif
2203 dtc 1.1 return;
2204     }
2205    
2206 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2207 dtc 1.1
2208     /* Could be pointing to a forwarding pointer. */
2209 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2210     && fixups_vector->header == 0x01) {
2211 dtc 1.19 #if 0
2212 dtc 1.14 fprintf(stderr, "* FF\n");
2213 dtc 1.19 #endif
2214 dtc 1.1 /* If so then follow it. */
2215 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2216 dtc 1.1 }
2217 dtc 1.14
2218     #if 0
2219     fprintf(stderr, "Got the fixups\n");
2220     #endif
2221 dtc 1.1
2222     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2223 dtc 1.14 /*
2224     * Got the fixups for the code block. Now work through the
2225     * vector, and apply a fixup at each address.
2226     */
2227 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2228     int i;
2229     for (i = 0; i < length; i++) {
2230     unsigned offset = fixups_vector->data[i];
2231     /* Now check the current value of offset. */
2232 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2233    
2234     /*
2235     * If it's within the old_code object then it must be an
2236     * absolute fixup (relative ones are not saved).
2237     */
2238     if (old_value >= (unsigned) old_code
2239     && old_value < (unsigned) old_code + nwords * 4)
2240 dtc 1.1 /* So add the displacement. */
2241 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2242 dtc 1.1 + displacement;
2243     else
2244 dtc 1.14 /*
2245     * It is outside the old code object so it must be a relative
2246     * fixup (absolute fixups are not saved). So subtract the
2247     * displacement.
2248     */
2249     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2250 dtc 1.1 - displacement;
2251     }
2252     }
2253 dtc 1.14
2254 dtc 1.1 /* Check for possible errors. */
2255     if (check_code_fixups)
2256 dtc 1.14 sniff_code_object(new_code, displacement);
2257 dtc 1.1 }
2258    
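/*
 * A worked illustration of the fixup arithmetic above, using made-up
 * addresses: with the old code object at 0x10000000, the new one at
 * 0x10100000 and hence a displacement of 0x00100000, a saved fixup
 * whose current value is 0x10000123 points into the old object, so
 * it is taken to be an absolute reference and becomes 0x10100123
 * (old value plus displacement); a value of 0x08012345 lies outside
 * the old object, so it is taken to be a relative reference skewed
 * by the move and becomes 0x07f12345 (old value minus displacement).
 */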
2259 dtc 1.14 static struct code * trans_code(struct code *code)
2260 dtc 1.1 {
2261     struct code *new_code;
2262     lispobj l_code, l_new_code;
2263     int nheader_words, ncode_words, nwords;
2264     unsigned long displacement;
2265     lispobj fheaderl, *prev_pointer;
2266 dtc 1.14
2267     #if 0
2268     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2269     (unsigned long) code);
2270     #endif
2271    
2272     /* If object has already been transported, just return pointer */
2273     if (*(lispobj *) code == 0x01)
2274     return (struct code*) (((lispobj *) code)[1]);
2275    
2276 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2277 dtc 1.14
2278 dtc 1.1 /* prepare to transport the code vector */
2279     l_code = (lispobj) code | type_OtherPointer;
2280 dtc 1.14
2281 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2282     nheader_words = HeaderValue(code->header);
2283     nwords = ncode_words + nheader_words;
2284     nwords = CEILING(nwords, 2);
2285 dtc 1.14
2286 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2287     new_code = (struct code *) PTR(l_new_code);
2288    
2289     /* May not have been moved. */
2290     if (new_code == code)
2291     return new_code;
2292 dtc 1.14
2293 dtc 1.1 displacement = l_new_code - l_code;
2294 dtc 1.14
2295     #if 0
2296     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2297     (unsigned long) code, (unsigned long) new_code);
2298     fprintf(stderr, "Code object is %d words long.\n", nwords);
2299     #endif
2300    
2301 dtc 1.1 /* set forwarding pointer */
2302 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2303     ((lispobj *) code)[1] = l_new_code;
2304    
2305     /*
2306     * Set forwarding pointers for all the function headers in the code
2307     * object; also fix all self pointers.
2308     */
2309    
2310 dtc 1.1 fheaderl = code->entry_points;
2311     prev_pointer = &new_code->entry_points;
2312 dtc 1.14
2313 dtc 1.1 while (fheaderl != NIL) {
2314     struct function *fheaderp, *nfheaderp;
2315     lispobj nfheaderl;
2316 dtc 1.14
2317 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2318     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2319 dtc 1.14
2320     /*
2321     * Calculate the new function pointer and the new function header.
2322     */
2323 dtc 1.1 nfheaderl = fheaderl + displacement;
2324     nfheaderp = (struct function *) PTR(nfheaderl);
2325 dtc 1.14
2326 dtc 1.1 /* set forwarding pointer */
2327 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2328     ((lispobj *) fheaderp)[1] = nfheaderl;
2329    
2330     /* Fix self pointer */
2331 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2332 dtc 1.14
2333 dtc 1.1 *prev_pointer = nfheaderl;
2334 dtc 1.14
2335 dtc 1.1 fheaderl = fheaderp->next;
2336     prev_pointer = &nfheaderp->next;
2337     }
2338    
2339 dtc 1.14 #if 0
2340     sniff_code_object(new_code, displacement);
2341     #endif
2342     apply_code_fixups(code, new_code);
2343    
2344 dtc 1.1 return new_code;
2345     }
2346    
2347 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2348 dtc 1.1 {
2349     struct code *code;
2350     int nheader_words, ncode_words, nwords;
2351     lispobj fheaderl;
2352     struct function *fheaderp;
2353 dtc 1.14
2354 dtc 1.1 code = (struct code *) where;
2355     ncode_words = fixnum_value(code->code_size);
2356     nheader_words = HeaderValue(object);
2357     nwords = ncode_words + nheader_words;
2358     nwords = CEILING(nwords, 2);
2359    
2360     /* Scavenge the boxed section of the code data block */
2361     scavenge(where + 1, nheader_words - 1);
2362    
2363 dtc 1.14 /*
2364     * Scavenge the boxed section of each function object in the code
2365     * data block
2366     */
2367 dtc 1.1 fheaderl = code->entry_points;
2368     while (fheaderl != NIL) {
2369     fheaderp = (struct function *) PTR(fheaderl);
2370     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2371 dtc 1.14
2372 dtc 1.1 scavenge(&fheaderp->name, 1);
2373     scavenge(&fheaderp->arglist, 1);
2374     scavenge(&fheaderp->type, 1);
2375 dtc 1.14
2376 dtc 1.1 fheaderl = fheaderp->next;
2377     }
2378 dtc 1.14
2379 dtc 1.1 return nwords;
2380     }
2381    
2382 dtc 1.14 static lispobj trans_code_header(lispobj object)
2383 dtc 1.1 {
2384     struct code *ncode;
2385    
2386     ncode = trans_code((struct code *) PTR(object));
2387     return (lispobj) ncode | type_OtherPointer;
2388     }
2389    
2390 dtc 1.14 static int size_code_header(lispobj *where)
2391 dtc 1.1 {
2392     struct code *code;
2393     int nheader_words, ncode_words, nwords;
2394    
2395     code = (struct code *) where;
2396 dtc 1.14
2397 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2398     nheader_words = HeaderValue(code->header);
2399     nwords = ncode_words + nheader_words;
2400     nwords = CEILING(nwords, 2);
2401    
2402     return nwords;
2403     }
2404    
2405    
2406 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2407 dtc 1.1 {
2408     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2409     fprintf(stderr, "Return PC Header.\n");
2410     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2411     (unsigned long) where, (unsigned long) object);
2412     lose(NULL);
2413     return 0;
2414     }
2415    
2416 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2417 dtc 1.1 {
2418     struct function *return_pc;
2419     unsigned long offset;
2420     struct code *code, *ncode;
2421    
2422 dtc 1.14 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2423    
2424 dtc 1.1 return_pc = (struct function *) PTR(object);
2425     offset = HeaderValue(return_pc->header) * 4;
2426    
2427     /* Transport the whole code object */
2428     code = (struct code *) ((unsigned long) return_pc - offset);
2429     ncode = trans_code(code);
2430 dtc 1.14
2431 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2432     }
2433    
2434 dtc 1.14 /*
2435     * On the 386, closures hold a pointer to the raw address instead of
2436     * the function object.
2437     */
2438 dtc 1.1 #ifdef i386
2439 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2440 dtc 1.1 {
2441     struct closure *closure;
2442     lispobj fun;
2443    
2444     closure = (struct closure *)where;
2445     fun = closure->function - RAW_ADDR_OFFSET;
2446     scavenge(&fun, 1);
2447     /* The function may have moved so update the raw address. But don't
2448     write unnecessarily. */
2449     if (closure->function != fun + RAW_ADDR_OFFSET)
2450     closure->function = fun + RAW_ADDR_OFFSET;
2451 dtc 1.14
2452 dtc 1.1 return 2;
2453     }
2454     #endif
2455    
2456 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2457 dtc 1.1 {
2458     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2459     fprintf(stderr, "Function Header.\n");
2460     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2461     (unsigned long) where, (unsigned long) object);
2462     lose(NULL);
2463     return 0;
2464     }
2465    
2466 dtc 1.14 static lispobj trans_function_header(lispobj object)
2467 dtc 1.1 {
2468     struct function *fheader;
2469     unsigned long offset;
2470     struct code *code, *ncode;
2471 dtc 1.14
2472 dtc 1.1 fheader = (struct function *) PTR(object);
2473     offset = HeaderValue(fheader->header) * 4;
2474 dtc 1.14
2475 dtc 1.1 /* Transport the whole code object */
2476     code = (struct code *) ((unsigned long) fheader - offset);
2477     ncode = trans_code(code);
2478 dtc 1.14
2479 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2480     }
2481    
2482    
2483     /* Instances */
2484    
2485     #if DIRECT_SCAV
2486 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2487 dtc 1.1 {
2488     if (from_space_p(object)) {
2489     lispobj first, *first_pointer;
2490 dtc 1.14
2491     /*
2492     * object is a pointer into from space. check to see if it has
2493     * been forwarded
2494     */
2495 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2496     first = *first_pointer;
2497 dtc 1.14
2498 dtc 1.1 if (first == 0x01)
2499     /* Forwarded. */
2500     first = first_pointer[1];
2501     else {
2502     first = trans_boxed(object);
2503     gc_assert(first != object);
2504     /* Set forwarding pointer */
2505     first_pointer[0] = 0x01;
2506     first_pointer[1] = first;
2507     }
2508     *where = first;
2509     }
2510     return 1;
2511     }
2512     #else
2513 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2514 dtc 1.1 {
2515     lispobj copy, *first_pointer;
2516 dtc 1.14
2517 dtc 1.1 /* Object is a pointer into from space - not a FP */
2518     copy = trans_boxed(object);
2519    
2520     gc_assert(copy != object);
2521    
2522     first_pointer = (lispobj *) PTR(object);
2523 dtc 1.14
2524 dtc 1.1 /* Set forwarding pointer. */
2525     first_pointer[0] = 0x01;
2526     first_pointer[1] = copy;
2527     *where = copy;
2528    
2529     return 1;
2530     }
2531     #endif
2532    
2533    
2534     /* Lists and Conses */
2535    
2536     static lispobj trans_list(lispobj object);
2537    
2538     #if DIRECT_SCAV
2539 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2540 dtc 1.1 {
2541     gc_assert(Pointerp(object));
2542    
2543     if (from_space_p(object)) {
2544     lispobj first, *first_pointer;
2545 dtc 1.14
2546     /*
2547     * Object is a pointer into from space - check to see if it has
2548     * been forwarded.
2549     */
2550 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2551     first = *first_pointer;
2552 dtc 1.14
2553 dtc 1.1 if (first == 0x01)
2554     /* Forwarded. */
2555     first = first_pointer[1];
2556     else {
2557     first = trans_list(object);
2558 dtc 1.14
2559 dtc 1.1 /* Set forwarding pointer */
2560     first_pointer[0] = 0x01;
2561     first_pointer[1] = first;
2562     }
2563 dtc 1.14
2564 dtc 1.1 gc_assert(Pointerp(first));
2565     gc_assert(!from_space_p(first));
2566     *where = first;
2567     }
2568     return 1;
2569     }
2570     #else
2571 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2572 dtc 1.1 {
2573     lispobj first, *first_pointer;
2574    
2575     gc_assert(Pointerp(object));
2576    
2577     /* Object is a pointer into from space - not FP */
2578 dtc 1.14
2579 dtc 1.1 first = trans_list(object);
2580     gc_assert(first != object);
2581    
2582     first_pointer = (lispobj *) PTR(object);
2583    
2584     /* Set forwarding pointer */
2585     first_pointer[0] = 0x01;
2586     first_pointer[1] = first;
2587    
2588     gc_assert(Pointerp(first));
2589     gc_assert(!from_space_p(first));
2590     *where = first;
2591     return 1;
2592     }
2593     #endif
2594    
2595 dtc 1.14 static lispobj trans_list(lispobj object)
2596 dtc 1.1 {
2597     lispobj new_list_pointer;
2598     struct cons *cons, *new_cons;
2599     int n = 0;
2600     lispobj cdr;
2601    
2602     gc_assert(from_space_p(object));
2603    
2604     cons = (struct cons *) PTR(object);
2605 dtc 1.14
2606 dtc 1.1 /* copy 'object' */
2607     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2608     new_cons->car = cons->car;
2609     new_cons->cdr = cons->cdr; /* updated later */
2610 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2611 dtc 1.1
2612     /* Grab the cdr before it is clobbered */
2613     cdr = cons->cdr;
2614    
2615     /* Set forwarding pointer (clobbers start of list). */
2616     cons->car = 0x01;
2617     cons->cdr = new_list_pointer;
2618    
2619     /* Try to linearize the list in the cdr direction to help reduce paging. */
2620     while (1) {
2621     lispobj new_cdr;
2622     struct cons *cdr_cons, *new_cdr_cons;
2623 dtc 1.14
2624 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2625 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
2626 dtc 1.1 break;
2627 dtc 1.14
2628 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
2629 dtc 1.14
2630 dtc 1.1 /* copy 'cdr' */
2631     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2632     new_cdr_cons->car = cdr_cons->car;
2633     new_cdr_cons->cdr = cdr_cons->cdr;
2634 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2635    
2636 dtc 1.1 /* Grab the cdr before it is clobbered */
2637     cdr = cdr_cons->cdr;
2638 dtc 1.14
2639 dtc 1.1 /* Set forwarding pointer */
2640     cdr_cons->car = 0x01;
2641     cdr_cons->cdr = new_cdr;
2642 dtc 1.14
2643     /*
2644     * Update the cdr of the last cons copied into new space to keep
2645     * the newspace scavenge from having to do it.
2646 dtc 1.1 */
2647     new_cons->cdr = new_cdr;
2648 dtc 1.14
2649 dtc 1.1 new_cons = new_cdr_cons;
2650     }
2651 dtc 1.14
2652 dtc 1.1 return new_list_pointer;
2653     }
2654    
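/*
 * A rough illustration of the effect of trans_list above: for a list
 * such as (1 2 3) whose conses are scattered through from-space, the
 * loop copies the conses into consecutive new-space allocations in
 * cdr order, so the copy ends up roughly linear in memory.  Each old
 * cons is left with car = 0x01 and cdr = the new tagged cons, so any
 * other reference into the middle of the list is forwarded to the
 * already copied cells.
 */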
2655    
2656     /* Scavenging and Transporting Other Pointers */
2657    
2658     #if DIRECT_SCAV
2659 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2660 dtc 1.1 {
2661     gc_assert(Pointerp(object));
2662    
2663     if (from_space_p(object)) {
2664     lispobj first, *first_pointer;
2665 dtc 1.14
2666     /*
2667     * Object is a pointer into from space. check to see if it has
2668     * been forwarded.
2669     */
2670 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2671     first = *first_pointer;
2672 dtc 1.14
2673 dtc 1.1 if (first == 0x01) {
2674     /* Forwarded. */
2675     first = first_pointer[1];
2676     *where = first;
2677     } else {
2678     first = (transother[TypeOf(first)])(object);
2679 dtc 1.14
2680 dtc 1.1 if (first != object) {
2681     /* Set forwarding pointer */
2682     first_pointer[0] = 0x01;
2683     first_pointer[1] = first;
2684     *where = first;
2685     }
2686     }
2687 dtc 1.14
2688 dtc 1.1 gc_assert(Pointerp(first));
2689     gc_assert(!from_space_p(first));
2690     }
2691     return 1;
2692     }
2693     #else
2694 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2695 dtc 1.1 {
2696     lispobj first, *first_pointer;
2697    
2698     gc_assert(Pointerp(object));
2699    
2700     /* Object is a pointer into from space - not FP */
2701     first_pointer = (lispobj *) PTR(object);
2702    
2703     first = (transother[TypeOf(*first_pointer)])(object);
2704    
2705     if (first != object) {
2706     /* Set forwarding pointer */
2707     first_pointer[0] = 0x01;
2708     first_pointer[1] = first;
2709     *where = first;
2710     }
2711 dtc 1.14
2712 dtc 1.1 gc_assert(Pointerp(first));
2713     gc_assert(!from_space_p(first));
2714    
2715     return 1;
2716     }
2717     #endif
2718    
2719    
2720     /* Immediate, Boxed, and Unboxed Objects */
2721    
2722 dtc 1.14 static int size_pointer(lispobj *where)
2723 dtc 1.1 {
2724     return 1;
2725     }
2726    
2727 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
2728 dtc 1.1 {
2729     return 1;
2730     }
2731    
2732 dtc 1.14 static lispobj trans_immediate(lispobj object)
2733 dtc 1.1 {
2734     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2735     lose(NULL);
2736     return NIL;
2737     }
2738    
2739 dtc 1.14 static int size_immediate(lispobj *where)
2740 dtc 1.1 {
2741     return 1;
2742     }
2743    
2744    
2745 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
2746 dtc 1.1 {
2747     return 1;
2748     }
2749    
2750 dtc 1.14 static lispobj trans_boxed(lispobj object)
2751 dtc 1.1 {
2752     lispobj header;
2753     unsigned long length;
2754    
2755     gc_assert(Pointerp(object));
2756    
2757     header = *((lispobj *) PTR(object));
2758     length = HeaderValue(header) + 1;
2759     length = CEILING(length, 2);
2760    
2761     return copy_object(object, length);
2762     }
2763    
2764 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
2765 dtc 1.1 {
2766     lispobj header;
2767     unsigned long length;
2768    
2769     gc_assert(Pointerp(object));
2770    
2771     header = *((lispobj *) PTR(object));
2772     length = HeaderValue(header) + 1;
2773     length = CEILING(length, 2);
2774    
2775     return copy_large_object(object, length);
2776     }
2777    
2778 dtc 1.14 static int size_boxed(lispobj *where)
2779 dtc 1.1 {
2780     lispobj header;
2781     unsigned long length;
2782    
2783     header = *where;
2784     length = HeaderValue(header) + 1;
2785     length = CEILING(length, 2);
2786    
2787     return length;
2788     }
2789    
2790 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
2791 dtc 1.1 {
2792     struct fdefn *fdefn;
2793 dtc 1.14
2794 dtc 1.1 fdefn = (struct fdefn *)where;
2795 dtc 1.14
2796     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2797     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2798    
2799 dtc 1.1 /* Don't write unnecessarily */
2800     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2801     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2802 dtc 1.14
2803 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
2804     }
2805     else
2806     return 1;
2807     }
2808    
2809 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
2810 dtc 1.1 {
2811     unsigned long length;
2812    
2813     length = HeaderValue(object) + 1;
2814     length = CEILING(length, 2);
2815    
2816     return length;
2817     }
2818    
2819 dtc 1.14 static lispobj trans_unboxed(lispobj object)
2820 dtc 1.1 {
2821     lispobj header;
2822     unsigned long length;
2823    
2824    
2825     gc_assert(Pointerp(object));
2826    
2827     header = *((lispobj *) PTR(object));
2828     length = HeaderValue(header) + 1;
2829     length = CEILING(length, 2);
2830    
2831     return copy_unboxed_object(object, length);
2832     }
2833    
2834 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
2835 dtc 1.1 {
2836     lispobj header;
2837     unsigned long length;
2838    
2839    
2840     gc_assert(Pointerp(object));
2841    
2842     header = *((lispobj *) PTR(object));
2843     length = HeaderValue(header) + 1;
2844     length = CEILING(length, 2);
2845    
2846     return copy_large_unboxed_object(object, length);
2847     }
2848    
2849 dtc 1.14 static int size_unboxed(lispobj *where)
2850 dtc 1.1 {
2851     lispobj header;
2852     unsigned long length;
2853    
2854     header = *where;
2855     length = HeaderValue(header) + 1;
2856     length = CEILING(length, 2);
2857    
2858     return length;
2859     }
2860    
2861    
2862     /* Vector-Like Objects */
2863    
2864     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2865    
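/*
 * A small worked example of the macro above: NWORDS(x, y) is the
 * number of 32 bit words needed to hold x elements packed y per
 * word.  A string whose length slot reads 13 carries 14 bytes of
 * data (see the note in scav_string below), and
 * NWORDS(14, 4) = CEILING(14, 4) / 4 = 4 data words, to which the
 * scavengers add 2 for the header and length words.
 */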
2866 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
2867 dtc 1.1 {
2868     struct vector *vector;
2869     int length, nwords;
2870    
2871 dtc 1.14 /*
2872     * NOTE: Strings contain one more byte of data than the length
2873     * slot indicates.
2874     */
2875 dtc 1.1
2876     vector = (struct vector *) where;
2877     length = fixnum_value(vector->length) + 1;
2878     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2879    
2880     return nwords;
2881     }
2882    
2883 dtc 1.14 static lispobj trans_string(lispobj object)
2884 dtc 1.1 {
2885     struct vector *vector;
2886     int length, nwords;
2887    
2888     gc_assert(Pointerp(object));
2889    
2890 dtc 1.14 /*
2891     * NOTE: Strings contain one more byte of data than the length
2892     * slot indicates.
2893     */
2894 dtc 1.1
2895     vector = (struct vector *) PTR(object);
2896     length = fixnum_value(vector->length) + 1;
2897     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2898    
2899     return copy_large_unboxed_object(object, nwords);
2900     }
2901    
2902 dtc 1.14 static int size_string(lispobj *where)
2903 dtc 1.1 {
2904     struct vector *vector;
2905     int length, nwords;
2906    
2907 dtc 1.14 /*
2908     * NOTE: Strings contain one more byte of data than the length
2909     * slot indicates.
2910     */
2911 dtc 1.1
2912     vector = (struct vector *) where;
2913     length = fixnum_value(vector->length) + 1;
2914     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2915    
2916     return nwords;
2917     }
2918    
2919 dtc 1.14 #if 0
2920     static int scav_vector(lispobj *where, lispobj object)
2921 dtc 1.1 {
2922     if (HeaderValue(object) == subtype_VectorValidHashing)
2923     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2924    
2925     return 1;
2926 dtc 1.14 }
2927     #endif
2928 dtc 1.1
2929     int gencgc_hash = 1;
2930    
2931 dtc 1.14 static int scav_vector(lispobj *where, lispobj object)
2932 dtc 1.1 {
2933     unsigned int kv_length;
2934     lispobj *kv_vector;
2935     unsigned int length;
2936     lispobj *hash_table;
2937     lispobj empty_symbol;
2938     unsigned int *index_vector, *next_vector, *hash_vector;
2939     lispobj weak_p_obj;
2940     unsigned next_vector_length;
2941    
2942     if (HeaderValue(object) != subtype_VectorValidHashing)
2943     return 1;
2944    
2945     if (!gencgc_hash) {
2946     /* Set for backward compatibility. */
2947     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2948     return 1;
2949     }
2950    
2951     kv_length = fixnum_value(where[1]);
2952     kv_vector = where+2; /* Skip the header and length. */
2953 dtc 1.14 #if 0
2954     fprintf(stderr, "* kv_length = %d\n", kv_length);
2955     #endif
2956 dtc 1.1
2957     /* Scavenge element 0 which may be a hash-table structure. */
2958 dtc 1.14 scavenge(where + 2, 1);
2959 dtc 1.1 if (!Pointerp(where[2])) {
2960 dtc 1.14 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2961 dtc 1.1 return 3;
2962     }
2963 dtc 1.14 hash_table = (lispobj *) PTR(where[2]);
2964     #if 0
2965     fprintf(stderr, "* hash_table = %x\n", hash_table);
2966     #endif
2967     if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2968     fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2969 dtc 1.1 return 3;
2970     }
2971 dtc 1.14
2972 dtc 1.1 /* Scavenge element 1 which should be an :empty symbol. */
2973 dtc 1.14 scavenge(where + 3, 1);
2974 dtc 1.1 if (!Pointerp(where[3])) {
2975 dtc 1.14 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2976 dtc 1.1 return 4;
2977     }
2978     empty_symbol = where[3];
2979 dtc 1.14 #if 0
2980     fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2981     #endif
2982     if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2983     fprintf(stderr, "* empty symbol not symbol? %x\n",
2984     *(lispobj *) PTR(empty_symbol));
2985 dtc 1.1 return 4;
2986     }
2987    
2988 dtc 1.14 /*
2989     * Scavenge hash table which will fix the positions of the other
2990     * needed objects.
2991     */
2992 dtc 1.1 scavenge(hash_table,16);
2993    
2994     /* Cross check the kv_vector. */
2995 dtc 1.14 if (where != (lispobj *) PTR(hash_table[9])) {
2996     fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
2997 dtc 1.1 return 4;
2998     }
2999    
3000     /* Weak-p */
3001     weak_p_obj = hash_table[10];
3002 dtc 1.14 #if 0
3003     fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3004     #endif
3005 dtc 1.1
3006     /* Index vector */
3007     {
3008     lispobj index_vector_obj = hash_table[13];
3009    
3010     if (Pointerp(index_vector_obj) &&
3011 dtc 1.14 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3012     index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3013     #if 0
3014     fprintf(stderr, "* index_vector = %x\n", index_vector);
3015     #endif
3016     length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3017     #if 0
3018     fprintf(stderr, "* length = %d\n", length);
3019     #endif
3020 dtc 1.1 } else {
3021 dtc 1.14 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3022 dtc 1.1 return 4;
3023     }
3024     }
3025    
3026     /* Next vector */
3027     {
3028     lispobj next_vector_obj = hash_table[14];
3029    
3030     if (Pointerp(next_vector_obj) &&
3031 dtc 1.14 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3032     next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3033     #if 0
3034     fprintf(stderr, "* next_vector = %x\n", next_vector);
3035     #endif
3036     next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3037     #if 0
3038     fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3039     #endif
3040 dtc 1.1 } else {
3041 dtc 1.14 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3042 dtc 1.1 return 4;
3043     }
3044     }
3045    
3046     /* Maybe Hash vector */
3047     {
3048     lispobj hash_vector_obj = hash_table[15];
3049    
3050     if (Pointerp(hash_vector_obj) &&
3051 dtc 1.14 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3052     hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3053     #if 0
3054     fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3055     #endif
3056     gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3057 dtc 1.1 == next_vector_length);
3058     } else {
3059     hash_vector = NULL;
3060 dtc 1.14 #if 0
3061     fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3062     #endif
3063 dtc 1.1 }
3064     }
3065    
3066 dtc 1.14 /*
3067     * These lengths could differ, as the index_vector can be a
3068     * different length from the others; a larger index_vector could
3069     * help reduce collisions.
3070     */
3071     gc_assert(next_vector_length * 2 == kv_length);
3072 dtc 1.1
3073     /* Now all setup */
3074 dtc 1.14
3075 dtc 1.1 /* Work through the KV vector */
3076     {
3077     int i;
3078     for (i = 1; i < next_vector_length; i++) {
3079 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3080     unsigned int old_index = (old_key & 0x1fffffff) % length;
3081    
3082 dtc 1.1 /* Scavenge the Key and Value */
3083 dtc 1.14 scavenge(&kv_vector[2 * i], 2);
3084    
3085 dtc 1.1 /* Check if the Key has moved and is EQ based */
3086     {
3087 dtc 1.14 lispobj new_key = kv_vector[2 * i];
3088     unsigned int new_index = (new_key & 0x1fffffff) % length;
3089    
3090     if (old_index != new_index &&
3091     (!hash_vector || hash_vector[i] == 0x80000000) &&
3092     (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3093    
3094     #if 0
3095     fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3096     i, old_key, new_key, old_index, new_index);
3097     #endif
3098 dtc 1.1
3099     if (index_vector[old_index] != 0) {
3100 dtc 1.14 #if 0
3101     fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3102     #endif
3103    
3104 dtc 1.1 /* Unlink the key from the old_index chain. */
3105     if (index_vector[old_index] == i) {
3106 dtc 1.14 #if 0
3107     fprintf(stderr, "*P2a %d\n", next_vector[i]);
3108     #endif
3109 dtc 1.1 index_vector[old_index] = next_vector[i];
3110     /* Link it into the needing rehash chain. */
3111     next_vector[i] = fixnum_value(hash_table[11]);
3112     hash_table[11] = make_fixnum(i);
3113 dtc 1.14 #if 0
3114     fprintf(stderr, "*P2\n");
3115     #endif
3116 dtc 1.1 } else {
3117     unsigned prior = index_vector[old_index];
3118     unsigned next = next_vector[prior];
3119    
3120 dtc 1.14 #if 0
3121     fprintf(stderr, "*P3a %d %d\n", prior, next);
3122     #endif
3123    
3124 dtc 1.1 while (next != 0) {
3125 dtc 1.14 #if 0
3126     fprintf(stderr, "*P3b %d %d\n", prior, next);
3127     #endif
3128 dtc 1.1 if (next == i) {
3129     /* Unlink it */
3130     next_vector[prior] = next_vector[next];
3131     /* Link it into the needing rehash chain. */
3132     next_vector[next] = fixnum_value(hash_table[11]);
3133     hash_table[11] = make_fixnum(next);
3134 dtc 1.14 #if 0
3135     fprintf(stderr, "*P3\n");
3136     #endif
3137 dtc 1.1 break;
3138     }
3139     prior = next;
3140     next = next_vector[next];
3141     };
3142     }
3143     }
3144     }
3145     }
3146     }
3147     }
3148 dtc 1.14 return CEILING(kv_length + 2, 2);
3149 dtc 1.1 }
3150    
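/*
 * A short summary of the rehash handling above: an EQ based key
 * hashes on its address, so when the scavenge moves such a key its
 * bucket becomes stale.  The entry is therefore unlinked from the
 * chain rooted at index_vector[old_index] and pushed onto the
 * needing-rehash list kept in slot 11 of the hash-table structure,
 * where the Lisp side is presumably expected to rehash it before the
 * next lookup.
 */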
3151    
3152 dtc 1.14 static lispobj trans_vector(lispobj object)
3153 dtc 1.1 {
3154     struct vector *vector;
3155     int length, nwords;
3156    
3157     gc_assert(Pointerp(object));
3158    
3159     vector = (struct vector *) PTR(object);
3160    
3161     length = fixnum_value(vector->length);
3162     nwords = CEILING(length + 2, 2);
3163    
3164     return copy_large_object(object, nwords);
3165     }
3166    
3167 dtc 1.14 static int size_vector(lispobj *where)
3168 dtc 1.1 {
3169     struct vector *vector;
3170     int length, nwords;
3171    
3172     vector = (struct vector *) where;
3173     length = fixnum_value(vector->length);
3174     nwords = CEILING(length + 2, 2);
3175    
3176     return nwords;
3177     }
3178    
3179    
3180 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3181 dtc 1.1 {
3182     struct vector *vector;
3183     int length, nwords;
3184    
3185     vector = (struct vector *) where;
3186     length = fixnum_value(vector->length);
3187     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3188    
3189     return nwords;
3190     }
3191    
3192 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3193 dtc 1.1 {
3194     struct vector *vector;
3195     int length, nwords;
3196    
3197     gc_assert(Pointerp(object));
3198    
3199     vector = (struct vector *) PTR(object);
3200     length = fixnum_value(vector->length);
3201     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3202    
3203     return copy_large_unboxed_object(object, nwords);
3204     }
3205    
3206 dtc 1.14 static int size_vector_bit(lispobj *where)
3207 dtc 1.1 {
3208     struct vector *vector;
3209     int length, nwords;
3210    
3211     vector = (struct vector *) where;
3212     length = fixnum_value(vector->length);
3213     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3214    
3215     return nwords;
3216     }
3217    
3218    
3219 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3220 dtc 1.1 {
3221     struct vector *vector;
3222     int length, nwords;
3223    
3224     vector = (struct vector *) where;
3225     length = fixnum_value(vector->length);
3226     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3227    
3228     return nwords;
3229     }
3230    
3231 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3232 dtc 1.1 {
3233     struct vector *vector;
3234     int length, nwords;
3235    
3236     gc_assert(Pointerp(object));
3237    
3238     vector = (struct vector *) PTR(object);
3239     length = fixnum_value(vector->length);
3240     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3241    
3242     return copy_large_unboxed_object(object, nwords);
3243     }
3244    
3245 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3246 dtc 1.1 {
3247     struct vector *vector;
3248     int length, nwords;
3249    
3250     vector = (struct vector *) where;
3251     length = fixnum_value(vector->length);
3252     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3253    
3254     return nwords;
3255     }
3256    
3257    
3258 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3259 dtc 1.1 {
3260     struct vector *vector;
3261     int length, nwords;
3262    
3263     vector = (struct vector *) where;
3264     length = fixnum_value(vector->length);
3265     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3266    
3267     return nwords;
3268     }
3269    
3270 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3271 dtc 1.1 {
3272     struct vector *vector;
3273     int length, nwords;
3274    
3275     gc_assert(Pointerp(object));
3276    
3277     vector = (struct vector *) PTR(object);
3278     length = fixnum_value(vector->length);
3279     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3280    
3281     return copy_large_unboxed_object(object, nwords);
3282     }
3283    
3284 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3285 dtc 1.1 {
3286     struct vector *vector;
3287     int length, nwords;
3288    
3289     vector = (struct vector *) where;
3290     length = fixnum_value(vector->length);
3291     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3292    
3293     return nwords;
3294     }
3295    
3296    
3297 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3298 dtc 1.1 {
3299     struct vector *vector;
3300     int length, nwords;
3301    
3302     vector = (struct vector *) where;
3303     length = fixnum_value(vector->length);
3304     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3305    
3306     return nwords;
3307     }
3308    
3309 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3310 dtc 1.1 {
3311     struct vector *vector;
3312     int length, nwords;
3313    
3314     gc_assert(Pointerp(object));
3315    
3316     vector = (struct vector *) PTR(object);
3317     length = fixnum_value(vector->length);
3318     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3319    
3320     return copy_large_unboxed_object(object, nwords);
3321     }
3322    
3323 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3324 dtc 1.1 {
3325     struct vector *vector;
3326     int length, nwords;
3327    
3328     vector = (struct vector *) where;
3329     length = fixnum_value(vector->length);
3330     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3331    
3332     return nwords;
3333     }
3334    
3335    
3336 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3337 dtc 1.1 {
3338     struct vector *vector;
3339     int length, nwords;
3340    
3341     vector = (struct vector *) where;
3342     length = fixnum_value(vector->length);
3343     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3344    
3345     return nwords;
3346     }
3347    
3348 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3349 dtc 1.1 {
3350     struct vector *vector;
3351     int length, nwords;
3352    
3353     gc_assert(Pointerp(object));
3354    
3355     vector = (struct vector *) PTR(object);
3356     length = fixnum_value(vector->length);
3357     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3358    
3359     return copy_large_unboxed_object(object, nwords);
3360     }
3361    
3362 dtc 1.14 static int size_vector_unsigned_byte_16(lispobj *where)
3363 dtc 1.1 {
3364     struct vector *vector;
3365     int length, nwords;
3366    
3367     vector = (struct vector *) where;
3368     length = fixnum_value(vector->length);
3369     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3370    
3371     return nwords;
3372     }
3373    
3374    
3375 dtc 1.14 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3376 dtc 1.1 {
3377     struct vector *vector;
3378     int length, nwords;
3379    
3380     vector = (struct vector *) where;
3381     length = fixnum_value(vector->length);
3382     nwords = CEILING(length + 2, 2);
3383    
3384     return nwords;
3385     }
3386    
3387 dtc 1.14 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3388 dtc 1.1 {
3389     struct vector *vector;
3390     int length, nwords;
3391    
3392     gc_assert(Pointerp(object));
3393    
3394     vector = (struct vector *) PTR(object);
3395     length = fixnum_value(vector->length);
3396     nwords = CEILING(length + 2, 2);
3397    
3398     return copy_large_unboxed_object(object, nwords);
3399     }
3400    
3401 dtc 1.14 static int size_vector_unsigned_byte_32(lispobj *where)
3402 dtc 1.1 {
3403     struct vector *vector;
3404     int length, nwords;
3405    
3406     vector = (struct vector *) where;
3407     length = fixnum_value(vector->length);
3408     nwords = CEILING(length + 2, 2);
3409    
3410     return nwords;
3411     }
3412    
3413    
3414 dtc 1.14 static int scav_vector_single_float(lispobj *where, lispobj object)
3415 dtc 1.1 {
3416     struct vector *vector;
3417     int length, nwords;
3418    
3419     vector = (struct vector *) where;
3420     length = fixnum_value(vector->length);
3421     nwords = CEILING(length + 2, 2);
3422    
3423     return nwords;
3424     }
3425    
3426 dtc 1.14 static lispobj trans_vector_single_float(lispobj object)
3427 dtc 1.1 {
3428     struct vector *vector;
3429     int length, nwords;
3430    
3431     gc_assert(Pointerp(object));
3432    
3433     vector = (struct vector *) PTR(object);
3434     length = fixnum_value(vector->length);
3435     nwords = CEILING(length + 2, 2);
3436    
3437     return copy_large_unboxed_object(object, nwords);
3438     }
3439    
3440 dtc 1.14 static int size_vector_single_float(lispobj *where)
3441 dtc 1.1 {
3442     struct vector *vector;
3443     int length, nwords;
3444    
3445     vector = (struct vector *) where;
3446     length = fixnum_value(vector->length);
3447     nwords = CEILING(length + 2, 2);
3448    
3449     return nwords;
3450     }
3451    
3452    
3453 dtc 1.14 static int scav_vector_double_float(lispobj *where, lispobj object)
3454 dtc 1.1 {
3455     struct vector *vector;
3456     int length, nwords;
3457    
3458     vector = (struct vector *) where;
3459     length = fixnum_value(vector->length);
3460     nwords = CEILING(length * 2 + 2, 2);
3461    
3462     return nwords;
3463     }
3464    
3465 dtc 1.14 static lispobj trans_vector_double_float(lispobj object)
3466 dtc 1.1 {
3467     struct vector *vector;
3468     int length, nwords;
3469    
3470     gc_assert(Pointerp(object));
3471    
3472     vector = (struct vector *) PTR(object);
3473     length = fixnum_value(vector->length);
3474     nwords = CEILING(length * 2 + 2, 2);
3475    
3476     return copy_large_unboxed_object(object, nwords);
3477     }
3478    
3479 dtc 1.14 static int size_vector_double_float(lispobj *where)
3480 dtc 1.1 {
3481     struct vector *vector;
3482     int length, nwords;
3483    
3484     vector = (struct vector *) where;
3485     length = fixnum_value(vector->length);
3486     nwords = CEILING(length * 2 + 2, 2);
3487    
3488     return nwords;
3489     }
3490    
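/*
 * Illustrative note, not part of the original source: for the float
 * and complex vectors the multiplier on length is the element size in
 * 32-bit words -- 1 for single-float, 2 for double-float, 3 for the
 * x86 long-float -- and twice that for the corresponding complex
 * element types (2, 4 and 6 respectively).
 */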
3491    
3492 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3493 dtc 1.14 static int scav_vector_long_float(lispobj *where, lispobj object)
3494 dtc 1.11 {
3495     struct vector *vector;
3496     int length, nwords;
3497    
3498     vector = (struct vector *) where;
3499     length = fixnum_value(vector->length);
3500     nwords = CEILING(length * 3 + 2, 2);
3501    
3502     return nwords;
3503     }
3504    
3505 dtc 1.14 static lispobj trans_vector_long_float(lispobj object)
3506 dtc 1.11 {
3507     struct vector *vector;
3508     int length, nwords;
3509    
3510     gc_assert(Pointerp(object));
3511    
3512     vector = (struct vector *) PTR(object);
3513     length = fixnum_value(vector->length);
3514     nwords = CEILING(length * 3 + 2, 2);
3515    
3516     return copy_large_unboxed_object(object, nwords);
3517     }
3518    
3519 dtc 1.14 static int size_vector_long_float(lispobj *where)
3520 dtc 1.11 {
3521     struct vector *vector;
3522     int length, nwords;
3523    
3524     vector = (struct vector *) where;
3525     length = fixnum_value(vector->length);
3526     nwords = CEILING(length * 3 + 2, 2);
3527    
3528     return nwords;
3529     }
3530     #endif
3531    
3532    
3533 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3534 dtc 1.14 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3535 dtc 1.1 {
3536     struct vector *vector;
3537     int length, nwords;
3538    
3539     vector = (struct vector *) where;
3540     length = fixnum_value(vector->length);
3541     nwords = CEILING(length * 2 + 2, 2);
3542    
3543     return nwords;
3544     }
3545    
3546 dtc 1.14 static lispobj trans_vector_complex_single_float(lispobj object)
3547 dtc 1.1 {
3548     struct vector *vector;
3549     int length, nwords;
3550    
3551     gc_assert(Pointerp(object));
3552    
3553     vector = (struct vector *) PTR(object);
3554     length = fixnum_value(vector->length);
3555     nwords = CEILING(length * 2 + 2, 2);
3556    
3557     return copy_large_unboxed_object(object, nwords);
3558     }
3559    
3560 dtc 1.14 static int size_vector_complex_single_float(lispobj *where)
3561 dtc 1.1 {
3562     struct vector *vector;
3563     int length, nwords;
3564    
3565     vector = (struct vector *) where;
3566     length = fixnum_value(vector->length);
3567     nwords = CEILING(length * 2 + 2, 2);
3568    
3569     return nwords;
3570     }
3571     #endif
3572    
3573     #ifdef type_SimpleArrayComplexDoubleFloat
3574 dtc 1.14 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3575 dtc 1.1 {
3576     struct vector *vector;
3577     int length, nwords;
3578    
3579     vector = (struct vector *) where;
3580     length = fixnum_value(vector->length);
3581     nwords = CEILING(length * 4 + 2, 2);
3582    
3583     return nwords;
3584     }
3585    
3586 dtc 1.14 static lispobj trans_vector_complex_double_float(lispobj object)
3587 dtc 1.1 {
3588     struct vector *vector;
3589     int length, nwords;
3590    
3591     gc_assert(Pointerp(object));
3592    
3593     vector = (struct vector *) PTR(object);
3594     length = fixnum_value(vector->length);
3595     nwords = CEILING(length * 4 + 2, 2);
3596    
3597     return copy_large_unboxed_object(object, nwords);
3598     }
3599    
3600 dtc 1.14 static int size_vector_complex_double_float(lispobj *where)
3601 dtc 1.1 {
3602     struct vector *vector;
3603     int length, nwords;
3604    
3605     vector = (struct vector *) where;
3606     length = fixnum_value(vector->length);
3607     nwords = CEILING(length * 4 + 2, 2);
3608    
3609     return nwords;
3610     }
3611     #endif
3612    
3613 dtc 1.11
3614     #ifdef type_SimpleArrayComplexLongFloat
3615 dtc 1.14 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3616 dtc 1.11 {
3617     struct vector *vector;
3618     int length, nwords;
3619    
3620     vector = (struct vector *) where;
3621     length = fixnum_value(vector->length);
3622     nwords = CEILING(length * 6 + 2, 2);
3623    
3624     return nwords;
3625     }
3626    
3627 dtc 1.14 static lispobj trans_vector_complex_long_float(lispobj object)
3628 dtc 1.11 {
3629     struct vector *vector;
3630     int length, nwords;
3631    
3632     gc_assert(Pointerp(object));
3633    
3634     vector = (struct vector *) PTR(object);
3635     length = fixnum_value(vector->length);
3636     nwords = CEILING(length * 6 + 2, 2);
3637    
3638     return copy_large_unboxed_object(object, nwords);
3639     }
3640    
3641 dtc 1.14 static int size_vector_complex_long_float(lispobj *where)
3642 dtc 1.11 {
3643     struct vector *vector;
3644     int length, nwords;
3645    
3646     vector = (struct vector *) where;
3647     length = fixnum_value(vector->length);
3648     nwords = CEILING(length * 6 + 2, 2);
3649    
3650     return nwords;
3651     }
3652     #endif
3653    
3654 dtc 1.1
3655     /* Weak Pointers */
3656    
3657 dtc 1.14 /*
3658     * XXX Hack adapted from cgc.c. Weak pointers don't work too well
3659     * with the gencgc: a list of the weak pointers is maintained within
3660     * the objects themselves, which causes writes to their pages. A limited
3661     * attempt is made to avoid unnecessary writes, but this needs a re-think.
3662     */
3663 dtc 1.1
3664     #define WEAK_POINTER_NWORDS \
3665     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3666    
3667 dtc 1.14 static int scav_weak_pointer(lispobj *where, lispobj object)
3668 dtc 1.1 {
3669     struct weak_pointer *wp = weak_pointers;
3670 dtc 1.14 /*
3671     * Push the weak pointer onto the list of weak pointers, checking for
3672     * duplicates. Originally this was part of trans_weak_pointer, but that
3673     * didn't work when the weak pointer was in a promoted region.
3674 dtc 1.1 */
3675    
3676     /* Check if it's already in the list. */
3677     while(wp != NULL) {
3678 dtc 1.14 if(wp == (struct weak_pointer*) where)
3679 dtc 1.1 break;
3680     wp = wp->next;
3681     }
3682     if(wp == NULL) {
3683     /* Add it to the start of the list. */
3684 dtc 1.14 wp = (struct weak_pointer*) where;
3685 dtc 1.1 if (wp->next != weak_pointers)
3686     wp->next = weak_pointers;
3687 dtc 1.14 #if 0
3688     else
3689     fprintf(stderr, "Avoided write to weak pointer.\n");
3690     #endif
3691 dtc 1.1 weak_pointers = wp;
3692     }
3693    
3694 dtc 1.14 /*
3695     * Do not let GC scavenge the value slot of the weak pointer (that
3696     * is why it is a weak pointer).
3697 dtc 1.1 */
3698    
3699     return WEAK_POINTER_NWORDS;
3700     }
3701    
3702 dtc 1.14 static lispobj trans_weak_pointer(lispobj object)
3703 dtc 1.1 {
3704     lispobj copy;
3705     struct weak_pointer *wp;
3706 dtc 1.14
3707 dtc 1.1 gc_assert(Pointerp(object));
3708 dtc 1.14
3709 dtc 1.1 #if defined(DEBUG_WEAK)
3710     printf("Transporting weak pointer from 0x%08x\n", object);
3711     #endif
3712 dtc 1.14
3713     /*
3714     * Need to remember where all the weak pointers are that have been
3715     * transported so they can be fixed up in a post-GC pass.
3716     */
3717    
3718 dtc 1.1 copy = copy_object(object, WEAK_POINTER_NWORDS);
3719 dtc 1.14 #if 0
3720     wp = (struct weak_pointer *) PTR(copy);
3721     #endif
3722    
3723 dtc 1.1 /* Push the weak pointer onto the list of weak pointers. */
3724 dtc 1.14 #if 0
3725     wp->next = weak_pointers;
3726     weak_pointers = wp;
3727     #endif
3728    
3729 dtc 1.1 return copy;
3730     }
3731    
3732 dtc 1.14 static int size_weak_pointer(lispobj *where)
3733 dtc 1.1 {
3734 dtc 1.14 return WEAK_POINTER_NWORDS;
3735 dtc 1.1 }
3736    
3737     void scan_weak_pointers(void)
3738     {
3739     struct weak_pointer *wp;
3740     for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3741     lispobj value = wp->value;
3742     lispobj first, *first_pointer;
3743 dtc 1.14
3744     first_pointer = (lispobj *) PTR(value);
3745    
3746     #if 0
3747     fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3748     fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3749     #endif
3750 dtc 1.1
3751     if (Pointerp(value) && from_space_p(value)) {
3752 dtc 1.14 /*
3753     * Now, we need to check if the object has been forwarded. If
3754 dtc 1.1 * it has been, the weak pointer is still good and needs to be
3755 dtc 1.14 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3756     */
3757    
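/*
 * A forwarded object has 0x01 in its first word and its new location
 * in the second word; that is what the test below checks.
 */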
3758 dtc 1.1 if (first_pointer[0] == 0x01)
3759     wp->value = first_pointer[1];
3760     else {
3761 dtc 1.14 /* Break it */
3762     #if 0
3763     fprintf(stderr, "Broken.\n");
3764     #endif
3765 dtc 1.1 wp->value = NIL;
3766     wp->broken = T;
3767     }
3768     }
3769     }
3770     }
3771    
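/*
 * Taken together (an illustrative summary, not part of the original
 * source): scav_weak_pointer chains every weak pointer seen during
 * scavenging onto the weak_pointers list, trans_weak_pointer copies
 * the weak pointer object itself, and scan_weak_pointers runs as the
 * post-GC pass that either forwards each value or breaks the pointer.
 */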
3772    
3773     /* Scavenged Hooks */
3774    
3775     #define SCAVENGER_HOOK_NWORDS \
3776     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
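/*
 * Note: the definition above reuses sizeof(struct weak_pointer), so it
 * presumably relies on struct scavenger_hook occupying the same number
 * of words.
 */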
3777    
3778 dtc 1.14 static int scav_scavenger_hook(lispobj *where, lispobj object)
3779 dtc 1.1 {
3780 dtc 1.14 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3781 dtc 1.5 lispobj old_value = scav_hook->value;
3782 dtc 1.1
3783 dtc 1.14 #if 0
3784     fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3785     #endif
3786 dtc 1.1
3787     /* Scavenge the value */
3788 dtc 1.14 scavenge(where + 1, 1);
3789 dtc 1.1
3790 dtc 1.5 if (scav_hook->value != old_value) {
3791     /* Value object has moved */
3792 dtc 1.14 #if 0
3793     fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3794     #endif
3795    
3796 dtc 1.5 /* Check if this hook is already noted. */
3797 dtc 1.14 #if 0
3798     fprintf(stderr, " next=%x sh hooks=%x\n",
3799     scav_hook->next, scavenger_hooks);
3800     #endif
3801 dtc