/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.32 - (hide annotations)
Thu Mar 27 12:42:10 2003 UTC (11 years, 1 month ago) by gerd
Branch: MAIN
CVS Tags: remove_negative_zero_not_zero, dynamic-extent-base, sparc_gencgc
Branch point for: sparc_gencgc_branch, dynamic-extent
Changes since 1.31: +468 -422 lines
File MIME type: text/plain
	* code/hash-new.lisp (make-hash-table): Put warning about
	creating weak tables in #-gencgc.

	* lisp/gencgc.c (gc_assert): Ensure macro expansion is always a C
	statement.
	(struct hash_table): New struct.
	(HASH_TABLE_SIZE, EQ_HASH): New macros.
	(weak_hash_tables): New variable.
	(survives_gc, u32_vector, free_hash_entry, record_for_rehashing)
	(scav_hash_entries, scav_weak_entries, scan_weak_tables)
	(scav_hash_vector): New functions.
	(scav_vector): Removed.
	(scav_weak_pointer, trans_weak_pointer, scan_weak_pointers):
	Cleaned up.
	(gc_init_tables): Use scav_hash_vector instead of scav_vector.
	(garbage_collect_generation): Call scan_weak_tables.
	(everywhere): Add casts, change format strings, etc. to placate
	the compiler.

	* lisp/FreeBSD-os.h: Fix a function prototype.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 gerd 1.32 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.32 2003/03/27 12:42:10 gerd Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 dtc 1.3 #if 0
32 dtc 1.1 #define gc_assert(ex) do { \
33     if (!(ex)) gc_abort(); \
34     } while (0)
35     #else
36 gerd 1.32 #define gc_assert(ex) (void) 0
37 dtc 1.1 #endif
38    
39    
40 dtc 1.14 /*
41     * The number of generations, an extra is added to this for use as a temp.
42     */
43 dtc 1.1 #define NUM_GENERATIONS 6
44    
45     /* Debugging variables. */
46    
47 dtc 1.14 /*
48     * The verbose level. All non-error messages are disabled at level 0;
49     * and only a few rare messages are printed at level 1.
50     */
51 dtc 1.6 unsigned gencgc_verbose = 0;
52 cracauer 1.28 unsigned counters_verbose = 0;
53 dtc 1.1
54 dtc 1.14 /*
55     * To enable the use of page protection to help avoid the scavenging
56     * of pages that don't have pointers to younger generations.
57     */
58 pmai 1.26 #ifdef __NetBSD__
59 cracauer 1.28
60 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
61     * SIGSEGV handler, so for the moment we can't use page protection. */
62     boolean enable_page_protection = FALSE;
63 cracauer 1.28 #else /* Netbsd */
64 dtc 1.1 boolean enable_page_protection = TRUE;
65 cracauer 1.28 #endif /* Netbsd */
66 dtc 1.1
67 dtc 1.14 /*
68     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
69     * Set to NUM_GENERATIONS to disable.
70     */
71 dtc 1.3 int verify_gens = NUM_GENERATIONS;
72 dtc 1.1
73 dtc 1.14 /*
74     * Enable a pre-scan verify of generation 0 before it's GCed.
75     */
76 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
77    
78 dtc 1.3 /*
79 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
80 dtc 1.3 */
81     boolean verify_after_free_heap = FALSE;
82    
83 dtc 1.14 /*
84     * Enable the printing of a note when code objects are found in the
85     * dynamic space during a heap verify.
86     */
87 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
88    
89 dtc 1.14 /*
90     * Enable the checking of code objects for fixup errors after they are
91     * transported.
92     */
93 dtc 1.3 boolean check_code_fixups = FALSE;
94 dtc 1.1
95 dtc 1.14 /*
96     * To enable unmapping of a page and re-mmaping it to have it zero filled.
97 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
98     * don't unmap.
99 dtc 1.14 */
100 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
101 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
102     #else
103     boolean gencgc_unmap_zero = TRUE;
104     #endif
105    
106 dtc 1.14 /*
107     * Enable checking that newly allocated regions are zero filled.
108     */
109 dtc 1.1 boolean gencgc_zero_check = FALSE;
110    
111 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
112    
113 dtc 1.3 /*
114     * Enable checking that free pages are zero filled during gc_free_heap
115     * called after purify.
116     */
117     boolean gencgc_zero_check_during_free_heap = FALSE;
118    
119 dtc 1.14 /*
120     * The minimum size for a large object.
121     */
122 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
123 dtc 1.1
124 dtc 1.14 /*
125     * Enable the filtering of stack/register pointers. This could reduce
126     * the number of invalid pointers accepted. It will probably degrades
127     * interrupt safety during object initialisation.
128     */
129 dtc 1.1 boolean enable_pointer_filter = TRUE;
130    
131    
132 dtc 1.14 /*
133     * The total bytes allocated. Seen by (dynamic-usage)
134     */
135 dtc 1.1 unsigned long bytes_allocated = 0;
136 dtc 1.22
137     /*
138 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
139     */
140    
141     volatile unsigned long long bytes_allocated_sum = 0;
142    
143     /*
144 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
145     */
146     unsigned long auto_gc_trigger = 0xffffffff;
147 dtc 1.1
148 dtc 1.14 /*
149     * The src. and dest. generations. Set before a GC starts scavenging.
150     */
151 dtc 1.1 static int from_space;
152     static int new_space;
153    
154    
155 dtc 1.14 /*
156     * GC structures and variables.
157     */
158 dtc 1.1
159 dtc 1.14 /*
160 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
161     * dynamic space.
162     */
163     unsigned dynamic_space_pages;
164    
165     /*
166 dtc 1.14 * An array of page structures is statically allocated.
167     * This helps quickly map between an address its page structure.
168     */
169 dtc 1.23 struct page *page_table;
170 dtc 1.1
171 dtc 1.14 /*
172     * Heap base, needed for mapping addresses to page structures.
173     */
174 dtc 1.1 static void *heap_base = NULL;
175    
176 dtc 1.14 /*
177     * Calculate the start address for the given page number.
178     */
179     inline void *page_address(int page_num)
180 dtc 1.1 {
181 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
182 dtc 1.1 }
183    
184 dtc 1.14 /*
185     * Find the page index within the page_table for the given address.
186     * Returns -1 on failure.
187     */
188     inline int find_page_index(void *addr)
189 dtc 1.1 {
190     int index = addr-heap_base;
191    
192     if (index >= 0) {
193 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
194 dtc 1.23 if (index < dynamic_space_pages)
195 dtc 1.14 return index;
196 dtc 1.1 }
197    
198 dtc 1.14 return -1;
199 dtc 1.1 }
200    
201    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* Used to calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.
     * Cleared after a GC of this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing
     * by bytes_allocated gives the average age of the memory in this
     * generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur.  This helps
     * prevent a GC when a large number of new live objects have just
     * been added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
259    
260 dtc 1.14 /*
261     * An array of generation structures. There needs to be one more
262     * generation structure than actual generations as the oldest
263     * generations is temporarily raised then lowered.
264     */
265 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
266 dtc 1.1
/*
 * Statistics about a generation, extracted from the generations array.
 * This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};
280    
281    
282 dtc 1.14 /*
283     * The oldest generation that will currently be GCed by default.
284     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
285     *
286     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
287     *
288     * Setting this to 0 effectively disables the generational nature of
289     * the GC. In some applications generational GC may not be useful
290     * because there are no long-lived objects.
291     *
292     * An intermediate value could be handy after moving long-lived data
293     * into an older generation so an unnecessary GC of this long-lived
294     * data can be avoided.
295     */
296     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
297 dtc 1.1
298    
299 dtc 1.14 /*
300     * The maximum free page in the heap is maintained and used to update
301     * ALLOCATION_POINTER which is used by the room function to limit its
302     * search of the heap. XX Gencgc obviously needs to be better
303     * integrated with the lisp code.
304     */
305 dtc 1.1 static int last_free_page;
306    
307    
308    
309 dtc 1.14 /*
310     * Misc. heap functions.
311     */
312 dtc 1.1
313 dtc 1.14 /*
314     * Count the number of write protected pages within the given generation.
315     */
316     static int count_write_protect_generation_pages(int generation)
317 dtc 1.1 {
318     int i;
319     int cnt = 0;
320 dtc 1.15 int mmask, mflags;
321    
322     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
323     | PAGE_GENERATION_MASK;
324     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
325 dtc 1.14
326 dtc 1.1 for (i = 0; i < last_free_page; i++)
327 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
328 dtc 1.1 cnt++;
329 dtc 1.14 return cnt;
330 dtc 1.1 }
331    
332 dtc 1.14 /*
333     * Count the number of pages within the given generation.
334     */
335     static int count_generation_pages(int generation)
336 dtc 1.1 {
337     int i;
338     int cnt = 0;
339 dtc 1.15 int mmask, mflags;
340    
341     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
342     mflags = PAGE_ALLOCATED_MASK | generation;
343 dtc 1.14
344 dtc 1.1 for (i = 0; i < last_free_page; i++)
345 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
346 dtc 1.1 cnt++;
347 dtc 1.14 return cnt;
348 dtc 1.1 }
349    
350 dtc 1.14 /*
351     * Count the number of dont_move pages.
352     */
353     static int count_dont_move_pages(void)
354 dtc 1.1 {
355     int i;
356     int cnt = 0;
357 dtc 1.15 int mmask;
358    
359     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
360 dtc 1.14
361 dtc 1.1 for (i = 0; i < last_free_page; i++)
362 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
363 dtc 1.1 cnt++;
364 dtc 1.14 return cnt;
365 dtc 1.1 }
366    
367 dtc 1.14 /*
368     * Work through the pages and add up the number of bytes used for the
369     * given generation.
370     */
371     static int generation_bytes_allocated (int generation)
372 dtc 1.1 {
373     int i;
374     int bytes_allocated = 0;
375 dtc 1.15 int mmask, mflags;
376    
377     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
378     mflags = PAGE_ALLOCATED_MASK | generation;
379 dtc 1.14
380 dtc 1.1 for (i = 0; i < last_free_page; i++) {
381 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
382 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
383     }
384 dtc 1.14 return bytes_allocated;
385 dtc 1.1 }
386    
387 dtc 1.14 /*
388     * Return the average age of the memory in a generation.
389     */
390     static double gen_av_mem_age(int gen)
391 dtc 1.1 {
392     if (generations[gen].bytes_allocated == 0)
393 dtc 1.14 return 0.0;
394    
395     return (double) generations[gen].cum_sum_bytes_allocated /
396     (double) generations[gen].bytes_allocated;
397 dtc 1.1 }
398    
399 dtc 1.14 /*
400     * The verbose argument controls how much to print out:
401     * 0 for normal level of detail; 1 for debugging.
402     */
403 toy 1.29 void print_generation_stats(int verbose)
404 dtc 1.1 {
405     int i, gens;
406 dtc 1.11 int fpu_state[27];
407    
408 dtc 1.14 /*
409     * This code uses the FP instructions which may be setup for Lisp so
410     * they need to the saved and reset for C.
411     */
412 dtc 1.11 fpu_save(fpu_state);
413 dtc 1.1
414     /* Number of generations to print out. */
415     if (verbose)
416 dtc 1.14 gens = NUM_GENERATIONS + 1;
417 dtc 1.1 else
418     gens = NUM_GENERATIONS;
419    
420     /* Print the heap stats */
421 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
422 dtc 1.1
423     for (i = 0; i < gens; i++) {
424     int j;
425     int boxed_cnt = 0;
426     int unboxed_cnt = 0;
427     int large_boxed_cnt = 0;
428     int large_unboxed_cnt = 0;
429 dtc 1.14
430 dtc 1.15 for (j = 0; j < last_free_page; j++) {
431     int flags = page_table[j].flags;
432     if ((flags & PAGE_GENERATION_MASK) == i) {
433     if (flags & PAGE_ALLOCATED_MASK) {
434     /*
435     * Count the number of boxed and unboxed pages within the
436     * given generation.
437     */
438     if (flags & PAGE_UNBOXED_MASK)
439     if (flags & PAGE_LARGE_OBJECT_MASK)
440     large_unboxed_cnt++;
441     else
442     unboxed_cnt++;
443 dtc 1.1 else
444 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
445     large_boxed_cnt++;
446     else
447     boxed_cnt++;
448     }
449 dtc 1.1 }
450 dtc 1.15 }
451 dtc 1.14
452 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
453 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
454 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
455 dtc 1.1 generations[i].bytes_allocated,
456 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
457     generations[i].bytes_allocated,
458 dtc 1.1 generations[i].gc_trigger,
459     count_write_protect_generation_pages(i),
460     generations[i].num_gc,
461     gen_av_mem_age(i));
462     }
463 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
464 dtc 1.11
465     fpu_restore(fpu_state);
466 dtc 1.1 }
467    
468 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
469     array.
470     */
471     void get_generation_stats(int gen, struct generation_stats *stats)
472     {
473     if (gen <= NUM_GENERATIONS) {
474     stats->bytes_allocated = generations[gen].bytes_allocated;
475     stats->gc_trigger = generations[gen].gc_trigger;
476     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
477     stats->num_gc = generations[gen].num_gc;
478     stats->trigger_age = generations[gen].trigger_age;
479     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
480     stats->min_av_mem_age = generations[gen].min_av_mem_age;
481     }
482     }
483    
484     void set_gc_trigger(int gen, int trigger)
485     {
486     if (gen <= NUM_GENERATIONS) {
487     generations[gen].gc_trigger = trigger;
488     }
489     }
490 dtc 1.1
491 moore 1.27 void set_trigger_age(int gen, int trigger_age)
492     {
493     if (gen <= NUM_GENERATIONS) {
494     generations[gen].trigger_age = trigger_age;
495     }
496     }
497    
498     void set_min_mem_age(int gen, double min_mem_age)
499     {
500     if (gen <= NUM_GENERATIONS) {
501     generations[gen].min_av_mem_age = min_mem_age;
502     }
503     }
504 dtc 1.1
505 dtc 1.14 /*
506     * Allocation routines.
507     *
508     *
509     * To support quick and inline allocation, regions of memory can be
510     * allocated and then allocated from with just a free pointer and a
511     * check against an end address.
512     *
513     * Since objects can be allocated to spaces with different properties
514     * e.g. boxed/unboxed, generation, ages; there may need to be many
515     * allocation regions.
516     *
517     * Each allocation region may be start within a partly used page.
518     * Many features of memory use are noted on a page wise basis,
519     * E.g. the generation; so if a region starts within an existing
520     * allocated page it must be consistent with this page.
521     *
522     * During the scavenging of the newspace, objects will be transported
523     * into an allocation region, and pointers updated to point to this
524     * allocation region. It is possible that these pointers will be
525     * scavenged again before the allocation region is closed, E.g. due to
526     * trans_list which jumps all over the place to cleanup the list. It
527     * is important to be able to determine properties of all objects
528     * pointed to when scavenging, E.g to detect pointers to the
529     * oldspace. Thus it's important that the allocation regions have the
530     * correct properties set when allocated, and not just set when
531     * closed. The region allocation routines return regions with the
532     * specified properties, and grab all the pages, setting there
533     * properties appropriately, except that the amount used is not known.
534     *
535     * These regions are used to support quicker allocation using just a
536     * free pointer. The actual space used by the region is not reflected
537     * in the pages tables until it is closed. It can't be scavenged until
538     * closed.
539     *
540     * When finished with the region it should be closed, which will
541     * update the page tables for the actual space used returning unused
542     * space. Further it may be noted in the new regions which is
543     * necessary when scavenging the newspace.
544     *
545     * Large objects may be allocated directly without an allocation
546     * region, the page tables are updated immediately.
547     *
548     * Unboxed objects don't contain points to other objects so don't need
549     * scavenging. Further they can't contain pointers to younger
550     * generations so WP is not needed. By allocating pages to unboxed
551     * objects the whole page never needs scavenging or write protecting.
552     */
553 dtc 1.1
554 dtc 1.14 /*
555     * Only using two regions at present, both are for the current
556     * newspace generation.
557     */
558 dtc 1.1 struct alloc_region boxed_region;
559     struct alloc_region unboxed_region;
560    
561 moore 1.27 #if 0
562 dtc 1.14 /*
563     * X hack. current lisp code uses the following. Need coping in/out.
564     */
565 dtc 1.1 void *current_region_free_pointer;
566     void *current_region_end_addr;
567 moore 1.27 #endif
568 dtc 1.1
569     /* The generation currently being allocated to. X */
570     static int gc_alloc_generation;
571    
572 dtc 1.14 /*
573     * Find a new region with room for at least the given number of bytes.
574     *
575     * It starts looking at the current generations alloc_start_page. So
576     * may pick up from the previous region if there is enough space. This
577     * keeps the allocation contiguous when scavenging the newspace.
578     *
579     * The alloc_region should have been closed by a call to
580     * gc_alloc_update_page_tables, and will thus be in an empty state.
581     *
582     * To assist the scavenging functions, write protected pages are not
583     * used. Free pages should not be write protected.
584     *
585     * It is critical to the conservative GC that the start of regions be
586     * known. To help achieve this only small regions are allocated at a
587     * time.
588     *
589     * During scavenging, pointers may be found that point within the
590     * current region and the page generation must be set so pointers to
591     * the from space can be recognised. So the generation of pages in
592     * the region are set to gc_alloc_generation. To prevent another
593     * allocation call using the same pages, all the pages in the region
594     * are allocated, although they will initially be empty.
595     */
596     static void gc_alloc_new_region(int nbytes, int unboxed,
597     struct alloc_region *alloc_region)
598 dtc 1.1 {
599     int first_page;
600     int last_page;
601     int region_size;
602     int restart_page;
603     int bytes_found;
604     int num_pages;
605     int i;
606 dtc 1.15 int mmask, mflags;
607 dtc 1.1
608 dtc 1.14 #if 0
609     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
610     nbytes, gc_alloc_generation);
611     #endif
612 dtc 1.1
613     /* Check that the region is in a reset state. */
614 dtc 1.14 gc_assert(alloc_region->first_page == 0
615     && alloc_region->last_page == -1
616     && alloc_region->free_pointer == alloc_region->end_addr);
617 dtc 1.1
618     if (unboxed)
619     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
620     else
621     restart_page = generations[gc_alloc_generation].alloc_start_page;
622    
623 dtc 1.14 /*
624     * Search for a contiguous free region of at least nbytes with the
625 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
626     * mask and matching flags.
627 dtc 1.14 */
628 dtc 1.15
629     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
630     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
631     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
632     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
633     | gc_alloc_generation;
634    
635 dtc 1.1 do {
636     first_page = restart_page;
637 dtc 1.14
638     /*
639     * First search for a page with at least 32 bytes free, that is
640     * not write protected, or marked dont_move.
641     */
642 dtc 1.15
643 dtc 1.23 while (first_page < dynamic_space_pages) {
644 dtc 1.15 int flags = page_table[first_page].flags;
645     if (!(flags & PAGE_ALLOCATED_MASK)
646     || ((flags & mmask) == mflags &&
647 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
648 dtc 1.15 break;
649 dtc 1.1 first_page++;
650 dtc 1.15 }
651    
652 dtc 1.1 /* Check for a failure */
653 dtc 1.23 if (first_page >= dynamic_space_pages) {
654 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
655     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
656 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
657 dtc 1.1 print_generation_stats(1);
658     exit(1);
659     }
660 dtc 1.14
661 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
662 dtc 1.14
663     #if 0
664     fprintf(stderr, " first_page=%d bytes_used=%d\n",
665     first_page, page_table[first_page].bytes_used);
666     #endif
667    
668     /*
669     * Now search forward to calculate the available region size. It
670     * tries to keeps going until nbytes are found and the number of
671     * pages is greater than some level. This helps keep down the
672     * number of pages in a region.
673     */
674 dtc 1.1 last_page = first_page;
675 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
676 dtc 1.1 num_pages = 1;
677 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
678 dtc 1.23 && last_page < dynamic_space_pages - 1
679 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
680 dtc 1.1 last_page++;
681     num_pages++;
682 dtc 1.17 bytes_found += PAGE_SIZE;
683 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
684 dtc 1.1 }
685 dtc 1.14
686 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
687     + PAGE_SIZE * (last_page - first_page);
688 dtc 1.14
689 dtc 1.1 gc_assert(bytes_found == region_size);
690 dtc 1.14
691     #if 0
692     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
693     last_page, bytes_found, num_pages);
694     #endif
695    
696 dtc 1.1 restart_page = last_page + 1;
697     }
698 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
699 dtc 1.14
700 dtc 1.1 /* Check for a failure */
701 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
702 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
703     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
704 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
705 dtc 1.1 print_generation_stats(1);
706     exit(1);
707     }
708 dtc 1.14
709     #if 0
710     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
711     gc_alloc_generation, bytes_found, first_page, last_page,
712     page_address(first_page));
713     #endif
714    
715 dtc 1.1 /* Setup the alloc_region. */
716     alloc_region->first_page = first_page;
717     alloc_region->last_page = last_page;
718     alloc_region->start_addr = page_table[first_page].bytes_used
719     + page_address(first_page);
720     alloc_region->free_pointer = alloc_region->start_addr;
721     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
722    
723     if (gencgc_zero_check) {
724     int *p;
725     for(p = (int *)alloc_region->start_addr;
726     p < (int *)alloc_region->end_addr; p++)
727     if (*p != 0)
728 gerd 1.32 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
729 dtc 1.1 }
730    
731     /* Setup the pages. */
732    
733     /* The first page may have already been in use. */
734     if (page_table[first_page].bytes_used == 0) {
735 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
736 dtc 1.1 page_table[first_page].first_object_offset = 0;
737     }
738 dtc 1.14
739 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
740     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
741     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
742     gc_assert(!PAGE_LARGE_OBJECT(first_page));
743    
744     for (i = first_page + 1; i <= last_page; i++) {
745     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
746     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
747     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
748     | gc_alloc_generation);
749 dtc 1.14 /*
750     * This may not be necessary for unboxed regions (think it was
751     * broken before!)
752     */
753 dtc 1.1 page_table[i].first_object_offset =
754     alloc_region->start_addr - page_address(i);
755     }
756    
757 dtc 1.17 /* Bump up the last_free_page */
758 dtc 1.14 if (last_page + 1 > last_free_page) {
759     last_free_page = last_page + 1;
760 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
761 dtc 1.17 (lispobj) ((char *) heap_base +
762     PAGE_SIZE * last_free_page));
763 dtc 1.1 }
764     }
765    
766    
767    
768 dtc 1.14 /*
769     * If the record_new_objects flag is 2 then all new regions created
770     * are recorded.
771     *
772     * If it's 1 then then it is only recorded if the first page of the
773     * current region is <= new_areas_ignore_page. This helps avoid
774     * unnecessary recording when doing full scavenge pass.
775     *
776     * The new_object structure holds the page, byte offset, and size of
777     * new regions of objects. Each new area is placed in the array of
778     * these structures pointed to by new_areas; new_areas_index holds the
779     * offset into new_areas.
780     *
781     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
782     * later code must detect this an handle it, probably by doing a full
783     * scavenge of a generation.
784     */
785 dtc 1.1
786     #define NUM_NEW_AREAS 512
787     static int record_new_objects = 0;
788     static int new_areas_ignore_page;
789     struct new_area {
790     int page;
791     int offset;
792     int size;
793     };
794     static struct new_area (*new_areas)[];
795 dtc 1.20 static int new_areas_index;
796 dtc 1.1 int max_new_areas;
797    
798     /* Add a new area to new_areas. */
799 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
800 dtc 1.1 {
801     unsigned new_area_start,c;
802     int i;
803    
804     /* Ignore if full */
805     if (new_areas_index >= NUM_NEW_AREAS)
806     return;
807    
808     switch (record_new_objects) {
809     case 0:
810     return;
811     case 1:
812     if (first_page > new_areas_ignore_page)
813     return;
814     break;
815     case 2:
816     break;
817     default:
818     gc_abort();
819     }
820    
821 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
822 dtc 1.14
823     /*
824     * Search backwards for a prior area that this follows from. If
825     * found this will save adding a new area.
826     */
827     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
828 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
829 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
830 dtc 1.14 #if 0
831     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
832     #endif
833 dtc 1.1 if (new_area_start == area_end) {
834 dtc 1.14 #if 0
835     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
836 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
837 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
838     #endif
839 dtc 1.1 (*new_areas)[i].size += size;
840     return;
841     }
842     }
843 dtc 1.14 #if 0
844     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
845     #endif
846 dtc 1.1
847     (*new_areas)[new_areas_index].page = first_page;
848     (*new_areas)[new_areas_index].offset = offset;
849     (*new_areas)[new_areas_index].size = size;
850 dtc 1.14 #if 0
851     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
852     new_areas_index, first_page, offset, size);
853     #endif
854 dtc 1.1 new_areas_index++;
855 dtc 1.14
856 dtc 1.1 /* Note the max new_areas used. */
857     if (new_areas_index > max_new_areas)
858     max_new_areas = new_areas_index;
859     }
860    
861    
/*
 * Update the page tables for the alloc_region: record the bytes used
 * on each page the region touched, credit the generation's allocation
 * counters, and release any pages reserved but never used.  The
 * region may be added to the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated.  Further,
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
				 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	/* The region must begin exactly at the first page's used mark. */
	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	/* Sanity-check the first page's flags against the region's. */
	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    /* Region spills onto following pages; clamp and continue. */
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+")
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    /* Offsets back to the region start were set when reserved. */
	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	/* The per-page tallies must sum to the region's size. */
	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generations alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr, "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    }
    else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region so the next quick alloc fails safely. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1014    
1015    
1016    
1017     static inline void *gc_quick_alloc(int nbytes);
1018    
/*
 * Allocate a possibly large object.  Small requests that fit in the
 * current region are redirected to gc_quick_alloc; otherwise a
 * contiguous run of pages is found (page-aligned for truly large
 * objects), its page-table entries are set up, and the allocation
 * counters updated.  Exits the process if the heap is exhausted.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
			    struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocation it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * Large objects start on a fresh page; small ones may share a
	 * partially used page with compatible flags.
	 */
	if (large)
	    while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;
		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= dynamic_space_pages) {
	    fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
	    fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
	    fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	    print_generation_stats(1);
	    exit(1);
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/* Extend the candidate run over following unallocated pages. */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
	fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
	fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
	print_generation_stats(1);
	exit(1);
    }

#if 0
    if (large)
	fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found,
		first_page, last_page, page_address(first_page));
#endif

    /* Remember where to restart the next large-object search. */
    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Negative offset pointing back to the object's first page. */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	SetSymbolValue(ALLOCATION_POINTER,
		       (lispobj) ((char *) heap_base +
				  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1251    
1252 dtc 1.14 /*
1253     * Allocate bytes from the boxed_region. It first checks if there is
1254     * room, if not then it calls gc_alloc_new_region to find a new region
1255     * with enough space. A pointer to the start of the region is returned.
1256     */
1257     static void *gc_alloc(int nbytes)
1258 dtc 1.1 {
1259     void *new_free_pointer;
1260    
1261 dtc 1.14 #if 0
1262     fprintf(stderr, "gc_alloc %d\n",nbytes);
1263     #endif
1264 dtc 1.1
1265     /* Check if there is room in the current alloc region. */
1266     new_free_pointer = boxed_region.free_pointer + nbytes;
1267 dtc 1.14
1268 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1269     /* If so then allocate from the current alloc region. */
1270     void *new_obj = boxed_region.free_pointer;
1271     boxed_region.free_pointer = new_free_pointer;
1272 dtc 1.14
1273 dtc 1.1 /* Check if the alloc region is almost empty. */
1274 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1275 dtc 1.1 /* If so finished with the current region. */
1276 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1277 dtc 1.1 /* Setup a new region. */
1278 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1279 dtc 1.1 }
1280 dtc 1.14 return (void *) new_obj;
1281 dtc 1.1 }
1282 dtc 1.14
1283 dtc 1.1 /* Else not enough free space in the current region. */
1284    
1285 dtc 1.14 /*
1286     * If there is a bit of room left in the current region then
1287     * allocate a large object.
1288     */
1289     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1290     return gc_alloc_large(nbytes, 0, &boxed_region);
1291 dtc 1.1
1292     /* Else find a new region. */
1293    
1294     /* Finished with the current region. */
1295 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1296    
1297 dtc 1.1 /* Setup a new region. */
1298 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1299    
1300 dtc 1.1 /* Should now be enough room. */
1301 dtc 1.14
1302 dtc 1.1 /* Check if there is room in the current region. */
1303     new_free_pointer = boxed_region.free_pointer + nbytes;
1304 dtc 1.14
1305 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1306     /* If so then allocate from the current region. */
1307     void *new_obj = boxed_region.free_pointer;
1308     boxed_region.free_pointer = new_free_pointer;
1309    
1310     /* Check if the current region is almost empty. */
1311 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1312 dtc 1.1 /* If so find, finished with the current region. */
1313 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1314    
1315 dtc 1.1 /* Setup a new region. */
1316 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1317 dtc 1.1 }
1318 dtc 1.14
1319     return (void *) new_obj;
1320 dtc 1.1 }
1321 dtc 1.14
1322 dtc 1.1 /* Shouldn't happen? */
1323     gc_assert(0);
1324 gerd 1.32 return 0;
1325 dtc 1.1 }
1326    
1327 dtc 1.14 /*
1328     * Allocate space from the boxed_region. If there is not enough free
1329     * space then call gc_alloc to do the job. A pointer to the start of
1330     * the region is returned.
1331     */
1332     static inline void *gc_quick_alloc(int nbytes)
1333 dtc 1.1 {
1334     void *new_free_pointer;
1335    
1336     /* Check if there is room in the current region. */
1337     new_free_pointer = boxed_region.free_pointer + nbytes;
1338 dtc 1.14
1339 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1340     /* If so then allocate from the current region. */
1341     void *new_obj = boxed_region.free_pointer;
1342     boxed_region.free_pointer = new_free_pointer;
1343 dtc 1.14 return (void *) new_obj;
1344 dtc 1.1 }
1345 dtc 1.14
1346 dtc 1.1 /* Else call gc_alloc */
1347 dtc 1.14 return gc_alloc(nbytes);
1348 dtc 1.1 }
1349    
1350 dtc 1.14 /*
1351     * Allocate space for the boxed object. If it is a large object then
1352     * do a large alloc else allocate from the current region. If there is
1353     * not enough free space then call gc_alloc to do the job. A pointer
1354     * to the start of the region is returned.
1355     */
1356     static inline void *gc_quick_alloc_large(int nbytes)
1357 dtc 1.1 {
1358     void *new_free_pointer;
1359    
1360     if (nbytes >= large_object_size)
1361     return gc_alloc_large(nbytes,0,&boxed_region);
1362    
1363     /* Check if there is room in the current region. */
1364     new_free_pointer = boxed_region.free_pointer + nbytes;
1365 dtc 1.14
1366 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1367     /* If so then allocate from the current region. */
1368     void *new_obj = boxed_region.free_pointer;
1369     boxed_region.free_pointer = new_free_pointer;
1370 dtc 1.14 return (void *) new_obj;
1371 dtc 1.1 }
1372 dtc 1.14
1373 dtc 1.1 /* Else call gc_alloc */
1374 dtc 1.14 return gc_alloc(nbytes);
1375 dtc 1.1 }
1376    
1377    
1378    
1379    
1380 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1381 dtc 1.1 {
1382     void *new_free_pointer;
1383    
1384 dtc 1.14 #if 0
1385     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1386     #endif
1387 dtc 1.1
1388     /* Check if there is room in the current region. */
1389     new_free_pointer = unboxed_region.free_pointer + nbytes;
1390 dtc 1.14
1391 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1392     /* If so then allocate from the current region. */
1393     void *new_obj = unboxed_region.free_pointer;
1394     unboxed_region.free_pointer = new_free_pointer;
1395 dtc 1.14
1396 dtc 1.1 /* Check if the current region is almost empty. */
1397     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1398     /* If so finished with the current region. */
1399 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1400    
1401 dtc 1.1 /* Setup a new region. */
1402 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1403 dtc 1.1 }
1404 dtc 1.14
1405     return (void *) new_obj;
1406 dtc 1.1 }
1407 dtc 1.14
1408 dtc 1.1 /* Else not enough free space in the current region. */
1409 dtc 1.14
1410     /*
1411     * If there is a bit of room left in the current region then
1412     * allocate a large object.
1413     */
1414     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1415     return gc_alloc_large(nbytes, 1, &unboxed_region);
1416 dtc 1.1
1417     /* Else find a new region. */
1418    
1419     /* Finished with the current region. */
1420     gc_alloc_update_page_tables(1,&unboxed_region);
1421 dtc 1.14
1422 dtc 1.1 /* Setup a new region. */
1423     gc_alloc_new_region(nbytes,1,&unboxed_region);
1424 dtc 1.14
1425 dtc 1.1 /* Should now be enough room. */
1426 dtc 1.14
1427 dtc 1.1 /* Check if there is room in the current region. */
1428     new_free_pointer = unboxed_region.free_pointer + nbytes;
1429 dtc 1.14
1430 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1431     /* If so then allocate from the current region. */
1432     void *new_obj = unboxed_region.free_pointer;
1433     unboxed_region.free_pointer = new_free_pointer;
1434 dtc 1.14
1435 dtc 1.1 /* Check if the current region is almost empty. */
1436     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1437     /* If so find, finished with the current region. */
1438 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1439    
1440 dtc 1.1 /* Setup a new region. */
1441 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1442 dtc 1.1 }
1443 dtc 1.14
1444     return (void *) new_obj;
1445 dtc 1.1 }
1446 dtc 1.14
1447 dtc 1.1 /* Shouldn't happen? */
1448     gc_assert(0);
1449 gerd 1.32 return 0;
1450 dtc 1.1 }
1451    
1452 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1453 dtc 1.1 {
1454     void *new_free_pointer;
1455    
1456     /* Check if there is room in the current region. */
1457     new_free_pointer = unboxed_region.free_pointer + nbytes;
1458 dtc 1.14
1459 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1460     /* If so then allocate from the current region. */
1461     void *new_obj = unboxed_region.free_pointer;
1462     unboxed_region.free_pointer = new_free_pointer;
1463 dtc 1.14
1464     return (void *) new_obj;
1465 dtc 1.1 }
1466 dtc 1.14
1467 dtc 1.1 /* Else call gc_alloc */
1468 dtc 1.14 return gc_alloc_unboxed(nbytes);
1469 dtc 1.1 }
1470    
1471 dtc 1.14 /*
1472     * Allocate space for the object. If it is a large object then do a
1473     * large alloc else allocate from the current region. If there is not
1474     * enough free space then call gc_alloc to do the job.
1475     *
1476     * A pointer to the start of the region is returned.
1477     */
1478     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1479 dtc 1.1 {
1480     void *new_free_pointer;
1481    
1482     if (nbytes >= large_object_size)
1483     return gc_alloc_large(nbytes,1,&unboxed_region);
1484    
1485     /* Check if there is room in the current region. */
1486     new_free_pointer = unboxed_region.free_pointer + nbytes;
1487 dtc 1.14
1488 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1489     /* If so then allocate from the current region. */
1490     void *new_obj = unboxed_region.free_pointer;
1491     unboxed_region.free_pointer = new_free_pointer;
1492 dtc 1.14
1493     return (void *) new_obj;
1494 dtc 1.1 }
1495 dtc 1.14
1496 dtc 1.1 /* Else call gc_alloc */
1497 dtc 1.14 return gc_alloc_unboxed(nbytes);
1498 dtc 1.1 }
1499    
1500     /***************************************************************************/
1501    
1502    
1503     /* Scavenging/transporting routines derived from gc.c */
1504    
1505     static int (*scavtab[256])(lispobj *where, lispobj object);
1506     static lispobj (*transother[256])(lispobj object);
1507     static int (*sizetab[256])(lispobj *where);
1508    
1509     static struct weak_pointer *weak_pointers;
1510 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1511 dtc 1.1
1512     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1513    
1514    
1515     /* Predicates */
1516    
1517 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1518 dtc 1.1 {
1519 dtc 1.14 int page_index = (void*) obj - heap_base;
1520     return page_index >= 0
1521 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1522 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1523 dtc 1.14 }
1524    
1525     static inline boolean new_space_p(lispobj obj)
1526     {
1527     int page_index = (void*) obj - heap_base;
1528     return page_index >= 0
1529 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1530 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1531 dtc 1.1 }
1532    
1533    
1534     /* Copying Objects */
1535    
1536    
1537     /* Copying Boxed Objects */
1538 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1539 dtc 1.1 {
1540     int tag;
1541     lispobj *new;
1542     lispobj *source, *dest;
1543 dtc 1.14
1544 dtc 1.1 gc_assert(Pointerp(object));
1545     gc_assert(from_space_p(object));
1546     gc_assert((nwords & 0x01) == 0);
1547 dtc 1.14
1548 dtc 1.1 /* get tag of object */
1549     tag = LowtagOf(object);
1550    
1551     /* allocate space */
1552     new = gc_quick_alloc(nwords*4);
1553 dtc 1.14
1554 dtc 1.1 dest = new;
1555     source = (lispobj *) PTR(object);
1556 dtc 1.14
1557 dtc 1.1 /* copy the object */
1558     while (nwords > 0) {
1559     dest[0] = source[0];
1560     dest[1] = source[1];
1561     dest += 2;
1562     source += 2;
1563     nwords -= 2;
1564     }
1565 dtc 1.14
1566 dtc 1.1 /* return lisp pointer of new object */
1567 dtc 1.14 return (lispobj) new | tag;
1568 dtc 1.1 }
1569    
/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/* Promote the object in place: retag its pages as new_space. */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/*
	 * Note: Any page write protection must be removed, else a later
	 * scavenge_newspace may incorrectly not scavenge these pages.
	 * This would not be necessary if they are added to the new areas,
	 * but lets do it for them all (they'll probably be written
	 * anyway?).
	 */

	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Walk every full page the object occupies. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(!PAGE_UNBOXED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

	    /*
	     * Remove any write protection. Should be able to rely on the
	     * WP flag to avoid redundant calls.
	     */
	    if (PAGE_WRITE_PROTECTED(next_page)) {
		os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
		page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
	    }
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(!PAGE_UNBOXED(next_page));

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page. Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object. These pages shouldn't be write protected as they
	     * should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

	/* Move the accounting from from_space to new_space. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Add the region to the new_areas if requested. */
	add_new_area(first_page, 0, nwords * 4);

	/* Promoted in place - the pointer is unchanged. */
	return object;
    }
    else {
	/* Not in a large-object region: copy it instead. */

	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1717    
1718     /* Copying UnBoxed Objects. */
1719 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1720 dtc 1.1 {
1721     int tag;
1722     lispobj *new;
1723     lispobj *source, *dest;
1724 dtc 1.14
1725 dtc 1.1 gc_assert(Pointerp(object));
1726     gc_assert(from_space_p(object));
1727     gc_assert((nwords & 0x01) == 0);
1728 dtc 1.14
1729 dtc 1.1 /* get tag of object */
1730     tag = LowtagOf(object);
1731    
1732     /* allocate space */
1733     new = gc_quick_alloc_unboxed(nwords*4);
1734 dtc 1.14
1735 dtc 1.1 dest = new;
1736     source = (lispobj *) PTR(object);
1737 dtc 1.14
1738     /* Copy the object */
1739 dtc 1.1 while (nwords > 0) {
1740     dest[0] = source[0];
1741     dest[1] = source[1];
1742     dest += 2;
1743     source += 2;
1744     nwords -= 2;
1745     }
1746 dtc 1.14
1747     /* Return lisp pointer of new object. */
1748     return (lispobj) new | tag;
1749 dtc 1.1 }
1750    
1751    
/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);	/* Objects are dual-word aligned. */

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object in place. Note: Unboxed objects may have
	 * been allocated to a BOXED region so it may be necessary to
	 * change the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/* A large object always starts at the top of its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	next_page = first_page;
	remaining_bytes = nwords * 4;
	/* Retag every fully used page as unboxed new_space. */
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used of the last (partial) page. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/* Match only from-space large-object pages below. */
	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page. Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object. These pages shouldn't be write protected, even if
	     * boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

	/* Update the per-generation and global byte accounting. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Promoted in place: the object's address is unchanged. */
	return object;
    }
    else {
	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words per iteration (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1883    
1884    
1885     /* Scavenging */
1886    
1887     #define DIRECT_SCAV 0
1888    
/*
 * Scavenge NWORDS words starting at START: examine each word and, for
 * pointers into from space, either follow an existing forwarding
 * pointer or dispatch to the per-type scavenger in scavtab. Each
 * scavenger returns the number of words it consumed.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
	lispobj object;
	int words_scavenged;

	object = *start;

	gc_assert(object != 0x01); /* Not a forwarding pointer. */

#if DIRECT_SCAV
	/* Let the per-type scavenger handle everything itself. */
	words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
	if (Pointerp(object))
	    /* It be a pointer. */
	    if (from_space_p(object)) {
		/*
		 * It currently points to old space. Check for a forwarding
		 * pointer.
		 */
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if(first_word == 0x01) {
		    /* Yep, there be a forwarding pointer. */
		    *start = ptr[1];
		    words_scavenged = 1;
		}
		else
		    /* Scavenge that pointer. */
		    words_scavenged = (scavtab[TypeOf(object)])(start, object);
	    }
	    else
		/* It points somewhere other than oldspace. Leave it alone. */
		words_scavenged = 1;
	else
	    if ((object & 3) == 0)
		/* It's a fixnum. Real easy. */
		words_scavenged = 1;
	    else
		/* It's some random header object. */
		words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

	start += words_scavenged;
	nwords -= words_scavenged;
    }
    /* The scavengers must account for exactly the words given. */
    gc_assert(nwords == 0);
}
1938    
1939    
1940     /* Code and Code-Related Objects */
1941    
1942 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1943 dtc 1.1
1944     static lispobj trans_function_header(lispobj object);
1945     static lispobj trans_boxed(lispobj object);
1946    
1947     #if DIRECT_SCAV
/*
 * DIRECT_SCAV variant: scavenge a function pointer at *WHERE. Called
 * for any pointer, so it must itself ignore pointers that are not
 * into from space. Always consumes one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded */
	    *where = first_pointer[1];
	    return 1;
	}
	else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	    case type_FunctionHeader:
	    case type_ClosureFunctionHeader:
		copy = trans_function_header(object);
		break;
	    default:
		copy = trans_boxed(object);
		break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
2004     #else
/*
 * Fast variant: the caller (scavenge) has already established that
 * OBJECT points into from space and is not forwarded, so transport it
 * unconditionally, leave a forwarding pointer, and update *WHERE.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a FP. */
    first_pointer = (lispobj *) PTR(object);

    /*
     * Must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header.
     */

    switch (TypeOf(*first_pointer)) {
    case type_FunctionHeader:
    case type_ClosureFunctionHeader:
	copy = trans_function_header(object);
	break;
    default:
	copy = trans_boxed(object);
	break;
    }

    if (copy != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = copy;
    }

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    *where = copy;

    return 1;
}
2043     #endif
2044    
/*
 * Scan a x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked.
 */
void sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
	return;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
	return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /* The constant vector follows the 5-word code header. */
    constants_start_addr = (void *) code + 5 * 4;
    constants_end_addr = (void *) code + nheader_words * 4;
    code_start_addr = (void *) code + nheader_words * 4;
    code_end_addr = (void *) code + nwords * 4;

    /* Work through the unboxed code, one byte position at a time. */
    for (p = code_start_addr; p < code_end_addr; p++) {
	/* Candidate 32-bit operand ending at p, plus the opcode bytes
	   that precede it. */
	void *data = *(void **) p;
	unsigned d1 = *((unsigned char *) p - 1);
	unsigned d2 = *((unsigned char *) p - 2);
	unsigned d3 = *((unsigned char *) p - 3);
	unsigned d4 = *((unsigned char *) p - 4);
	unsigned d5 = *((unsigned char *) p - 5);
	unsigned d6 = *((unsigned char *) p - 6);

	/*
	 * Check for code references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the code area of the code object (i.e. it still
	 * points at the object's pre-move location).
	 */
	if (data >= code_start_addr - displacement
	    && data < code_end_addr - displacement) {
	    /* Function header */
	    if (d4 == 0x5e
		&& ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
		/* Skip the function header */
		p += 6 * 4 - 4 - 1;
		continue;
	    }
	    /* Push imm32 */
	    if (d1 == 0x68) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1,
			(unsigned long) data);
		fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
	    }
	    /* Mov [reg-8],imm32 */
	    if (d3 == 0xc7
		&& (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
		    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
		&& d1 == 0xf8) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
		fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
	    }
	    /* Lea reg, [disp32] */
	    if (d2 == 0x8d && (d1 & 0xc7) == 5) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
		fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
	    }
	}

	/*
	 * Check for constant references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the constant vector. Constant references will be
	 * aligned.
	 */
	if (data >= constants_start_addr - displacement
	    && data < constants_end_addr - displacement
	    && ((unsigned) data & 0x3) == 0) {
	    /* Mov eax,m32 */
	    if (d1 == 0xa1) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
	    }

	    /* Mov m32,eax */
	    if (d1 == 0xa3) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
	    }

	    /* Cmp m32,imm32 */
	    if (d1 == 0x3d && d2 == 0x81) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		/* XX Check this */
		fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
	    }

	    /* Check for a mod=00, r/m=101 byte. */
	    if ((d1 & 0xc7) == 5) {
		/* Cmp m32,reg */
		if (d2 == 0x39) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
		}
		/* Cmp reg32,m32 */
		if (d2 == 0x3b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
		}
		/* Mov m32,reg32 */
		if (d2 == 0x89) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
		}
		/* Mov reg32,m32 */
		if (d2 == 0x8b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
		}
		/* Lea reg32,m32 */
		if (d2 == 0x8d) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
		}
	    }
	}
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
	fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
		(unsigned long) code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
		(unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
		(unsigned long) code_start_addr, (unsigned long) code_end_addr);
    }
}
2226    
/*
 * Apply the recorded fixups to NEW_CODE after it was moved from
 * OLD_CODE. The fixup offsets are stored by the compiler in the code
 * object's first constant as a (simple-array (unsigned-byte 32));
 * absolute fixups are adjusted by the displacement, relative ones by
 * its negation.
 */
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned) new_code - (unsigned) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
	return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if 0
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    new_code, nheader_words, ncode_words);
#endif
    /* The constant vector follows the 5-word code header. */
    constants_start_addr = (void *) new_code + 5 * 4;
    constants_end_addr = (void *) new_code + nheader_words * 4;
    code_start_addr = (void *) new_code + nheader_words * 4;
    code_end_addr = (void *)new_code + nwords*4;
#if 0
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
	/* Check for possible errors. */
	if (check_code_fixups)
	    sniff_code_object(new_code, displacement);

#if 0
	fprintf(stderr, "Fixups for code object not found!?\n");
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		new_code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
#endif
	return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
	&& fixups_vector->header == 0x01) {
#if 0
	fprintf(stderr, "* FF\n");
#endif
	/* If so then follow it. */
	fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }

#if 0
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
	/*
	 * Got the fixups for the code block. Now work through the
	 * vector, and apply a fixup at each address.
	 */
	int length = fixnum_value(fixups_vector->length);
	int i;
	for (i = 0; i < length; i++) {
	    unsigned offset = fixups_vector->data[i];
	    /* Now check the current value of offset. */
	    unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

	    /*
	     * If it's within the old_code object then it must be an
	     * absolute fixup (relative ones are not saved).
	     */
	    if (old_value >= (unsigned) old_code
		&& old_value < (unsigned) old_code + nwords * 4)
		/* So add the displacement. */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    + displacement;
	    else
		/*
		 * It is outside the old code object so it must be a relative
		 * fixup (absolute fixups are not saved). So subtract the
		 * displacement.
		 */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    - displacement;
	}
    }

    /* Check for possible errors. */
    if (check_code_fixups)
	sniff_code_object(new_code, displacement);
}
2343    
/*
 * Transport the code object CODE to new space, leaving forwarding
 * pointers behind for the object itself and for every function header
 * it contains, fixing each function's self pointer, and applying the
 * recorded fixups. Returns the (possibly unmoved) new code object.
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	    (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
	return (struct code*) (((lispobj *) code)[1]);

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved (e.g. promoted in place). */
    if (new_code == code)
	return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	    (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp, *nfheaderp;
	lispobj nfheaderl;

	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	/*
	 * Calculate the new function pointer and the new function header.
	 */
	nfheaderl = fheaderl + displacement;
	nfheaderp = (struct function *) PTR(nfheaderl);

	/* set forwarding pointer */
	((lispobj *) fheaderp)[0] = 0x01;
	((lispobj *) fheaderp)[1] = nfheaderl;

	/* Fix self pointer */
	nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

	/* Link the new header onto the new code object's entry list. */
	*prev_pointer = nfheaderl;

	fheaderl = fheaderp->next;
	prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
    apply_code_fixups(code, new_code);

    return new_code;
}
2431    
/*
 * Scavenge the boxed parts of a code object: its boxed header section
 * and the name/arglist/type slots of every function header on its
 * entry-point chain. Returns the object's total size in words.
 */
static int scav_code_header(lispobj *where, lispobj object)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    /* Scavenge the boxed section of the code data block */
    scavenge(where + 1, nheader_words - 1);

    /*
     * Scavenge the boxed section of each function object in the code
     * data block
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	scavenge(&fheaderp->name, 1);
	scavenge(&fheaderp->arglist, 1);
	scavenge(&fheaderp->type, 1);

	fheaderl = fheaderp->next;
    }

    return nwords;
}
2466    
2467 dtc 1.14 static lispobj trans_code_header(lispobj object)
2468 dtc 1.1 {
2469     struct code *ncode;
2470    
2471     ncode = trans_code((struct code *) PTR(object));
2472     return (lispobj) ncode | type_OtherPointer;
2473     }
2474    
2475 dtc 1.14 static int size_code_header(lispobj *where)
2476 dtc 1.1 {
2477     struct code *code;
2478     int nheader_words, ncode_words, nwords;
2479    
2480     code = (struct code *) where;
2481 dtc 1.14
2482 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2483     nheader_words = HeaderValue(code->header);
2484     nwords = ncode_words + nheader_words;
2485     nwords = CEILING(nwords, 2);
2486    
2487     return nwords;
2488     }
2489    
2490    
2491 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2492 dtc 1.1 {
2493     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2494     fprintf(stderr, "Return PC Header.\n");
2495 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2496 dtc 1.1 (unsigned long) where, (unsigned long) object);
2497     lose(NULL);
2498     return 0;
2499     }
2500    
2501 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2502 dtc 1.1 {
2503     struct function *return_pc;
2504     unsigned long offset;
2505     struct code *code, *ncode;
2506    
2507 dtc 1.14 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2508    
2509 dtc 1.1 return_pc = (struct function *) PTR(object);
2510     offset = HeaderValue(return_pc->header) * 4;
2511    
2512     /* Transport the whole code object */
2513     code = (struct code *) ((unsigned long) return_pc - offset);
2514     ncode = trans_code(code);
2515 dtc 1.14
2516 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2517     }
2518    
/*
 * On the 386, closures hold a pointer to the raw address instead of
 * the function object.
 */
#ifdef i386
static int scav_closure_header(lispobj *where, lispobj object)
{
    struct closure *c = (struct closure *) where;
    /* Recover the tagged function pointer from the raw address. */
    lispobj fn = c->function - RAW_ADDR_OFFSET;

    scavenge(&fn, 1);

    /*
     * The function may have moved, so update the raw address - but
     * only when it actually changed, to avoid dirtying the page with
     * an unnecessary write.
     */
    if (c->function != fn + RAW_ADDR_OFFSET)
	c->function = fn + RAW_ADDR_OFFSET;

    return 2;
}
#endif
2540    
2541 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2542 dtc 1.1 {
2543     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2544     fprintf(stderr, "Function Header.\n");
2545 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2546 dtc 1.1 (unsigned long) where, (unsigned long) object);
2547     lose(NULL);
2548     return 0;
2549     }
2550    
2551 dtc 1.14 static lispobj trans_function_header(lispobj object)
2552 dtc 1.1 {
2553     struct function *fheader;
2554     unsigned long offset;
2555     struct code *code, *ncode;
2556 dtc 1.14
2557 dtc 1.1 fheader = (struct function *) PTR(object);
2558     offset = HeaderValue(fheader->header) * 4;
2559 dtc 1.14
2560 dtc 1.1 /* Transport the whole code object */
2561     code = (struct code *) ((unsigned long) fheader - offset);
2562     ncode = trans_code(code);
2563 dtc 1.14
2564 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2565     }
2566    
2567    
2568     /* Instances */
2569    
2570     #if DIRECT_SCAV
/*
 * DIRECT_SCAV variant: scavenge an instance pointer at *WHERE,
 * ignoring pointers that are not into from space. Always consumes
 * one word.
 */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * object is a pointer into from space. check to see if it has
	 * been forwarded
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded. */
	    first = first_pointer[1];
	else {
	    first = trans_boxed(object);
	    gc_assert(first != object);
	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}
	*where = first;
    }
    return 1;
}
2597     #else
/*
 * Scavenge the instance pointer *WHERE.  The caller has already
 * established that OBJECT points into from space and is not yet
 * forwarded.  Transport the instance, install a forwarding pointer
 * over the old copy, and update *WHERE.  Returns the number of words
 * scavenged (1).
 */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    lispobj copy, *first_pointer;

    /* Object is a pointer into from space - not a FP */
    copy = trans_boxed(object);

    gc_assert(copy != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer. */
    first_pointer[0] = 0x01;
    first_pointer[1] = copy;
    *where = copy;

    return 1;
}
2616     #endif
2617    
2618    
2619     /* Lists and Conses */
2620    
2621     static lispobj trans_list(lispobj object);
2622    
2623     #if DIRECT_SCAV
/*
 * Scavenge the list pointer *WHERE (DIRECT_SCAV version).  If OBJECT
 * points into from space, transport the list - or follow an existing
 * forwarding pointer - and update *WHERE.  Returns the number of
 * words scavenged (1).
 */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded: new tagged pointer is in the cdr slot. */
	    first = first_pointer[1];
	else {
	    first = trans_list(object);

	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
	*where = first;
    }
    return 1;
}
2655     #else
/*
 * Scavenge the list pointer *WHERE.  The caller has already
 * established that OBJECT points into from space and is not yet
 * forwarded.  Transport the list (trans_list linearizes along the
 * cdr chain), install the forwarding pointer, and update *WHERE.
 * Returns the number of words scavenged (1).
 */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */

    first = trans_list(object);
    gc_assert(first != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer */
    first_pointer[0] = 0x01;
    first_pointer[1] = first;

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
    *where = first;
    return 1;
}
2678     #endif
2679    
/*
 * Transport the cons OBJECT to new space, then keep copying along its
 * cdr chain while successive cdrs are unforwarded from-space conses.
 * This linearizes lists in memory to reduce paging.  Each old cons is
 * clobbered with a forwarding pointer (car = 0x01, cdr = new tagged
 * pointer) immediately after its fields are copied.  Returns the
 * tagged pointer to the new copy of OBJECT.
 */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered by the forwarding pointer. */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/* Stop when the cdr is not a from-space cons, or is already
	   forwarded. */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || *((lispobj *) PTR(cdr)) == 0x01)
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;

	/* Set forwarding pointer */
	cdr_cons->car = 0x01;
	cdr_cons->cdr = new_cdr;

	/*
	 * Update the cdr of the last cons copied into new space to keep
	 * the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2738    
2739    
2740     /* Scavenging and Transporting Other Pointers */
2741    
2742     #if DIRECT_SCAV
/*
 * Scavenge the other-pointer *WHERE (DIRECT_SCAV version).  If OBJECT
 * points into from space, transport it via the transother dispatch
 * table - or follow an existing forwarding pointer - and update
 * *WHERE.  Some transporters may return the object unchanged, in
 * which case no forwarding pointer is installed.  Returns the number
 * of words scavenged (1).
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space. check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded. */
	    first = first_pointer[1];
	    *where = first;
	} else {
	    first = (transother[TypeOf(first)])(object);

	    if (first != object) {
		/* Set forwarding pointer */
		first_pointer[0] = 0x01;
		first_pointer[1] = first;
		*where = first;
	    }
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
    }
    return 1;
}
2777     #else
/*
 * Scavenge the other-pointer *WHERE.  The caller has already
 * established that OBJECT points into from space and is not yet
 * forwarded.  Transport it via the transother dispatch table on its
 * header type; if the transporter returned a new copy, install the
 * forwarding pointer and update *WHERE.  Returns the number of words
 * scavenged (1).
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)])(object);

    if (first != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = first;
	*where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
2801     #endif
2802    
2803    
2804     /* Immediate, Boxed, and Unboxed Objects */
2805    
2806 dtc 1.14 static int size_pointer(lispobj *where)
2807 dtc 1.1 {
2808     return 1;
2809     }
2810    
2811 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
2812 dtc 1.1 {
2813     return 1;
2814     }
2815    
2816 dtc 1.14 static lispobj trans_immediate(lispobj object)
2817 dtc 1.1 {
2818     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2819     lose(NULL);
2820     return NIL;
2821     }
2822    
2823 dtc 1.14 static int size_immediate(lispobj *where)
2824 dtc 1.1 {
2825     return 1;
2826     }
2827    
2828    
2829 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
2830 dtc 1.1 {
2831     return 1;
2832     }
2833    
2834 dtc 1.14 static lispobj trans_boxed(lispobj object)
2835 dtc 1.1 {
2836     lispobj header;
2837     unsigned long length;
2838    
2839     gc_assert(Pointerp(object));
2840    
2841     header = *((lispobj *) PTR(object));
2842     length = HeaderValue(header) + 1;
2843     length = CEILING(length, 2);
2844    
2845     return copy_object(object, length);
2846     }
2847    
2848 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
2849 dtc 1.1 {
2850     lispobj header;
2851     unsigned long length;
2852    
2853     gc_assert(Pointerp(object));
2854    
2855     header = *((lispobj *) PTR(object));
2856     length = HeaderValue(header) + 1;
2857     length = CEILING(length, 2);
2858    
2859     return copy_large_object(object, length);
2860     }
2861    
2862 dtc 1.14 static int size_boxed(lispobj *where)
2863 dtc 1.1 {
2864     lispobj header;
2865     unsigned long length;
2866    
2867     header = *where;
2868     length = HeaderValue(header) + 1;
2869     length = CEILING(length, 2);
2870    
2871     return length;
2872     }
2873    
/*
 * Scavenge the fdefn at WHERE.  If raw_addr currently corresponds to
 * the function slot (function + RAW_ADDR_OFFSET), scavenge the boxed
 * slots and recompute raw_addr from the possibly-moved function,
 * returning the fdefn's size in words.  Otherwise return 1 so only
 * the header word is skipped here.  NOTE(review): the semantics of
 * the else-branch (raw_addr pointing elsewhere, e.g. a trampoline)
 * are inferred -- confirm against the fdefn layout in objdef.
 */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *)where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
	scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

	/* Don't write unnecessarily */
	if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
	    fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);

	return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
	return 1;
}
2892    
2893 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
2894 dtc 1.1 {
2895     unsigned long length;
2896    
2897     length = HeaderValue(object) + 1;
2898     length = CEILING(length, 2);
2899    
2900     return length;
2901     }
2902    
2903 dtc 1.14 static lispobj trans_unboxed(lispobj object)
2904 dtc 1.1 {
2905     lispobj header;
2906     unsigned long length;
2907    
2908    
2909     gc_assert(Pointerp(object));
2910    
2911     header = *((lispobj *) PTR(object));
2912     length = HeaderValue(header) + 1;
2913     length = CEILING(length, 2);
2914    
2915     return copy_unboxed_object(object, length);
2916     }
2917    
2918 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
2919 dtc 1.1 {
2920     lispobj header;
2921     unsigned long length;
2922    
2923    
2924     gc_assert(Pointerp(object));
2925    
2926     header = *((lispobj *) PTR(object));
2927     length = HeaderValue(header) + 1;
2928     length = CEILING(length, 2);
2929    
2930     return copy_large_unboxed_object(object, length);
2931     }
2932    
2933 dtc 1.14 static int size_unboxed(lispobj *where)
2934 dtc 1.1 {
2935     lispobj header;
2936     unsigned long length;
2937    
2938     header = *where;
2939     length = HeaderValue(header) + 1;
2940     length = CEILING(length, 2);
2941    
2942     return length;
2943     }
2944    
2945    
2946     /* Vector-Like Objects */
2947    
/* Number of words needed to hold X items, at Y items per word. */
#define NWORDS(x,y) (CEILING((x),(y)) / (y))
2949    
2950 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
2951 dtc 1.1 {
2952     struct vector *vector;
2953     int length, nwords;
2954    
2955 dtc 1.14 /*
2956     * NOTE: Strings contain one more byte of data than the length
2957     * slot indicates.
2958     */
2959 dtc 1.1
2960     vector = (struct vector *) where;
2961     length = fixnum_value(vector->length) + 1;
2962     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2963    
2964     return nwords;
2965     }
2966    
2967 dtc 1.14 static lispobj trans_string(lispobj object)
2968 dtc 1.1 {
2969     struct vector *vector;
2970     int length, nwords;
2971    
2972     gc_assert(Pointerp(object));
2973    
2974 dtc 1.14 /*
2975     * NOTE: Strings contain one more byte of data than the length
2976     * slot indicates.
2977     */
2978 dtc 1.1
2979     vector = (struct vector *) PTR(object);
2980     length = fixnum_value(vector->length) + 1;
2981     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2982    
2983     return copy_large_unboxed_object(object, nwords);
2984     }
2985    
2986 dtc 1.14 static int size_string(lispobj *where)
2987 dtc 1.1 {
2988     struct vector *vector;
2989     int length, nwords;
2990    
2991 dtc 1.14 /*
2992     * NOTE: Strings contain one more byte of data than the length
2993     * slot indicates.
2994     */
2995 dtc 1.1
2996     vector = (struct vector *) where;
2997     length = fixnum_value(vector->length) + 1;
2998     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2999    
3000     return nwords;
3001     }
3002    
3003 gerd 1.32
3004     /************************************************************************
3005     Hash Tables
3006     ************************************************************************/
3007    
/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  Slot indices (in Lisp words) are noted in the
   comments; the layout must be kept in sync with the Lisp side. */

struct hash_table
{
  lispobj instance_header;	/* 0 */
  lispobj dummy2;
  lispobj test;
  lispobj test_fun;
  lispobj hash_fun;
  lispobj rehash_size;		/* 5 */
  lispobj rehash_threshold;
  lispobj rehash_trigger;
  lispobj number_entries;	/* Fixnum count of live entries. */
  lispobj table;		/* The key/value vector. */
  lispobj weak_p;		/* 10.  Also doubles as the GC's chain
				   link for the weak_hash_tables list. */
  lispobj needing_rehash;
  lispobj next_free_kv;
  lispobj index_vector;
  lispobj next_vector;
  lispobj hash_vector;		/* 15 */
};
3030    
/* The size of a hash-table in Lisp objects. */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))

/* Compute the EQ-hash of KEY.  This must be the same as what's used
   in hash-new.lisp. */

#define EQ_HASH(key) ((key) & 0x1fffffff)

/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either. */

static lispobj weak_hash_tables;
3049 dtc 1.1
3050 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3051 dtc 1.1
3052 gerd 1.32 static inline int
3053     survives_gc (lispobj obj)
3054     {
3055     if (!Pointerp (obj) || !from_space_p (obj))
3056     return 1;
3057     return *(lispobj *) PTR (obj) == 1;
3058     }
3059 dtc 1.14
3060 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3061     element, otherwise return null. If LENGTH is not null, return in it
3062     the array's length. */
3063 dtc 1.1
3064 gerd 1.32 static inline unsigned *
3065     u32_vector (lispobj obj, unsigned *length)
3066     {
3067     unsigned *ptr = NULL;
3068    
3069     if (Pointerp (obj))
3070     {
3071     lispobj *p = (lispobj *) PTR (obj);
3072    
3073     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3074     {
3075     ptr = (unsigned *) (p + 2);
3076     if (length)
3077     *length = fixnum_value (p[1]);
3078     }
3079     }
3080 dtc 1.1
3081 gerd 1.32 return ptr;
3082     }
3083 dtc 1.1
/* Free an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX.  The entry is
   unlinked from its collision chain, the entry count is decremented,
   and the slot is pushed onto the table's free list. */

static inline void
free_hash_entry (struct hash_table *hash_table, int hash_index,
		 int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int free_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* The entry is the first in the collision chain.
       Pop it from the list. */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* The entry is not the first in the collision chain:
	 walk the chain to find and unlink it. */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	/* Not found in its chain; leave the table untouched. */
	free_p = 0;
    }

  if (free_p)
    {
      unsigned count = fixnum_value (hash_table->number_entries);
      gc_assert (count > 0);
      hash_table->number_entries = make_fixnum (count - 1);
      next_vector[kv_index] = hash_table->next_free_kv;
      hash_table->next_free_kv = make_fixnum (kv_index);
    }
}
3124    
/* Record an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX, for rehashing.  The
   entry is unlinked from its collision chain and pushed onto the
   NEEDING-REHASH chain, which is threaded through NEXT-VECTOR. */

static inline void
record_for_rehashing (struct hash_table *hash_table, int hash_index,
		      int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int rehash_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* This entry is at the head of the collision chain.
       Pop it from that list. */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* Walk the chain to find and unlink the entry. */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	rehash_p = 0;
    }

  if (rehash_p)
    {
      /* NOTE(review): this stores the unboxed chain head via
	 fixnum_value, whereas free_hash_entry stores the raw fixnum
	 NEXT-FREE-KV into next_vector -- confirm which representation
	 the Lisp rehashing code expects. */
      next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
      hash_table->needing_rehash = make_fixnum (kv_index);
    }
}
3161 dtc 1.1
/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-zero means this function is called for a weak hash-table at the
   end of a GC, and entries whose keys did not survive are freed.
   WEAK zero means this function is called for scavenging a non-weak
   hash-table.  In either case, EQ-hashed entries whose keys moved to
   a different bucket are recorded for rehashing. */

static void
scav_hash_entries (struct hash_table *hash_table, int weak)
{
  unsigned kv_length;
  lispobj *kv_vector;
  unsigned *index_vector, *next_vector, *hash_vector;
  unsigned length;
  lispobj empty_symbol;
  unsigned next_vector_length;
  unsigned i;

  kv_vector = (lispobj *) PTR (hash_table->table);
  kv_length = fixnum_value (kv_vector[1]);
  kv_vector += 2;

  /* Element 1 of the kv vector is the :EMPTY marker symbol. */
  empty_symbol = kv_vector[1];

  index_vector = u32_vector (hash_table->index_vector, &length);
  next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  gc_assert (index_vector && next_vector);
  gc_assert (next_vector_length * 2 == kv_length);

  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;
      lispobj new_key;
      unsigned int new_index;

      /* A hash_vector entry of 0x80000000 (or no hash_vector at all)
	 marks an EQ-hashed entry, whose bucket depends on the key's
	 address. */
      if (weak
	  && !survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	free_hash_entry (hash_table, old_index, i);
      else
	{
	  /* If the key is EQ-hashed and moves, schedule it for rehashing. */
	  scavenge (&kv_vector[2 * i], 2);
	  new_key = kv_vector[2 * i];
	  new_index = EQ_HASH (new_key) % length;

	  if (old_index != new_index
	      && index_vector[old_index] != 0
	      && (hash_vector == 0 || hash_vector[i] == 0x80000000)
	      && (new_key != empty_symbol
		  || kv_vector[2 * i + 1] != empty_symbol))
	    record_for_rehashing (hash_table, old_index, i);
	}
    }
}
3220 dtc 1.14
3221 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3222     been already. Value is 1 if anything new has been scavenged, 0
3223     otherwise. */
3224 dtc 1.14
3225 gerd 1.32 static int
3226     scav_weak_entries (struct hash_table *hash_table)
3227     {
3228     lispobj *kv_vector;
3229     unsigned *index_vector, *hash_vector;
3230     unsigned length;
3231     unsigned next_vector_length;
3232     unsigned i, scavenged = 0;
3233    
3234     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3235    
3236     index_vector = u32_vector (hash_table->index_vector, &length);
3237     u32_vector (hash_table->next_vector, &next_vector_length);
3238     hash_vector = u32_vector (hash_table->hash_vector, 0);
3239 dtc 1.14
3240 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3241     {
3242     lispobj old_key = kv_vector[2 * i];
3243     unsigned int old_index = EQ_HASH (old_key) % length;
3244 dtc 1.14
3245 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3246     the only reference to a key in a weak table is a value in
3247     another weak table. */
3248     if (survives_gc (old_key)
3249     && index_vector[old_index] != 0
3250     && (hash_vector == 0 || hash_vector[old_index] == 0x80000000))
3251     {
3252     scavenge (&kv_vector[2 * i + 1], 1);
3253     scavenged = 1;
3254     }
3255     }
3256 dtc 1.1
3257 gerd 1.32 return scavenged;
3258     }
3259 dtc 1.14
/* Process weak hash-tables at the end of a GC.  First, repeatedly
   scavenge values of entries with surviving keys until a fixpoint is
   reached, because a scavenged value may be the only reference to a
   key of another weak table.  Then free dead entries in every weak
   table, restoring each table's WEAK-P slot (used during the GC as
   the chain link) to T, and reset the chain to NIL. */

static void
scan_weak_tables (void)
{
  lispobj table, next;
  int more_scavenged;

  /* Scavenge hash values of surviving keys, until there is nothing
     new.  This is for the case that the only reference to a weak key
     is a value in another weak table. */
  do
    {
      more_scavenged = 0;

      for (table = weak_hash_tables; table != NIL; table = next)
	{
	  struct hash_table *ht = (struct hash_table *) PTR (table);
	  next = ht->weak_p;
	  if (scav_weak_entries (ht))
	    more_scavenged = 1;
	}
    }
  while (more_scavenged);

  for (table = weak_hash_tables; table != NIL; table = next)
    {
      struct hash_table *ht = (struct hash_table *) PTR (table);
      next = ht->weak_p;
      /* Restore the slot the GC borrowed as a chain link. */
      ht->weak_p = T;
      scav_hash_entries (ht, 1);
    }

  weak_hash_tables = NIL;
}
3295    
/* Scavenge a key/value vector of a hash-table.  Ordinary vectors fall
   through to the generic boxed-vector scavenge (return 1); only
   vectors marked subtype_VectorValidHashing get hash-table treatment.
   Weak tables are deferred to scan_weak_tables by pushing them on the
   weak_hash_tables chain.  Value is the vector's size in words. */

static int
scav_hash_vector (lispobj *where, lispobj object)
{
  unsigned int kv_length;
  lispobj *kv_vector;
  lispobj empty_symbol, hash_table_obj;
  struct hash_table *hash_table;

  if (HeaderValue (object) != subtype_VectorValidHashing)
    return 1;

  /* WHERE is a hash table key/value vector.  First word is header,
     second is vector length.  Keys and values follow after the
     length.  The first value is the symbol :empty, the first key is a
     reference to the hash-table containing the key/value vector.
     (See hash-new.lisp, MAKE-HASH-TABLE.) */

  kv_length = fixnum_value (where[1]);
  kv_vector = where + 2;
  /* Scavenge the table reference and the :empty symbol first, so the
     pointers below are valid. */
  scavenge (kv_vector, 2);

  gc_assert (Pointerp (kv_vector[0]));
  gc_assert (Pointerp (kv_vector[1]));

  hash_table_obj = kv_vector[0];
  hash_table = (struct hash_table *) PTR (hash_table_obj);
  empty_symbol = kv_vector[1];

  gc_assert (where == (lispobj *) PTR (hash_table->table));
  gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
  gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);

  /* Scavenge the hash table itself, which fixes the positions of the
     other needed objects (index/next/hash vectors). */
  scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);

  /* Testing for T here instead of NIL automatically makes sure we
     don't add the same table twice to the list of weak tables, should
     this function ever be called twice for the same object. */
  if (hash_table->weak_p == T)
    {
      hash_table->weak_p = weak_hash_tables;
      weak_hash_tables = hash_table_obj;
    }
  else
    scav_hash_entries (hash_table, 0);

  return CEILING (kv_length + 2, 2);
}
3347    
3348    
3349 dtc 1.14 static lispobj trans_vector(lispobj object)
3350 dtc 1.1 {
3351     struct vector *vector;
3352     int length, nwords;
3353    
3354     gc_assert(Pointerp(object));
3355    
3356     vector = (struct vector *) PTR(object);
3357    
3358     length = fixnum_value(vector->length);
3359     nwords = CEILING(length + 2, 2);
3360    
3361     return copy_large_object(object, nwords);
3362     }
3363    
3364 dtc 1.14 static int size_vector(lispobj *where)
3365 dtc 1.1 {
3366     struct vector *vector;
3367     int length, nwords;
3368    
3369     vector = (struct vector *) where;
3370     length = fixnum_value(vector->length);
3371     nwords = CEILING(length + 2, 2);
3372    
3373     return nwords;
3374     }
3375    
3376    
3377 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3378 dtc 1.1 {
3379     struct vector *vector;
3380     int length, nwords;
3381    
3382     vector = (struct vector *) where;
3383     length = fixnum_value(vector->length);
3384     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3385    
3386     return nwords;
3387     }
3388    
3389 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3390 dtc 1.1 {
3391     struct vector *vector;
3392     int length, nwords;
3393    
3394     gc_assert(Pointerp(object));
3395    
3396     vector = (struct vector *) PTR(object);
3397     length = fixnum_value(vector->length);
3398     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3399    
3400     return copy_large_unboxed_object(object, nwords);
3401     }
3402    
3403 dtc 1.14 static int size_vector_bit(lispobj *where)
3404 dtc 1.1 {
3405     struct vector *vector;
3406     int length, nwords;
3407    
3408     vector = (struct vector *) where;
3409     length = fixnum_value(vector->length);
3410     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3411    
3412     return nwords;
3413     }
3414    
3415    
3416 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3417 dtc 1.1 {
3418     struct vector *vector;
3419     int length, nwords;
3420    
3421     vector = (struct vector *) where;
3422     length = fixnum_value(vector->length);
3423     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3424    
3425     return nwords;
3426     }
3427    
3428 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3429 dtc 1.1 {
3430     struct vector *vector;
3431     int length, nwords;
3432    
3433     gc_assert(Pointerp(object));
3434    
3435     vector = (struct vector *) PTR(object);
3436     length = fixnum_value(vector->length);
3437     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3438    
3439     return copy_large_unboxed_object(object, nwords);
3440     }
3441    
3442 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3443 dtc 1.1 {
3444     struct vector *vector;
3445     int length, nwords;
3446    
3447     vector = (struct vector *) where;
3448     length = fixnum_value(vector->length);
3449     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3450    
3451     return nwords;
3452     }
3453    
3454    
3455 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3456 dtc 1.1 {
3457     struct vector *vector;
3458     int length, nwords;
3459    
3460     vector = (struct vector *) where;
3461     length = fixnum_value(vector->length);
3462     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3463    
3464     return nwords;
3465     }
3466    
3467 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3468 dtc 1.1 {
3469     struct vector *vector;
3470     int length, nwords;
3471    
3472     gc_assert(Pointerp(object));
3473    
3474     vector = (struct vector *) PTR(object);
3475     length = fixnum_value(vector->length);
3476     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3477    
3478     return copy_large_unboxed_object(object, nwords);
3479     }
3480    
3481 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3482 dtc 1.1 {
3483     struct vector *vector;
3484     int length, nwords;
3485    
3486     vector = (struct vector *) where;
3487     length = fixnum_value(vector->length);
3488     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3489    
3490     return nwords;
3491     }
3492    
3493    
3494 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3495 dtc 1.1 {
3496     struct vector *vector;
3497     int length, nwords;
3498    
3499     vector = (struct vector *) where;
3500     length = fixnum_value(vector->length);
3501     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3502    
3503     return nwords;
3504     }
3505    
3506 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3507 dtc 1.1 {
3508     struct vector *vector;
3509     int length, nwords;
3510    
3511     gc_assert(Pointerp(object));
3512    
3513     vector = (struct vector *) PTR(object);
3514     length = fixnum_value(vector->length);
3515     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3516    
3517     return copy_large_unboxed_object(object, nwords);
3518     }
3519    
3520 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3521 dtc 1.1 {
3522     struct vector *vector;
3523     int length, nwords;
3524    
3525     vector = (struct vector *) where;
3526     length = fixnum_value(vector->length);
3527     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3528    
3529     return nwords;
3530     }
3531    
3532    
3533 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3534 dtc 1.1 {
3535     struct vector *vector;
3536     int length, nwords;
3537    
3538     vector = (struct vector *) where;
3539     length = fixnum_value(vector->length);
3540     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3541    
3542     return nwords;
3543     }
3544    
3545 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3546 dtc 1.1 {
3547     struct vector *vector;
3548     int length, nwords;
3549    
3550     gc_assert(Pointerp(object));
3551    
3552     vector = (struct vector *) PTR(object);
3553     length = fixnum_value(vector->length);
3554     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3555    
3556     return copy_large_unboxed_object(object, nwords);
3557     }
3558    
3559 dtc 1.14 static int size_vector_unsigned_byte_16(lispobj *where)
3560 dtc 1.1 {
3561     struct vector *vector;
3562     int length, nwords;
3563    
3564     vector = (struct vector *) where;
3565     length = fixnum_value(vector->length);
3566     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3567    
3568     return nwords;
3569     }
3570    
3571    
3572 dtc 1.14 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3573 dtc 1.1 {
3574     struct vector *vector;
3575     int length, nwords;
3576    
3577     vector = (struct vector *) where;
3578     length = fixnum_value(vector->length);
3579     nwords = CEILING(length + 2, 2);
3580    
3581     return nwords;
3582     }
3583    
3584 dtc 1.14 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3585 dtc 1.1 {
3586     struct vector *vector;
3587     int length, nwords;
3588    
3589     gc_assert(Pointerp(object));
3590    
3591     vector = (struct vector *) PTR(object);
3592     length = fixnum_value(vector->length);
3593     nwords = CEILING(length + 2, 2);
3594    
3595     return copy_large_unboxed_object(object, nwords);
3596     }
3597    
3598 dtc 1.14 static int size_vector_unsigned_byte_32(lispobj *where)
3599 dtc 1.1 {
3600     struct vector *vector;
3601     int length, nwords;
3602    
3603     vector = (struct vector *) where;
3604     length = fixnum_value(vector->length);
3605     nwords = CEILING(length + 2, 2);
3606    
3607     return nwords;
3608     }
3609    
3610    
3611 dtc 1.14 static int scav_vector_single_float(lispobj *where, lispobj object)
3612 dtc 1.1 {
3613     struct vector *vector;
3614     int length, nwords;
3615    
3616     vector = (struct vector *) where;
3617     length = fixnum_value(vector->length);
3618     nwords = CEILING(length + 2, 2);
3619    
3620     return nwords;
3621     }
3622    
3623 dtc 1.14 static lispobj trans_vector_single_float(lispobj object)
3624 dtc 1.1 {
3625     struct vector *vector;
3626     int length, nwords;
3627    
3628     gc_assert(Pointerp(object));
3629    
3630     vector = (struct vector *) PTR(object);
3631     length = fixnum_value(vector->length);
3632     nwords = CEILING(length + 2, 2);
3633    
3634     return copy_large_unboxed_object(object, nwords);
3635     }
3636    
3637 dtc 1.14 static int size_vector_single_float(lispobj *where)
3638 dtc 1.1 {
3639     struct vector *vector;
3640     int length, nwords;
3641    
3642     vector = (struct vector *) where;
3643     length = fixnum_value(vector->length);
3644     nwords = CEILING(length + 2, 2);
3645    
3646     return nwords;
3647     }
3648    
3649    
3650 dtc 1.14 static int scav_vector_double_float(lispobj *where, lispobj object)
3651 dtc 1.1 {
3652     struct vector *vector;
3653     int length, nwords;
3654    
3655     vector = (struct vector *) where;
3656     length = fixnum_value(vector->length);
3657     nwords = CEILING(length * 2 + 2, 2);
3658    
3659     return nwords;
3660     }
3661    
3662 dtc 1.14 static lispobj trans_vector_double_float(lispobj object)
3663 dtc 1.1 {
3664     struct vector *vector;
3665     int length, nwords;
3666    
3667     gc_assert(Pointerp(object));
3668    
3669     vector = (struct vector *) PTR(object);
3670     length = fixnum_value(vector->length);
3671     nwords = CEILING(length * 2 + 2, 2);
3672    
3673     return copy_large_unboxed_object(object, nwords);
3674     }
3675    
3676 dtc 1.14 static int size_vector_double_float(lispobj *where)
3677 dtc 1.1 {
3678     struct vector *vector;
3679     int length, nwords;
3680    
3681     vector = (struct vector *) where;
3682     length = fixnum_value(vector->length);
3683     nwords = CEILING(length * 2 + 2, 2);
3684    
3685     return nwords;
3686     }
3687    
3688    
3689 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3690 dtc 1.14 static int scav_vector_long_float(lispobj *where, lispobj object)
3691 dtc 1.11 {
3692     struct vector *vector;
3693     int length, nwords;
3694    
3695     vector = (struct vector *) where;
3696     length = fixnum_value(vector->length);
3697     nwords = CEILING(length * 3 + 2, 2);
3698    
3699     return nwords;
3700     }
3701    
3702 dtc 1.14 static lispobj trans_vector_long_float(lispobj object)
3703 dtc 1.11 {
3704     struct vector *vector;
3705     int length, nwords;
3706    
3707     gc_assert(Pointerp(object));
3708    
3709     vector = (struct vector *) PTR(object);
3710     length = fixnum_value(vector->length);
3711     nwords = CEILING(length * 3 + 2, 2);
3712    
3713     return copy_large_unboxed_object(object, nwords);
3714     }
3715    
3716 dtc 1.14 static int size_vector_long_float(lispobj *where)
3717 dtc 1.11 {
3718     struct vector *vector;
3719     int length, nwords;
3720    
3721     vector = (struct vector *) where;
3722     length = fixnum_value(vector->length);
3723     nwords = CEILING(length * 3 + 2, 2);
3724    
3725     return nwords;
3726     }
3727     #endif
3728    
3729    
3730 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3731 dtc 1.14 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3732 dtc 1.1 {
3733     struct vector *vector;
3734     int length, nwords;
3735    
3736     vector = (struct vector *) where;
3737     length = fixnum_value(vector->length);
3738     nwords = CEILING(length * 2 + 2, 2);
3739    
3740     return nwords;
3741     }
3742    
3743 dtc 1.14 static lispobj trans_vector_complex_single_float(lispobj object)
3744 dtc 1.1 {
3745     struct vector *vector;
3746     int length, nwords;
3747    
3748     gc_assert(Pointerp(object));
3749    
3750     vector = (struct vector *) PTR(object);
3751     length = fixnum_value(vector->length);
3752     nwords = CEILING(length * 2 + 2, 2);
3753    
3754     return copy_large_unboxed_object(object, nwords);
3755     }
3756    
3757 dtc 1.14 static int size_vector_complex_single_float(lispobj *where)
3758 dtc 1.1 {
3759     struct vector *vector;
3760     int length, nwords;
3761    
3762     vector = (struct vector *) where;
3763     length = fixnum_value(vector->length);
3764     nwords = CEILING(length * 2 + 2, 2);
3765    
3766     return nwords;
3767     }
3768     #endif
3769    
3770     #ifdef type_SimpleArrayComplexDoubleFloat
3771 dtc 1.14 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3772 dtc 1.1 {
3773     struct vector *vector;
3774     int length, nwords;
3775    
3776     vector = (struct vector *) where;
3777