/[cmucl]/src/lisp/cgc.c
ViewVC logotype

Contents of /src/lisp/cgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.14 - (show annotations)
Wed Mar 19 09:17:10 2008 UTC (6 years, 1 month ago) by cshapiro
Branch: MAIN
CVS Tags: sparc-tramp-assem-base, post-merge-intl-branch, merged-unicode-utf16-extfmt-2009-06-11, unicode-utf16-extfmt-2009-03-27, snapshot-2008-08, snapshot-2008-09, sse2-packed-2008-11-12, snapshot-2008-05, snapshot-2008-06, snapshot-2008-07, intl-branch-working-2010-02-19-1000, unicode-string-buffer-impl-base, sse2-base, release-20b-pre1, release-20b-pre2, unicode-string-buffer-base, sse2-packed-base, sparc-tramp-assem-2010-07-19, amd64-dd-start, release-19f-pre1, snapshot-2008-12, snapshot-2008-11, intl-2-branch-base, GIT-CONVERSION, unicode-utf16-sync-2008-12, cross-sol-x86-merged, label-2009-03-16, release-19f-base, merge-sse2-packed, merge-with-19f, intl-branch-working-2010-02-11-1000, unicode-snapshot-2009-05, unicode-snapshot-2009-06, unicode-utf16-sync-2008-07, unicode-utf16-sync-2008-09, unicode-utf16-extfmts-sync-2008-12, RELEASE_20b, snapshot-2008-04, unicode-utf16-sync-label-2009-03-16, RELEASE_19f, release-20a-base, cross-sol-x86-base, unicode-utf16-char-support-2009-03-26, unicode-utf16-char-support-2009-03-25, unicode-utf16-extfmts-pre-sync-2008-11, snapshot-2008-10, snapshot-2010-12, snapshot-2010-11, unicode-utf16-sync-2008-11, snapshot-2011-09, snapshot-2011-06, snapshot-2011-07, snapshot-2011-04, snapshot-2011-02, snapshot-2011-03, snapshot-2011-01, pre-merge-intl-branch, snapshot-2010-05, snapshot-2010-04, snapshot-2010-07, snapshot-2010-06, snapshot-2010-01, snapshot-2010-03, snapshot-2010-02, snapshot-2010-08, label-2009-03-25, cross-sol-x86-2010-12-20, sse2-checkpoint-2008-10-01, intl-branch-2010-03-18-1300, sse2-merge-with-2008-11, sse2-merge-with-2008-10, RELEASE_20a, release-20a-pre1, snapshot-2009-11, snapshot-2009-12, unicode-utf16-extfmt-2009-06-11, portable-clx-import-2009-06-16, unicode-utf16-string-support, cross-sparc-branch-base, intl-branch-base, unicode-utf16-base, portable-clx-base, snapshot-2009-08, snapshot-2009-02, snapshot-2009-01, snapshot-2009-07, snapshot-2009-05, snapshot-2009-04, HEAD
Branch point for: RELEASE-19F-BRANCH, portable-clx-branch, cross-sparc-branch, RELEASE-20B-BRANCH, unicode-string-buffer-branch, sparc-tramp-assem-branch, sse2-packed-branch, RELEASE-20A-BRANCH, amd64-dd-branch, unicode-string-buffer-impl-branch, intl-branch, unicode-utf16-branch, cross-sol-x86-branch, sse2-branch, intl-2-branch, unicode-utf16-extfmt-branch
Changes since 1.13: +18 -18 lines
File MIME type: text/plain
Always use prototype style function definitions.  Consistently use the
void keyword to specify empty parameter lists.
1 /* cgc.c -*- Mode: C; comment-column: 40; -*-
2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/cgc.c,v 1.14 2008/03/19 09:17:10 cshapiro Rel $
3 *
4 * Conservative Garbage Collector for CMUCL x86.
5 *
6 * This code is based on software written by William Lott, and
7 * Public Domain codes from Carnegie Mellon University, and has
8 * been placed in the Public Domain.
9 *
10 * Received from William 27 Jul 95.
11 *
12 * Debug, FreeBSD hooks, and integration by Paul Werkowski
13 *
14 *
15 */
16 #include <stdio.h>
17 #include <assert.h>
18 #include <signal.h>
19 #include <string.h>
20 #include "os.h" /* for SetSymbolValue */
21 #include "globals.h" /* For dynamic_space_size */
22 #include "x86-validate.h" /* for memory layout */
23 #include "x86-lispregs.h"
24 #include "lisp.h" /* for object defs */
25 #include "interrupt.h" /* interrupt_handlers */
26 #include "internals.h"
27 #include "cgc.h"
28
29 #if !defined MIN
30 #define MIN(a,b)(((a)<(b))?(a):(b))
31 #define MAX(a,b)(((a)>(b))?(a):(b))
32 #endif
33
34 #include <unistd.h>
35 #include <stdlib.h>
36 #if defined unix
37 #include <sys/param.h>
38 #endif
39 #include <sys/types.h>
40 #include <sys/time.h>
41 #include <sys/resource.h>
42
43
/* Conditional debug print: when T is non-zero, print EXP (a parenthesized
 * printf argument list) and flush stdout.  Wrapped in do{}while(0) so the
 * macro behaves as a single statement — the old bare if(t){...} form broke
 * "if (x) dprintf(...); else ..." with a dangling-else/stray-semicolon error.
 */
#define dprintf(t,exp) do { if (t) { printf exp; fflush(stdout); } } while (0)
45
46 /* Object representation details. The allocator/collector knows
47 * almost nothing about lisp internals and is fairly general.
48 */
49
50 #define ALIGN_BITS 3
51 #define ALIGN_BYTES (1<<ALIGN_BITS)
52 #define ALIGNEDP(addr) ((((int)addr)&(ALIGN_BYTES-1)) == 0)
53
54 /* Type of an object. */
55 typedef struct object {
56 long header;
57 struct object *data[1];
58 } *obj_t;
59
60 /* Just leave unused space */
61 #define NOTE_EMPTY(ptr,bytes) {}
62
63
64 /* Collector datastructures */
65
66 #define BLOCK_BITS 16
67 #define BLOCK_BYTES (1<<BLOCK_BITS)
68 #define BLOCK_NUMBER(ptr) (((long)(ptr))>>BLOCK_BITS)
69 #define BLOCK_ADDRESS(num) ((void *)((num)<<BLOCK_BITS))
70
71 #define CHUNK_BITS 9
72 #define CHUNK_BYTES (1<<CHUNK_BITS)
73 #define CHUNK_NUMBER(ptr) (((long)(ptr))>>CHUNK_BITS)
74 #define CHUNK_ADDRESS(num) ((void *)((num)<<CHUNK_BITS))
75
76 #define BLOCK_CHUNKS (1<<(BLOCK_BITS-CHUNK_BITS))
77
78
79 #define ROUNDDOWN(val,x) ((val)&~((x)-1))
80 #define ROUNDUP(val,x) ROUNDDOWN((val)+(x)-1,x)
81
82 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
83 __FILE__, __LINE__)
84
85 #if 0
86 #define gc_assert(ex) {if (!(ex)) gc_abort();}
87 #else
88 #define gc_assert(ex)
89 #endif
90
91 char *alloc(int);
92
93
94 struct cluster {
95 /* Link to the next cluster. */
96 struct cluster *next;
97
98 /* The number of blocks in this cluster. */
99 int num_blocks;
100
101 /* Pointer to the first region. */
102 struct region *first_region;
103
104 /* Table index by the chunk number of some pointer minus the chunk */
105 /* number for the first region giving the number of chunks past */
106 /* the chunk holding the region header that spans that pointer. */
107 /* Actually, it might not be enough. So after backing up that far, */
108 /* try again. */
109 unsigned char region_offset[1];
110 };
111
112 /* The first word of this is arranged to look like a fixnum
113 * so as not to confuse 'room'.
114 */
115 struct region {
116 unsigned
117 res1:2, num_chunks:16, contains_small_objects:1, clean:1, hole:7;
118 struct region **prev;
119 struct region *next;
120 struct space *space;
121 };
122
123 #define REGION_OVERHEAD ROUNDUP(sizeof(struct region), ALIGN_BYTES)
124
125
126 struct space {
127 struct region *regions;
128 struct region **regions_tail;
129 char *alloc_ptr;
130 char *alloc_end;
131 };
132
133 /* Chain of all the clusters. */
134 struct cluster *clusters = NULL;
135 static int num_clusters = 0; /* for debugging */
136 int cgc_debug = 0; /* maybe set from Lisp */
137
138 /* Table indexed by block number giving the cluster that block is part of. */
139 static struct cluster **block_table = NULL;
140
141 /* The allocated memory block_table is offset from. */
142 static struct cluster **block_table_base = NULL;
143
144 /* The maximum bounds on the heap. */
145 static void *heap_base = NULL;
146 static void *heap_end = NULL;
147
148 /* The two dynamic spaces. */
149 static struct space space_0 = { NULL };
150 static struct space space_1 = { NULL };
151
152 /* Pointers it whichever dynamic space is currently newspace and oldspace */
153 static struct space *newspace = NULL;
154 static struct space *oldspace = NULL;
155
156 /* Free lists of regions. */
157 static struct region *small_region_free_list = NULL;
158 static struct region *large_region_free_list = NULL;
159 static void move_to_newspace(struct region *region);
160
161 #if defined TESTING
162 static void
163 print_region(struct region *r)
164 {
165 dprintf(1, ("[region %x %d <%x %x> %x]\n",
166 r, r->num_chunks, r->prev, r->next, r->space));
167 }
168 static void
169 print_regions(struct region *r, char *str)
170 {
171 printf("Regions %s:\n", str);
172 for (; r != NULL; r = r->next)
173 print_region(r);
174 }
175
176 static void
177 print_space(struct space *s)
178 {
179 struct region *r = s->regions;
180
181 dprintf(1, ("[space %x %s %s <%x - %x>]\n",
182 s,
183 (s == &space_0) ? "S0" : "S1",
184 (s == newspace) ? "NewSpace" : "OldSpace",
185 s->alloc_ptr, s->alloc_end));
186 print_regions(r, "");
187
188 }
189
190 void
191 print_spaces(void)
192 {
193 print_space(&space_0);
194 print_space(&space_1);
195 print_regions(large_region_free_list, "LRFL");
196 print_regions(small_region_free_list, "SRFL");
197 }
198
199 void
200 print_cluster(struct cluster *cluster)
201 {
202 printf("[cluster %x >%x %d]\n", cluster, cluster->next,
203 cluster->num_blocks);
204 print_regions(cluster->first_region, "cluster");
205 }
206
207 void
208 print_clusters(void)
209 {
210 struct cluster *cluster;
211
212 for (cluster = clusters; cluster != NULL; cluster = cluster->next)
213 print_cluster(cluster);
214 }
215 #endif /* TESTING */
216
217
218 /* Allocation/deallocation routines */
219
220 static void
221 init_region(struct region *region, int nchunks)
222 {
223 int region_block = BLOCK_NUMBER(region);
224 struct cluster *cluster = block_table[region_block];
225 int offset = CHUNK_NUMBER(region) - CHUNK_NUMBER(cluster->first_region);
226 int i;
227
228 dprintf(0, ("init region %x %d\n", region, nchunks));
229 *(long *) region = 0; /* clear fields */
230 region->num_chunks = nchunks;
231 if (nchunks > UCHAR_MAX) {
232 for (i = 0; i < UCHAR_MAX; i++)
233 cluster->region_offset[offset + i] = i;
234 for (; i < nchunks; i++)
235 cluster->region_offset[offset + i] = UCHAR_MAX;
236 } else {
237 for (i = 0; i < nchunks; i++)
238 cluster->region_offset[offset + i] = i;
239 }
240 }
241
242 static struct region *
243 maybe_alloc_large_region(int nchunks)
244 {
245 struct region *region, **prev;
246
247 prev = &large_region_free_list;
248 while ((region = *prev) != NULL) {
249 if (region->num_chunks >= nchunks) {
250 if (region->num_chunks == nchunks)
251 *prev = region->next;
252 else {
253 struct region *new
254 =
255
256 (struct region *) ((char *) region + nchunks * CHUNK_BYTES);
257 init_region(new, region->num_chunks - nchunks);
258 new->next = region->next;
259 new->prev = NULL;
260 new->space = NULL;
261 *prev = new;
262 region->num_chunks = nchunks;
263 }
264 region->next = NULL;
265 region->prev = NULL;
266 region->space = NULL;
267 return region;
268 }
269 prev = &region->next;
270 }
271 return NULL;
272 }
273
274
275 /* from os_zero */
276 static void
277 cgc_zero(addr, length)
278 os_vm_address_t addr;
279 os_vm_size_t length;
280 {
281 os_vm_address_t block_start = os_round_up_to_page(addr);
282 os_vm_address_t end = addr + length;
283 os_vm_size_t block_size;
284
285
286 if (block_start > addr)
287 memset((char *) addr, 0, MIN(block_start - addr, length))
288
289 if (block_start < end) {
290 length -= block_start - addr;
291
292 block_size = os_trunc_size_to_page(length);
293
294 if (block_size < length)
295 memset((char *) block_start + block_size, 0,
296 length - block_size);
297
298 if (block_size != 0) {
299 /* Now deallocate and allocate the block so that it */
300 /* faults in zero-filled. */
301
302 os_invalidate(block_start, block_size);
303 addr = os_validate(block_start, block_size);
304
305 if (addr == NULL || addr != block_start)
306 fprintf(stderr,
307 "cgc_zero: block moved, 0x%08x ==> 0x%08x!\n",
308 block_start, addr);
309 }
310 }
311 }
312
/* compact_cluster -- sweep one cluster after a collection.
 * Walks the cluster's regions in address order.  Runs of adjacent regions
 * that did NOT survive into newspace are coalesced into one free region,
 * whose payload (past the header) is zeroed, and which is then appended to
 * a local small- or large-region list.  Finally those local lists are
 * spliced onto the front of the global free lists (the global lists are
 * reset by compact_free_regions before this is called).
 */
313 static void
314 compact_cluster(struct cluster *cluster)
315 {
316 int show = 0;
317 struct region *region = cluster->first_region;
318 struct region *end =
319 (struct region *) ((char *) region + cluster->num_blocks * BLOCK_BYTES);
320 int grown = 0;
/* Sanity bound: a merged region can never exceed the whole cluster. */
321 unsigned max_chunks = cluster->num_blocks * BLOCK_CHUNKS;
/* Local accumulator lists, built via tail pointers so order is preserved. */
322 struct region *large_additions = NULL;
323 struct region **large_prev = &large_additions;
324 struct region *small_additions = NULL;
325 struct region **small_prev = &small_additions;
326
327 dprintf(show, ("compact cluster %x\n", cluster));
328 while (region < end) {
329 struct region *next =
330 (struct region *) ((char *) region +
331
332 region->num_chunks * CHUNK_BYTES);
/* Anything not in newspace is garbage/free; merge with a free successor. */
333 if (region->space != newspace) { /* was == NULL */
334 if (next < end && next->space != newspace) { /* was == NULL */
335 gc_assert(region >= cluster->first_region);
336 gc_assert(region->space == NULL);
337 gc_assert(next->space == NULL);
338 gc_assert(region->num_chunks > 0);
339 gc_assert(next->num_chunks > 0);
340 gc_assert((region->num_chunks + next->num_chunks) <=
341 max_chunks);
/* Absorb NEXT into REGION; loop again without advancing so further
 * free successors can be absorbed too. */
342 region->num_chunks += next->num_chunks;
343 grown = 1;
344 } else {
/* End of a free run: if we merged anything, rebuild the header and
 * offset table for the enlarged region. */
345 if (grown) {
346 init_region(region, region->num_chunks);
347 region->space = NULL;
348 grown = 0;
349 }
350 {
351 int ovh = REGION_OVERHEAD;
352
/* Zero the payload so a restarted image sees clean memory. */
353 cgc_zero((os_vm_address_t) ((char *) region + ovh),
354 (os_vm_size_t) (region->num_chunks * CHUNK_BYTES) -
355 ovh);
356 }
357
/* Single-chunk regions feed the small list, the rest the large list. */
358 if (region->num_chunks == 1) {
359 *small_prev = region;
360 small_prev = &region->next;
361 } else {
362 *large_prev = region;
363 large_prev = &region->next;
364 }
365 region = next;
366 }
367 } else
368 region = next;
369 }
370
/* Prepend the gathered regions to the global free lists. */
371 *large_prev = large_region_free_list;
372 large_region_free_list = large_additions;
373 *small_prev = small_region_free_list;
374 small_region_free_list = small_additions;
375 }
376
377 static void
378 compact_free_regions(void)
379 {
380 struct cluster *cluster;
381
382 large_region_free_list = NULL;
383 small_region_free_list = NULL;
384
385 for (cluster = clusters; cluster != NULL; cluster = cluster->next)
386 compact_cluster(cluster);
387 }
388
389 /* WL code arranged to allocate new space via the sbrk() mechanism.
390 * However, I am going to start by allocating from the standard dynamic
391 * space. The idea is to use the normal allocation scheme for initial
392 * system build and switch to the cgc allocator when starting up a
393 * saved image when dynamic space is hopefully clean.
394 */
395 static struct region *
396 new_region(int nblocks)
397 {
398 /* take from existing dynamic space */
399 char *new = (char *) SymbolValue(ALLOCATION_POINTER);
400 struct region *region =
401
402 (struct region *) (ROUNDUP((long) new, BLOCK_BYTES));
403 int bn = BLOCK_NUMBER(region);
404
405 new += (nblocks * BLOCK_BYTES + ((char *) region - new));
406 SetSymbolValue(ALLOCATION_POINTER, (lispobj) new);
407 return region;
408 }
409
410 static void
411 new_cluster(int min_blocks)
412 {
413 int nblocks = min_blocks < 4 ? 4 : min_blocks;
414 int nchunks = nblocks << (BLOCK_BITS - CHUNK_BITS);
415 int i;
416 struct cluster *cluster = malloc(sizeof(struct cluster) + nchunks - 1);
417 struct region *region = new_region(nblocks);
418
419 int bn = BLOCK_NUMBER(region);
420
421 dprintf(cgc_debug, ("new cluster %x region@%x\n", cluster, region));
422 for (i = 0; i < nblocks; i++)
423 block_table[bn + i] = cluster;
424
425 num_clusters++;
426 cluster->next = clusters;
427 clusters = cluster;
428 cluster->num_blocks = nblocks;
429 cluster->first_region = region;
430
431 init_region(region, nchunks);
432
433 region->next = large_region_free_list;
434 large_region_free_list = region;
435 region->prev = NULL;
436 region->space = NULL;
437 }
438
439 unsigned long bytes_allocated = 0; /* Seen by (dynamic-usage) */
440 static unsigned long auto_gc_trigger = 0;
441 static int maybe_gc_called = 0;
442
443 static struct region *
444 alloc_large_region(int nchunks)
445 {
446 struct region *region;
447
448 {
449 region = maybe_alloc_large_region(nchunks);
450
451 if (region == NULL) {
452 new_cluster((nchunks + BLOCK_CHUNKS - 1) >>
453 (BLOCK_BITS - CHUNK_BITS));
454 region = maybe_alloc_large_region(nchunks);
455 gc_assert(region != NULL);
456 }
457 }
458 gc_assert(region->space == NULL);
459 return region;
460 }
461
462 static struct region *
463 alloc_small_region(void)
464 {
465 struct region *region = small_region_free_list;
466
467 if (region == NULL)
468 region = alloc_large_region(1);
469 else
470 small_region_free_list = region->next;
471 region->next = NULL;
472 region->prev = NULL;
473 region->space = NULL;
474 move_to_newspace(region);
475 return region;
476 }
477
478 static int chunks_freed = 0;
479
480 static void
481 free_region(struct region *region)
482 {
483 gc_assert(region->space && region->space == oldspace);
484 gc_assert(region->num_chunks > 0);
485
486 region->space = NULL; /* for compact_cluster? */
487 region->prev = NULL; /* housekeeping I hope */
488 chunks_freed += region->num_chunks;
489
490 if (region->num_chunks == 1) {
491 region->next = small_region_free_list;
492 small_region_free_list = region;
493 } else {
494 region->next = large_region_free_list;
495 large_region_free_list = region;
496 }
497 }
498
499 static void *
500 alloc_large(int nbytes)
501 {
502 int nchunks = (nbytes + REGION_OVERHEAD + CHUNK_BYTES - 1) >> CHUNK_BITS;
503 struct region *region = alloc_large_region(nchunks);
504
505 region->contains_small_objects = 0;
506 region->next = NULL;
507 region->prev = NULL;
508 region->space = NULL;
509 bytes_allocated += region->num_chunks * CHUNK_BYTES;
510 move_to_newspace(region);
511 return (char *) region + REGION_OVERHEAD;
512 }
513
/* cgc_alloc -- the allocator's entry point.  Objects too big to fit in a
 * chunk (minus the region header) get their own large region; everything
 * else is bump-allocated out of the current small-object region in
 * newspace, opening a fresh one-chunk region when the current one cannot
 * hold NBYTES.  Results are ALIGN_BYTES-aligned.
 */
514 void *
515 cgc_alloc(int nbytes)
516 {
517 void *res;
518
519 dprintf(0, ("alloc %d\n", nbytes));
520
521 if (nbytes > (CHUNK_BYTES - REGION_OVERHEAD))
522 res = alloc_large(nbytes);
523 else {
524 struct space *space = newspace;
525
/* Not enough room left in the current bump region? */
526 if ((space->alloc_ptr + nbytes) > space->alloc_end) {
527 struct region *region;
528
529 if (space->alloc_ptr != NULL) {
530 int hole = space->alloc_end - space->alloc_ptr;
531
532 if (hole >= ALIGN_BYTES)
533 /* This wastes the space, eg suppose one cons
534 * has been allocated then a request for
535 * a maximum sized small obj comes in. I'd like
536 * to remember that there is still a lot of
537 * room left in this region. Maybe I could actually
538 * use the small_region_free_list in some way.
539 */
540 NOTE_EMPTY(space->alloc_ptr, hole);
541 }
/* Open a fresh one-chunk region and point the bump window at it. */
542 region = alloc_small_region();
543 region->contains_small_objects = 1;
544 space->alloc_ptr = (char *) region + REGION_OVERHEAD;
545 space->alloc_end = (char *) region + CHUNK_BYTES;
546 bytes_allocated += region->num_chunks * CHUNK_BYTES;
547 }
548
/* Bump-allocate, keeping the pointer aligned. */
549 res = space->alloc_ptr;
550 space->alloc_ptr += ROUNDUP(nbytes, ALIGN_BYTES);
551 }
552 return res;
553 }
554
555
/* move_to_newspace -- promote REGION to newspace.  If it is currently on
 * the oldspace list it is unlinked first; a region already in newspace is
 * left alone.  Note the list representation: region->prev is the address
 * of the previous element's 'next' slot (or of the list head), not a
 * back-pointer to the element itself.
 */
556 static void
557 move_to_newspace(struct region *region)
558 {
559 /* (maybe) unlink region from oldspace and add to tail of
560 * newspace regions. Don't attempt to move a region that
561 * is already in newspace.
562 */
563 struct space *space = newspace;
564
565 if (region->space == oldspace) {
566 /* Remove region from list. The prev slot holds
567 * the address of the 'next' slot of the previous
568 * list entry, not a pointer to that region (why?)
569 */
570 *region->prev = region->next;
571 if (region->next)
572 region->next->prev = region->prev;
/* If it was the tail, the tail pointer must back up one slot. */
573 if (region->space->regions_tail == &region->next)
574 region->space->regions_tail = region->prev;
575 }
576 /* Append to newspace unless it has already been promoted. */
577 if (region->space != newspace) {
578 region->prev = space->regions_tail;
579 region->next = NULL;
580 *space->regions_tail = region;
581 space->regions_tail = &region->next;
582 region->space = space;
583 }
584 }
585
586 static struct region *
587 find_region(void *ptr)
588 {
589 struct cluster *cluster;
590 int cluster_chunk_num;
591 int chunk_num;
592 unsigned char delta;
593
594 ptr = (void *) ((int) ptr & ~0x3);
595 if (ptr < heap_base || ptr >= heap_end)
596 return NULL;
597
598 cluster = block_table[BLOCK_NUMBER(ptr)];
599 if (cluster == NULL)
600 return NULL;
601
602 if (ptr < (void *) cluster->first_region)
603 return NULL;
604
605 cluster_chunk_num = CHUNK_NUMBER(cluster->first_region);
606 chunk_num = CHUNK_NUMBER(ptr);
607
608 while (delta = cluster->region_offset[chunk_num - cluster_chunk_num])
609 chunk_num -= delta;
610
611 return CHUNK_ADDRESS(chunk_num);
612 }
613
614 /* Interface to std collector */
615 static inline boolean
616 from_space_p(lispobj obj)
617 {
618 struct region *region = find_region((void *) obj);
619
620 return (region != NULL && region->space == oldspace);
621 }
622 static inline boolean
623 new_space_p(lispobj obj)
624 {
625 struct region *region = find_region((void *) obj);
626
627 return (region != NULL && region->space == newspace);
628 }
629 static inline boolean
630 static_space_p(lispobj obj)
631 {
632 return (STATIC_SPACE_START < obj
633 && obj < SymbolValue(STATIC_SPACE_FREE_POINTER));
634 }
635
636 /* Predicate that returns true if an object is a pointer. */
637 #undef POINTERP
638 #define POINTERP(obj) Pointerp((obj)->header)
639
640 /* Predicate that returns true if an object has been forwarded. */
641 #define FORWARDED(obj) ((obj_t)(obj)->header == (obj_t)0x1)
642
643 /* Returns the forwarding pointer for the given object. */
644 #define FORWARDING_PTR(obj) ((lispobj)(obj)->data[0])
645
646 /* Marks obj as forwarded to new */
647 #define DEPOSIT_FORWARDING_PTR(obj,new) \
648 ((obj_t)(obj)->header = 0x1, (obj_t)(obj)->data[0] = (obj_t)new)
649
650 /* Returns an obj_t for the object starting at addr */
651 #define OBJECT_AT(addr) ((obj_t)(addr))
652
653 /* Returns the size (in bytes) of obj. */
654 #define OBJECT_SIZE(obj) (sizeOfObject((obj_t)obj)<<2)
655
656 /* Scavenges an object. */
657 #define SCAVENGE_OBJECT(obj) scavengex((lispobj*)obj)
658
659 #if 0
660 /* Makes a region of memory look like some kind of object. */
661 #define NOTE_EMPTY(ptr,bytes) \
662 (((obj_t)ptr)->header = (((bytes+ALIGN_BYTES-1)>>ALIGN_BITS)<<8) | 1)
663 #endif
664
665 static unsigned long bytes_copied = 0;
666
667 # define HAVE_FASTCOPY
668 #if defined HAVE_FASTCOPY
669 #define COPYDUAL(a,b,c) fastcopy16(a,b,c)
670 void fastcopy16(void *, void *, size_t);
671 #else
672 #define COPYDUAL(a,b,c) memmove(a,b,c)
673 #endif
674 static inline lispobj
675 copy(lispobj taggedobj)
676 {
677 obj_t source = (obj_t) PTR(taggedobj);
678 int nbytes = OBJECT_SIZE(source);
679
680 gc_assert(Pointerp(taggedobj));
681 gc_assert(!(nbytes & (ALIGN_BYTES - 1)));
682 {
683 int lowtag = LowtagOf(taggedobj);
684 obj_t newobj = cgc_alloc(nbytes);
685
686 COPYDUAL(newobj, source, nbytes);
687 bytes_copied += nbytes;
688 return ((lispobj) newobj | lowtag);
689 }
690 }
691
692
693 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
694 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
695
696 #define WEAK_POINTER_NWORDS \
697 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
698 static struct weak_pointer *weak_pointers;
699
700
701 /* Scavenging:
702 * CMU CL objects can be classified as BOXED, UNBOXED or other.
703 * Boxed objects have a header containing length and type followed
704 * by LENGTH tagged object descriptors which may be pointers.
705 * UNBOXED objects have a header but the data is other than
706 * tagged descriptors, such as floats, bignums, saps or code.
707 * Others (code) contain a mix of boxed and unboxed and some
708 * (cons) are like BOXED but without header. The scavenger needs
709 * to consider these different kinds of objects. I will use a
710 * table indexed by type to detect the simple cases of boxed
711 * or unboxed.
712 */
713 #define IMMED_OR_LOSE(thing) gc_assert(sct[TypeOf(thing)].sc_kind == SC_IMMED)
714 static void scavenge_pointer(lispobj *);
715 static int noise = 0;
716
717 typedef struct {
718 unsigned sc_kind:3, ve_l2bits:5;
719 } OSC_t;
720
721 OSC_t
722 make_OSC(int kind, int log2bits)
723 {
724 OSC_t thing;
725
726 thing.sc_kind = kind;
727 thing.ve_l2bits = log2bits;
728 return thing;
729 }
730
731 #define SETSCT(indx,kind,logbits) sct[indx] = make_OSC(kind,logbits)
732 #define SC_ISBOXED 1
733 #define SC_UNBOXED 2
734 #define SC_IMMED 3
735 #define SC_POINTER 4
736 #define SC_VECTOR 5
737 #define SC_STRING 6
738 #define SC_OTHER 7
739 #define SC_LOSER 0
740 static OSC_t sct[256];
741
/* sizeOfObject -- size of OBJ in words, rounded up to an even number so
 * objects stay doubleword aligned.  Dispatches on the storage class table
 * (sct) indexed by the header's type byte: immediates and pointers (conses)
 * occupy one word pair, boxed/unboxed headered objects encode their length
 * in the header, vectors/strings compute words from element bit width, and
 * code objects add header words to code words.  Unknown types abort the GC.
 */
742 int
743 sizeOfObject(obj_t obj)
744 {
745 int obj_type = TypeOf(obj->header);
746 OSC_t class = sct[obj_type];
747 struct vector *vector;
748 int length = 1;
749 int nwords = 1;
750
751 switch (class.sc_kind) {
752 case SC_POINTER:
753 case SC_IMMED:
754 return 1;
755 case SC_ISBOXED:
756 case SC_UNBOXED:
/* Header value counts the words following the header. */
757 gc_assert(HeaderValue(obj->header) > 0);
758 nwords = length = HeaderValue(obj->header) + 1;
759 break;
760 case SC_STRING:
761 case SC_VECTOR:
762 {
763 int log2bits = class.ve_l2bits;
764 int bits_per_el = 1 << log2bits;
765 int extra = 0;
766 int els_per_word = 1 << (5 - log2bits);
767
/* Elements wider than a 32-bit word take 2^extra words each. */
768 if (log2bits > 5) {
769 els_per_word = 1;
770 extra = log2bits - 5;
771 }
772 length = ((struct vector *) obj)->length;
773 length = fixnum_value(length); /* Zero Length IS valid */
/* Strings carry a trailing NUL element. */
774 length += (class.sc_kind == SC_STRING);
775 length <<= extra;
776 nwords = NWORDS(length, els_per_word);
777 nwords += 2; /* header + length */
778 }
779 break;
780 case SC_OTHER:
781 switch (obj_type) {
782 case type_CodeHeader:
783 {
784 struct code *code;
785 int nheader_words, ncode_words;
786
787 code = (struct code *) obj;
788 ncode_words = fixnum_value(code->code_size);
789 nheader_words = HeaderValue(code->header);
790 nwords = ncode_words + nheader_words;
791 } break;
792 default:
793 fprintf(stderr, "GC losage: no size for other type %d\n",
794 obj_type);
795 gc_abort();
796 }
797 break;
798 default:
799 fprintf(stderr, "GC losage: no size for other type %d\n", obj_type);
800 gc_abort();
801 }
/* Round up to a doubleword (2-word) boundary. */
802 return CEILING(nwords, 2);
803 }
804
805 static void
806 init_osc(void)
807 {
808 int i;
809
810 for (i = 0; i < 256; i++)
811 SETSCT(i, SC_LOSER, 0);
812 for (i = 0; i < 32; i++) {
813 SETSCT(type_EvenFixnum | (i << 3), SC_IMMED, 0);
814 SETSCT(type_FunctionPointer | (i << 3), SC_POINTER, 0);
815 /* OtherImmediate0 */
816 SETSCT(type_ListPointer | (i << 3), SC_POINTER, 0);
817 SETSCT(type_OddFixnum | (i << 3), SC_IMMED, 0);
818 SETSCT(type_InstancePointer | (i << 3), SC_POINTER, 0);
819 /* OtherImmediate1 */
820 SETSCT(type_OtherPointer | (i << 3), SC_POINTER, 0);
821 }
822 SETSCT(type_Bignum, SC_UNBOXED, 0);
823 SETSCT(type_Ratio, SC_ISBOXED, 0);
824 SETSCT(type_SingleFloat, SC_UNBOXED, 0);
825 SETSCT(type_DoubleFloat, SC_UNBOXED, 0);
826 #if defined type_ComplexSingleFloat
827 SETSCT(type_ComplexSingleFloat, SC_UNBOXED, 0);
828 #endif
829 #if defined type_ComplexDoubleFloat
830 SETSCT(type_ComplexDoubleFloat, SC_UNBOXED, 0);
831 #endif
832 SETSCT(type_Complex, SC_ISBOXED, 0);
833 SETSCT(type_SimpleArray, SC_ISBOXED, 0);
834 SETSCT(type_SimpleString, SC_STRING, 3);
835 SETSCT(type_SimpleBitVector, SC_VECTOR, 0);
836 SETSCT(type_SimpleVector, SC_VECTOR, 5);
837 SETSCT(type_SimpleArrayUnsignedByte2, SC_VECTOR, 1);
838 SETSCT(type_SimpleArrayUnsignedByte4, SC_VECTOR, 2);
839 SETSCT(type_SimpleArrayUnsignedByte8, SC_VECTOR, 3);
840 SETSCT(type_SimpleArrayUnsignedByte16, SC_VECTOR, 4);
841 SETSCT(type_SimpleArrayUnsignedByte32, SC_VECTOR, 5);
842 #if defined type_SimpleArraySignedByte8
843 SETSCT(type_SimpleArraySignedByte8, SC_VECTOR, 3);
844 #endif
845 #if defined type_SimpleArraySignedByte16
846 SETSCT(type_SimpleArraySignedByte16, SC_VECTOR, 4);
847 #endif
848 #if defined type_SimpleArraySignedByte30
849 SETSCT(type_SimpleArraySignedByte30, SC_VECTOR, 5);
850 #endif
851 #if defined type_SimpleArraySignedByte32
852 SETSCT(type_SimpleArraySignedByte32, SC_VECTOR, 5);
853 #endif
854 SETSCT(type_SimpleArraySingleFloat, SC_VECTOR, 5);
855 SETSCT(type_SimpleArrayDoubleFloat, SC_VECTOR, 6);
856 #if defined type_SimpleArrayComplexSingleFloat
857 SETSCT(type_SimpleArrayComplexSingleFloat, SC_VECTOR, 6);
858 #endif
859 #if defined type_SimpleArrayComplexDoubleFloat
860 SETSCT(type_SimpleArrayComplexDoubleFloat, SC_VECTOR, 7);
861 #endif
862 SETSCT(type_ComplexString, SC_ISBOXED, 0);
863 SETSCT(type_ComplexBitVector, SC_ISBOXED, 0);
864 SETSCT(type_ComplexVector, SC_ISBOXED, 0);
865 SETSCT(type_ComplexArray, SC_ISBOXED, 0);
866 SETSCT(type_CodeHeader, SC_OTHER, 0);
867 SETSCT(type_FunctionHeader, SC_OTHER, 0);
868 SETSCT(type_ClosureFunctionHeader, SC_OTHER, 0);
869 SETSCT(type_ReturnPcHeader, SC_OTHER, 0);
870 SETSCT(type_ClosureHeader, SC_ISBOXED, 0);
871 SETSCT(type_FuncallableInstanceHeader, SC_ISBOXED, 0);
872 SETSCT(type_ByteCodeFunction, SC_ISBOXED, 0);
873 SETSCT(type_ByteCodeClosure, SC_ISBOXED, 0);
874 SETSCT(type_DylanFunctionHeader, SC_ISBOXED, 0);
875
876 SETSCT(type_ValueCellHeader, SC_ISBOXED, 0);
877 SETSCT(type_SymbolHeader, SC_ISBOXED, 0);
878 SETSCT(type_BaseChar, SC_IMMED, 0);
879 SETSCT(type_Sap, SC_UNBOXED, 0);
880 SETSCT(type_UnboundMarker, SC_IMMED, 0);
881 SETSCT(type_WeakPointer, SC_UNBOXED, 0);
882 SETSCT(type_InstanceHeader, SC_ISBOXED, 0);
883 SETSCT(type_Fdefn, SC_ISBOXED, 0);
884 }
885
886 static lispobj *scavenge(lispobj *, int);
887 static lispobj *scavenge_object(lispobj *);
888 static lispobj *scavengex(lispobj *);
889
890 static inline
891 scavenge_1word_obj(lispobj * addr)
892 {
893 if (Pointerp(*addr)) {
894 if (*addr != NIL && *addr != T)
895 scavenge_pointer(addr);
896 } else
897 IMMED_OR_LOSE(*addr);
898 }
899 static int debug_code = 0;
/* scav_code_header -- scavenge a code object headed at WHERE.  Scavenges
 * the boxed constants in the header section, then the boxed slots (name,
 * arglist, type) of every function header on the entry-point chain.  The
 * code/instruction words themselves are not scavenged.  Returns the total
 * object size in words, rounded to a doubleword.
 */
900 static int
901 scav_code_header(lispobj * where)
902 {
903 lispobj object = *where;
904 struct code *code;
905 int i, nheader_words, ncode_words, nwords;
906 lispobj fheaderl;
907 struct function *fheaderp;
908
909 dprintf(0, ("code: %x %x\n", where, object));
910 code = (struct code *) where;
911 ncode_words = fixnum_value(code->code_size);
912 nheader_words = HeaderValue(object);
913 nwords = ncode_words + nheader_words;
914 nwords = CEILING(nwords, 2);
915 /* Scavenge the boxed section of the code data block */
916 /* NOTE: seeing a problem where the trace_table_offset slot
917 * is a bogus list pointer instead of a fixnum such that
918 * junk gets moved to newspace which causes problems later.
919 * Purify doesn't look at that slot (a bug?). Need
920 * to figure out how it happens. Ans: from loading top-level
921 * forms that init byte-compiled functions like "defun fcn".
922 * Fix the loader to not do this and save some space!
923 */
/* Slot 0 is the header word itself, hence i starts at 1. */
924 for (i = 1; i < nheader_words; i++)
925 scavenge_1word_obj(where + i);
926
927 /* Scavenge the boxed section of each function object in the
928 * code data block.
929 */
930 fheaderl = code->entry_points;
931 while (fheaderl != NIL) {
932 fheaderp = (struct function *) PTR(fheaderl);
933 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
934 scavenge_1word_obj(&fheaderp->name);
935 scavenge_1word_obj(&fheaderp->arglist);
936 scavenge_1word_obj(&fheaderp->type);
937 fheaderl = fheaderp->next;
938 }
939 return nwords;
940 }
941
942 #define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
943 #ifdef i386
944 static void
945 scavenge_fcn_header(struct function *object)
946 {
947 struct function *fheader = object;
948 unsigned long offset = HeaderValue(fheader->header) * 4;
949
950 /* Ok, we don't transport code here, but we do need to
951 * scavenge the constants and functions (of which this is one).
952 * This should be done as part of scavenging a live code object
953 * and we could now be trying to do CPR on a corpse!
954 */
955 struct code *code = (struct code *) ((unsigned long) fheader - offset);
956
957 gc_assert(TypeOf(fheader->header) == type_FunctionHeader);
958 scav_code_header((lispobj *) code);
959 }
960
961 static int docode = 0; /* maybe not needed */
/* scav_closure_header -- scavenge a closure (or funcallable instance).
 * On x86 the function slot holds a raw code address, RAW_ADDR_OFFSET bytes
 * past the function object, so it is untagged before scavenging.  Then the
 * boxed info slots are scavenged.  Returns the object size in words,
 * rounded to a doubleword.  (NOTE(review): local 'fheader1' is declared
 * but never used.)
 */
962 static int
963 scav_closure_header(struct closure *closure)
964 {
965 /* Could also be a funcallable_instance. The x86 port has the
966 * raw code address in the function slot, not a lisp object.
967 * However, the function object is a known distance from the code.
968 */
969 lispobj fun, fheader1;
970 int i, words;
971
972 gc_assert(ALIGNEDP(closure));
973 words = HeaderValue(closure->header);
/* Convert raw code address back to a tagged function pointer. */
974 fun = closure->function - RAW_ADDR_OFFSET;
975 /* This needs to be done to get at live code. I now have no
976 * way to know if this has already been scavenged so I assume
977 * that it hasn't. Code that has been seen by purify is
978 * supposed RO and doesn't (shouldn't) need to be looked at
979 * so this maybe really redundant.
980 *
981 * I have seen one case where FI was incomplete with function
982 * and lexenv slots == 0! Is this a bug?
983 *
984 * Update, it appears this is not needed. I will disable execution
985 * by default but leave the code here in case something breaks.
986 */
987 if (docode && static_space_p(closure->function))
988 scavenge_fcn_header((struct function *) PTR(fun));
989 else /* "normal" */
990 scavenge_1word_obj(&fun);
991
992 /* Now the boxed part of the closure header. */
993 for (i = 0; i < words - 1; i++)
994 scavenge_1word_obj(&closure->info[i]);
995
/* words counts slots after the header; +1 for the header word itself. */
996 return CEILING(words + 1, 2);
997 }
998 static int fnoise = 0; /* experimental */
999 static int
1000 scav_fdefn(lispobj * where)
1001 {
1002 /* I don't know if this is really needs to be special cased here.
1003 * raw_address should look like a fixnum and function is in static
1004 * space -- unless it is pointing to something in C like closure_tramp
1005 * or maybe undefined_tramp.
1006 * Actually function is in dynamic space if it is a byte-function!
1007 * Hmm, have seen case of function slot containing 1. Bug?
1008 */
1009 struct fdefn *fdefn = (struct fdefn *) where;
1010 int words = HeaderValue(fdefn->header);
1011 int fix_func =
1012
1013 ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr);
1014 scavenge_pointer(&fdefn->name);
1015 if (fnoise && LowtagOf(fdefn->function) == type_FunctionPointer) {
1016 obj_t fcnobj = (obj_t) PTR(fdefn->function);
1017
1018 switch (TypeOf(fcnobj->header)) {
1019 /* Can only be in static space and may need to scavenge code object.
1020 * Won't be noticed by scavenge_pointer().
1021 */
1022 case type_FunctionHeader:
1023 scavenge_fcn_header((struct function *) fcnobj);
1024 break;
1025 /* If in static space it was moved there by purify and we are
1026 * doing normal scavenge. Handle normally.
1027 */
1028 case type_FuncallableInstanceHeader:
1029 case type_ClosureHeader:
1030 scavenge_pointer(&fdefn->function);
1031 break;
1032 default:
1033 dprintf(1, ("Ignoring bogus value %x for fdefn function.\n",
1034 *fcnobj));
1035 }
1036 } else
1037 /* NIL for undefined function? */
1038 scavenge_pointer(&fdefn->function);
1039
1040 if (fix_func) { /* This shouldn't be needed yet. */
1041 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
1042 }
1043 return sizeof(struct fdefn) / sizeof(lispobj);
1044 }
1045
1046 #endif
1047
/* List scavenger taken from gc.c and adapted */

static FILE *log = NULL;
static int scav_ro = 0;		/* for testing */
static int debug = 0;
static void *trapaddr = 0;

/* Debugging hook: a convenient place for a gdb breakpoint, called from
 * the (normally #if 0'd) probes when a watched address turns up.
 *
 * Fix: print the pointer with %p; the old "%x" passed a void * where
 * an unsigned int was expected, which is undefined behavior (and wrong
 * on any target where pointers are wider than int).
 */
void
check_trap(void *addr)
{
    fprintf(stderr, "Trapped @ %p\n", addr);
}
1059
/* Transport a list headed by 'object' into newspace.
 *
 * Copies the first cons, then chases the cdr chain, copying each
 * successive cons into adjacent newspace so the list ends up linear
 * in memory (better paging behavior).  Each copied cons is clobbered
 * with a forwarding pointer; the chase stops at the first cdr that is
 * not an unforwarded from-space cons.  Returns the tagged pointer to
 * the new copy of 'object'.
 */
static lispobj
trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    int n = 0;			/* only used by the #if 0 logging below */
    lispobj cdr;

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) cgc_alloc(sizeof(struct cons));

    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr;	/* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);
    bytes_copied += sizeof(struct cons);

#if 0
    if (scav_ro > 1)
	check_trap(object);
    if (log)
	fprintf(log, "( %d cons @ #x%x -> #x%x car #x%x)\n",
		n++, cons, new_cons, new_cons->car);
#endif
    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;
    /* Set forwarding pointer (clobbers start of list). */
    DEPOSIT_FORWARDING_PTR((obj_t) cons, new_list_pointer);

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/* Stop at a non-cons cdr, a cons outside from-space, or a cons
	 * some other path has already forwarded. */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || FORWARDED((obj_t) PTR(cdr)))
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons *) cgc_alloc(sizeof(struct cons));

	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
	bytes_copied += sizeof(struct cons);

#if 0
	if (scav_ro > 1)
	    check_trap(object);
	if (log)
	    fprintf(log, "( %d cons @ #x%x -> #x%x car #x%x)\n",
		    n++, cdr_cons, new_cdr_cons, cdr_cons->car);
#endif
	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;
	/* Set forwarding pointer */
	DEPOSIT_FORWARDING_PTR((obj_t) cdr_cons, new_cdr);

	/* Update the cdr of the last cons copied into new
	 * space to keep the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
1131
1132 /* Weak Pointers */
1133 static int weak_noise = 0;
1134 static int do_weak = 1;
1135 static int
1136 scav_weak_pointer(lispobj * where)
1137 {
1138 struct weak_pointer *wp = weak_pointers;
1139
1140 /* Push the weak pointer onto the list of weak pointers.
1141 * Do I have to watch for duplicates? Originally this was
1142 * part of trans_weak_pointer but that didn't work in the
1143 * case where the WP was in a promoted region.
1144 */
1145
1146 while (wp != NULL) {
1147 if (wp == (struct weak_pointer *) where)
1148 break;
1149 wp = wp->next;
1150 }
1151 if (wp == NULL) {
1152 wp = (struct weak_pointer *) where;
1153 wp->next = weak_pointers;
1154 weak_pointers = wp;
1155 if (!do_weak)
1156 scavenge_1word_obj(&wp->value);
1157 }
1158
1159 /* Do not let GC scavenge the value slot of the weak pointer
1160 * (that is why it is a weak pointer).
1161 */
1162
1163 return WEAK_POINTER_NWORDS;
1164 }
1165
1166 void
1167 scan_weak_pointers(void)
1168 {
1169 struct weak_pointer *wp;
1170
1171 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
1172 lispobj value = wp->value;
1173 obj_t obj = (obj_t) PTR(value);
1174 lispobj first, *first_pointer;
1175
1176 dprintf(weak_noise, ("Weak pointer at 0x%08x\n", (unsigned long) wp));
1177 dprintf(weak_noise, ("Value: 0x%08x\n", (unsigned long) value));
1178
1179 if (Pointerp(value) && from_space_p(value)) {
1180 /* Now, we need to check if the object has been forwarded.
1181 * If it has been, the weak pointer is still good and needs
1182 * to be updated. Otherwise, the weak pointer needs to be nil'ed out.
1183 */
1184
1185 if (FORWARDED(obj))
1186 wp->value = FORWARDING_PTR(obj);
1187 else { /* break it */
1188 dprintf(weak_noise, ("Broken.\n"));
1189 wp->value = NIL;
1190 wp->broken = T;
1191 }
1192 }
1193 }
1194 }
1195
/* Scavenge one headered object at 'addr', dispatching on its header
 * type.  Returns the number of words consumed so the caller can step
 * to the next object.  Objects with non-obvious layouts (vectors,
 * code, closures, weak pointers, fdefns) get special cases; everything
 * else is handled generically via the storage-class table 'sct'.
 */
static int
scavenge_random_object(lispobj * addr)
{
    lispobj header = *addr;
    int count = 1;

    dprintf(noise > 1, ("soi: %x @ %x\n", header, addr));
#if 0
    if (trapaddr == addr)
	check_trap(addr);
#endif
    gc_assert(ALIGNEDP(addr));

    switch (TypeOf(header)) {
    case type_SimpleVector:
	{
	    struct vector *v = (struct vector *) addr;
	    int i, n = fixnum_value(v->length);

	    /* Addresses used as hash keys may move in this GC, so any
	     * address-based hash table must be marked for rehash on
	     * next access. */
	    if (HeaderValue(v->header) == subtype_VectorValidHashing)
		v->header =
		    (subtype_VectorMustRehash << type_Bits) |
		    type_SimpleVector;
	    /* Look at each of the vector elements which can be any lisp object. */
	    for (i = 0; i < n; i++)
		scavenge_1word_obj(&v->data[i]);
	    /* Header word + length word + n data words, rounded even. */
	    count = CEILING(n + 2, 2);
	}
	break;
    case type_CodeHeader:
	count = scav_code_header(addr);
	break;
	/* We should never hit any of these, 'cause they occur buried in
	 * the middle of code objects (and handled by the code just above).
	 */
    case type_ClosureFunctionHeader:
    case type_ReturnPcHeader:
	gc_abort();

	/* Except while looking at an fdefn and wanting to ensure
	 * code object is looked at.
	 */
    case type_FunctionHeader:
	scavenge_fcn_header((struct function *) addr);
	break;
#if defined i386
    case type_ClosureHeader:
    case type_FuncallableInstanceHeader:
    case type_ByteCodeFunction:
    case type_ByteCodeClosure:
    case type_DylanFunctionHeader:
	count = scav_closure_header((struct closure *) addr);
	break;
#endif
    case type_WeakPointer:
	count = scav_weak_pointer(addr);
	break;
    case type_Fdefn:
	/* We have to handle fdefn objects specially, so we can fix
	 * up the raw function address.
	 */
	count = scav_fdefn(addr);
	break;
    default:
	{
	    /* Generic path: consult the storage-class table. */
	    OSC_t class = sct[TypeOf(header)];

	    switch (class.sc_kind) {
	    case SC_IMMED:
		count = 1;
		break;
	    case SC_ISBOXED:
		{
		    /* Boxed object: every word after the header is a
		     * scavengeable lisp object. */
		    int i, words = 1 + HeaderValue(header);

		    for (i = 1; i < words; i++)
			scavenge_1word_obj(addr + i);
		    count = CEILING(words, 2);
		}
		break;
	    case SC_UNBOXED:
	    case SC_STRING:
	    case SC_VECTOR:	/* simple vector handled above */
		/* No lisp pointers inside; just skip over it. */
		count = sizeOfObject((obj_t) addr);
		break;
	    default:
		gc_abort();
	    }
	}
    }
    return count;
}
1288
1289 static void
1290 logcopy(lispobj * addr, lispobj tagged, int hdr, lispobj to)
1291 {
1292 if (log) {
1293 int kind = TypeOf(hdr);
1294 int words = sizeOfObject((obj_t) PTR(tagged));
1295
1296 fprintf(log, "(copy #x%x @ #x%x (#x%x %d) to #x%x)\n",
1297 tagged, addr, kind, words, to);
1298 }
1299 }
/* Move the from-space object referenced by *addr into newspace.
 * Small objects are physically copied (and a forwarding pointer is
 * left behind, with *addr updated to the copy); a large object's
 * whole region is instead promoted in place via move_to_newspace().
 */
static void
maybe_transport(lispobj * addr, lispobj tagged, struct region *region)
{
    obj_t obj = (obj_t) PTR(tagged);

    gc_assert(ALIGNEDP(obj));
    gc_assert(Pointerp(tagged));
    /* A pointer at the region header itself would be a region
     * bookkeeping word, not a lisp object. */
    gc_assert((void *) region != (void *) obj);
#if 0
    if ((void *) obj == (void *) trapaddr)
	check_trap(obj);
#endif
    if (region->contains_small_objects) {
	lispobj new = copy(tagged);

#if 0
	if (scav_ro > 1)	/* debugging in RO space */
	    check_trap(obj);
#endif
#if defined GOOSE_CHASE
	if (TypeOf(obj->header) == type_Fdefn) {
	    struct fdefn *fdefn = (struct fdefn *) PTR(new);

	    if (fdefn->function < STATIC_SPACE_START)
		check_trap(obj);
	}
#endif
	dprintf(0, ("copy %x @ %x (%x) to %x\n",
		    tagged, addr, TypeOf(obj->header), new));
	logcopy(addr, tagged, obj->header, new);
	/* Clobber the old object AFTER logging read its header. */
	DEPOSIT_FORWARDING_PTR(obj, new);
	*addr = new;
    } else {
	/* Large object: promote its region wholesale; *addr stays valid. */
	move_to_newspace(region);
	dprintf(0, ("move %x\n", region));
    }
}
1337
/* Scavenge one cell known to hold a tagged pointer.  If it points
 * into oldspace, the referent is either already forwarded (just
 * rewrite the cell), is a list (transported by trans_list), or is a
 * boxed object (copied or region-promoted by maybe_transport).
 * Pointers to NIL, T, or outside oldspace are left untouched.
 */
void
scavenge_pointer(lispobj * addr)
{
    lispobj taggedobj = *addr;	/* descriptor */
    obj_t obj = (obj_t) PTR(taggedobj);	/* pointer to object */

    gc_assert(Pointerp(taggedobj));
#if 0
    if (addr == trapaddr)
	check_trap(addr);
    if (obj == trapaddr)
	check_trap(obj);
#endif
    /* optimize out common static pointers */
    if (taggedobj != NIL && taggedobj != T) {
	struct region *region = find_region(obj);

	/* Only interested in pointers into oldspace */
	if (region && region->space == oldspace) {
	    if (FORWARDED(obj))
		*addr = FORWARDING_PTR(obj);
	    else
		switch (LowtagOf(taggedobj)) {
		case type_ListPointer:
		    dprintf(noise > 1, ("ListPointer @ %x...\n", addr));
		    *addr = trans_list(taggedobj);
		    dprintf(noise > 1, ("... -> %x\n", addr));
		    break;
		case type_FunctionPointer:
		    /* Only closure-like headers are legal referents of
		     * a function pointer here; a raw function header
		     * would live inside a code object. */
		    switch (TypeOf(obj->header)) {
		    case type_ClosureHeader:
		    case type_FuncallableInstanceHeader:
		    case type_ByteCodeFunction:
		    case type_ByteCodeClosure:
		    case type_DylanFunctionHeader:
			maybe_transport(addr, taggedobj, region);
			break;
		    default:
			gc_abort();
		    }
		    break;
		case type_InstancePointer:
		case type_OtherPointer:
		    maybe_transport(addr, taggedobj, region);
		    break;
		default:
		    /* It was a pointer, but not one of them? */
		    gc_abort();
		}
	}
    }
}
1390
1391
1392 static lispobj *
1393 scavenge(lispobj * addr, int ptrs)
1394 {
1395 /* addr points to an aligned 32-bit word in some space. */
1396 struct region *region;
1397 lispobj *end = addr + ptrs;
1398 lispobj obj;
1399
1400 while (addr < end) {
1401 int count = 1;
1402
1403 obj = *addr; /* the lisp object */
1404 #if 0
1405 if (trapaddr == addr)
1406 check_trap(addr); /* gdb breakpoint */
1407 #endif
1408 if (Pointerp(obj)) /* lowtag odd */
1409 scavenge_pointer(addr);
1410 else if (obj & 0x3)
1411 /* some other immediate */
1412 /*
1413 * Some random header. Process some type dependent number
1414 * of words. May still be inside object after call and the
1415 * next cell can be any lisp object. We can either recurse
1416 * by calling scavenge here or let the caller do it.
1417 */
1418 count = scavenge_random_object(addr);
1419 else
1420 IMMED_OR_LOSE(obj);
1421
1422 addr += count;
1423 }
1424 return addr;
1425 }
1426
1427 static void
1428 scavenge_cons(lispobj * where)
1429 {
1430 /* Scavenge a two-word space */
1431 scavenge_1word_obj(where + 0); /* car */
1432 scavenge_1word_obj(where + 1); /* cdr */
1433 }
1434 static lispobj *
1435 scavenge_object(lispobj * start)
1436 {
1437 int length = sizeOfObject((obj_t) start);
1438 int words = scavenge_random_object(start);
1439
1440 gc_assert(length == words);
1441 return start + length;
1442 }
/* Scavenge whatever object starts at 'obj' and return the address of
 * the next object.  Distinguishes headerless conses from headered
 * objects by inspecting the first word.
 */
static lispobj *
scavengex(lispobj * obj)
{
    /* Thing at this location is one of:
     * a - basic object with header.
     * b - cons object (no header).
     * so that the starting and ending addresses are aligned.
     */
    gc_assert(ALIGNEDP(obj));
    {
	lispobj first_word = *obj;
	OSC_t sc = sct[TypeOf(first_word)];

	/* A pointer or immediate first word cannot be an object
	 * header, so this must be a cons cell (or unused space laid
	 * out like one). */
	if (Pointerp(first_word) || sc.sc_kind == SC_IMMED) {	/* Must be a cons object or unused space */
	    scavenge_cons((lispobj *) obj);
	    return obj + 2;
	} else {		/* Must be a complex object with header */
	    lispobj *next = scavenge_object(obj);

	    return next;
	}
    }
}
1466
1467 static void
1468 scavenge_space(lispobj * where, int words, char *name)
1469 {
1470 int allocated = bytes_allocated;
1471 lispobj *end = where + words;
1472 lispobj *last;
1473
1474 bytes_copied = 0;
1475 if (name)
1476 dprintf(noise, (" %s", name));
1477 while (where < end) {
1478 last = where;
1479 where = scavengex(last);
1480 }
1481 gc_assert(where == end);
1482 if (name)
1483 dprintf(noise, (" %ld bytes moved, %ld bytes allocated.\n",
1484 bytes_copied, bytes_allocated - allocated));
1485 }
1486
1487 static int boxed_registers[] = BOXED_REGISTERS;
1488 static void
1489 preserve_interrupt_context(os_context_t * context)
1490 {
1491 int i;
1492
1493 /* Check each boxed register for a valid pointer and promote
1494 * its region when found.
1495 */
1496 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
1497 int index = boxed_registers[i];
1498 lispobj foo = SC_REG(context, index);
1499 struct region *region = find_region((void *) foo);
1500
1501 if (region && region->space == oldspace)
1502 move_to_newspace(region);
1503 }
1504 }
1505 static void
1506 preserve_interrupt_contexts(void)
1507 {
1508 int i, index;
1509 os_context_t *context;
1510
1511 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
1512 dprintf(noise, ("Number of active contexts: %d\n", index));
1513
1514 for (i = 0; i < index; i++) {
1515 context = lisp_interrupt_contexts[i];
1516 preserve_interrupt_context(context);
1517 }
1518 }
1519
1520
1521 static void
1522 flip_spaces(void)
1523 {
1524 struct space *temp = oldspace;
1525
1526 oldspace = newspace;
1527 newspace = temp;
1528 }
1529
/* There should be no lisp objects on the C stack so will limit search
 * to just the assigned lisp stack area.
 */
#if defined i386
#define BOS (CONTROL_STACK_START+CONTROL_STACK_SIZE)	/* x86-validate.h */
/* Traverse stack in same direction as it was loaded to try and
 * preserve original ordering of pages. Good for the VM system I hope.
 */
/* Iterates 'var' from the bottom of the control stack (highest
 * address, BOS) down to the caller's current frame: 'var' must be an
 * automatic variable so that &var marks the live top of stack. */
#define ACROSS_STACK(var) var=(void**)BOS-1; var > (void**)&var; var--
#endif
1540
1541 void
1542 preserve_pointer(void *ptr)
1543 {
1544 if (ptr > heap_base && ptr < heap_end) {
1545 struct region *region = find_region(ptr);
1546
1547 if (region != NULL && region->space == oldspace) {
1548 dprintf(0, ("move %x\n", ptr));
1549 move_to_newspace(region);
1550 }
1551 }
1552 }
1553
1554 static void
1555 preserve_stack(void)
1556 {
1557 void **addr; /* auto var is current TOS */
1558
1559 for (ACROSS_STACK(addr))
1560 preserve_pointer(*addr);
1561 }
1562
#ifdef CONTROL_STACKS
/* Scavenge the thread stack conservative roots.
 *
 * CONTROL_STACKS names a simple-vector of saved thread stacks, each a
 * (simple-array (unsigned-byte 32)); every word of every saved stack
 * is treated as a potential conservative root.  Silently returns if
 * either level has an unexpected type.
 *
 * Fix: removed the unused local 'type'.
 */
void
scavenge_thread_stacks(void)
{
    lispobj thread_stacks = SymbolValue(CONTROL_STACKS);

    if (LowtagOf(thread_stacks) == type_OtherPointer) {
	struct vector *vector = (struct vector *) PTR(thread_stacks);
	int length, i;

	if (TypeOf(vector->header) != type_SimpleVector)
	    return;
	length = fixnum_value(vector->length);
	for (i = 0; i < length; i++) {
	    lispobj stack_obj = vector->data[i];

	    if (LowtagOf(stack_obj) == type_OtherPointer) {
		struct vector *stack = (struct vector *) PTR(stack_obj);
		int length, j;

		if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
		    return;
		length = fixnum_value(stack->length);
		/* fprintf(stderr,"Scavenging control stack %d of length %d words\n",
		   i,length); */
		for (j = 0; j < length; j++)
		    preserve_pointer((void *) stack->data[j]);
	    }
	}
    }
}
#endif
1597
/* Clear the dead portion of the control stack (from its base up to
 * the current C frame) so stale words cannot act as bogus conservative
 * roots in a later GC.
 *
 * NOTE(review): this assumes the lisp control stack IS the C stack
 * (true for the x86 port): 'base' is CONTROL_STACK_START and '&base'
 * / '&size' are locals in the current frame, so everything between
 * them is unused stack below the live frames -- confirm before reuse
 * on another port.
 */
static void
zero_stack(void)
{
    /* This is a bit tricky because we don't want to zap any
     * stack frames between here and the call to mmap. For now,
     * lets just be slow and careful.
     */
    long *p, *q;
    os_vm_address_t base = (os_vm_address_t) CONTROL_STACK_START;
    os_vm_size_t size = (char *) &base - (char *) base;

#if 0
    cgc_zero(base, size);
#else
    /* Zero word-by-word from the stack base up to the address of the
     * local 'size', i.e. just below this frame's live storage. */
    p = (long *) base;
    q = (long *) &size;
    while (p < q)
	*p++ = 0;
#endif

}
1619
#if defined STATIC_BLUE_BAG
static int fast_static = 1;

/* Scavenge static space, optionally skipping the read-only code
 * layers recorded by purify in the STATIC_BLUE_BAG list.
 *
 * Fix: the sprintf previously passed a lispobj* for "%x" and a
 * pointer difference for "%d" -- both undefined behavior.  It now
 * casts to the matching integer types and uses "%lx %ld".
 */
static void
scavenge_static(void)
{
    /* Static space consists of alternating layers of
     * code objects that refer to read-only space (from purify),
     * static non-code objects that need looking at, and
     * newly loaded code objects that refer to dynamic space.
     * The number of these areas depends on how many times purify
     * ran while loading the system image. I will extend purify
     * to maintain a list of collectable areas and use that list
     * here to avoid scanning read-only code sections.
     */
    lispobj *ss0 = (lispobj *) PTR(NIL);
    lispobj *ssa = (lispobj *) (PTR(STATIC_BLUE_BAG));
    lispobj *ssz = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
    lispobj bag = SymbolValue(STATIC_BLUE_BAG);
    lispobj *end = NULL;

    ssa += sizeOfObject(OBJECT_AT(ssa));	/* Genesis modifies plist entry */
    if (fast_static) {
	/* Everything before the blue bag symbol is scanned as one chunk. */
	scavenge_space(ss0, ssa - ss0, "Static0");
	if (bag != NIL && LowtagOf(bag) == type_ListPointer) {
	    char sbuf[128];
	    struct cons *cons = (struct cons *) PTR(bag);

	    /* Each list element is a (start . end) pair bounding one
	     * collectable static area. */
	    while (LowtagOf(cons->car) == type_ListPointer) {
		struct cons *pair = (struct cons *) PTR(cons->car);
		lispobj *ss1 = (lispobj *) pair->car;
		lispobj *ss2 = (lispobj *) pair->cdr;

		if (end == NULL)
		    end = ss2;
		sprintf(sbuf, "Static %lx %ld",
			(unsigned long) ss1, (long) (ss2 - ss1));
		scavenge_space(ss1, ss2 - ss1, sbuf);
		if (cons->cdr != NIL && LowtagOf(cons->cdr) == type_ListPointer)
		    cons = (struct cons *) PTR(cons->cdr);
		else
		    break;
	    }
	}
	/* Finish with the tail of static space past the last area. */
	if (end != NULL)
	    scavenge_space(end, ssz - end, "Static");
    } else
	(scavenge_space(ss0, ssz - ss0, "Static-All"));
}
#endif
1668
/* Scavenge all non-stack root sets: (optionally) read-only space,
 * static space, the binding stack, and the interrupt handler table.
 * Stack and register roots are handled separately and conservatively
 * by preserve_stack()/preserve_interrupt_contexts().
 */
static void
scavenge_roots(void)
{
    /* So what should go here?
     * When cgc starts up after purify/save all live objects
     * are in read-only or static space, and anything in RO
     * can point only to RO or STATIC and can't be changed.
     * Anything in STATIC is subject to change (but not move).
     * . not read-only-space (probably most of the roots here)
     * . static-space (all compiled code at least)
     * . binding-stack
     * . weak-pointers
     ? do I allow GC from interrupt?)
     * . interrupt-context (regs same as stack)
     ****
     * Well, it turns out that RO space ain't exactly that as
     * somehow apparently cached 'compact-info-environment' stuff
     * modifies at least 2 locations in RO space. There is a note
     * in globaldb.lisp that alludes to this and provides a post GC
     * hook to blow the cache. Not a problem if gc is called from
     * the lisp wrapper. UPDATE: Found purify bug which forced
     * boxed vectors into RO. This may be what led to above.
     *
     */
    lispobj *rs0 = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *rsz = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
    lispobj *ss0 = (lispobj *) STATIC_SPACE_START;
    lispobj *ssz = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
    lispobj *bs0 = (lispobj *) BINDING_STACK_START;
    lispobj *bsz = (lispobj *) SymbolValue(BINDING_STACK_POINTER);

    if (scav_ro) {
	/* Bump scav_ro so the (#if 0'd) debug probes can tell we are
	 * inside the read-only scan. */
	scav_ro++;
	scavenge_space(rs0, rsz - rs0, "RO");
	scav_ro--;
    }
    scavenge_static();
    scavenge_space(bs0, bsz - bs0, "Binding Stack");

    dprintf(noise, ("Interrupt handlers (%u bytes) ...\n",
		    sizeof(interrupt_handlers)));

    scavenge((lispobj *) interrupt_handlers,
	     sizeof(interrupt_handlers) / sizeof(lispobj));

}
1715
/* Scavenge everything already in newspace, including objects appended
 * while this loop runs (Cheney-style transitive closure over the
 * region list).
 */
static void
scavenge_newspace(void)
{
    /* Scavenge is going to start at the beginning of newspace which
     * is presumed to have some "root" object pointers lying about due
     * to promoting regions that may be aimed at by stack resident pointers,
     * copied small objects from scavenge_roots(), or promoted large_object
     * regions. Scavenge() will flush out more copied objects/promoted
     * regions that will get added to the end of newspace and eventually
     * scanned by this code -- until all referenced things (and maybe some
     * extra dead stuff) have been examined. At the end of this loop anything
     * in oldspace is trash.
     */
    struct region *current;

    current = newspace->regions;

    while (current != NULL) {
	if (current->contains_small_objects) {
	    /* Walk every object packed between the region header and
	     * the end of the region's chunks. */
	    void *obj = (char *) current + REGION_OVERHEAD;
	    void *end = (char *) current + current->num_chunks * CHUNK_BYTES;

	    while (obj < end)
		obj = SCAVENGE_OBJECT(OBJECT_AT(obj));
	    gc_assert(obj == end);
	} else
	    /* Large-object region: exactly one object after the header. */
	    SCAVENGE_OBJECT(OBJECT_AT(((char *) current + REGION_OVERHEAD)));
	current = current->next;
    }
}
1746
1747 static void
1748 free_oldspace(void)
1749 {
1750 struct region *region, *next;
1751
1752 chunks_freed = 0;
1753 for (region = oldspace->regions; region != NULL; region = next) {
1754 gc_assert(region->space != newspace);
1755 next = region->next;
1756 free_region(region);
1757 }
1758 oldspace->alloc_ptr = NULL;
1759 oldspace->alloc_end = NULL;
1760 oldspace->regions = NULL;
1761 oldspace->regions_tail = &oldspace->regions;
1762 }
1763
/* Debug-only consistency walk over 'words' cells starting at 'start':
 * report any pointer that lands in a region belonging to no space
 * (i.e. freed memory).  Mirrors the layout logic of scavenge() but
 * only inspects, never mutates.
 */
static void
verify_space(lispobj * start, size_t words)
{
    while (words > 0) {
	size_t count = 1;
	lispobj thing = *(lispobj *) start;

	if (Pointerp(thing)) {
	    struct region *region = find_region((void *) thing);

	    /* region->space == NULL means the target was freed. */
	    if (region && region->space == NULL)
		fprintf(stderr, "Ptr %x @ %x sees Junk\n", thing, start);
	} else if (thing & 0x3) {
	    /* Non-pointer, non-fixnum: an object header. */
	    obj_t obj = (obj_t) start;

	    switch (TypeOf(obj->header)) {
	    case type_CodeHeader:
		{
		    lispobj object = *start;
		    struct code *code;
		    int nheader_words, ncode_words, nwords;
		    lispobj fheaderl;
		    struct function *fheaderp;

		    code = (struct code *) start;
		    ncode_words = fixnum_value(code->code_size);
		    nheader_words = HeaderValue(object);
		    nwords = ncode_words + nheader_words;
		    nwords = CEILING(nwords, 2);
		    /* Scavenge the boxed section of the code data block */
		    verify_space(start + 1, nheader_words - 1);

		    /* Scavenge the boxed section of each function object in the
		     * code data block.
		     */
		    fheaderl = code->entry_points;
		    while (fheaderl != NIL) {
			fheaderp = (struct function *) PTR(fheaderl);
			gc_assert(TypeOf(fheaderp->header) ==
				  type_FunctionHeader);
			verify_space(&fheaderp->name, 1);
			verify_space(&fheaderp->arglist, 1);
			verify_space(&fheaderp->type, 1);
			fheaderl = fheaderp->next;
		    }
		    count = nwords;
		    break;
		}
	    default:
		{
		    /* Generic object: step by its storage-class size. */
		    OSC_t class = sct[TypeOf(obj->header)];

		    switch (class.sc_kind) {
		    case SC_ISBOXED:
		    case SC_IMMED:
			count = 1;
			break;
		    case SC_UNBOXED:
		    case SC_STRING:
		    case SC_VECTOR:
			count = sizeOfObject((obj_t) start);
			break;
		    default:
			gc_abort();
		    }
		    break;
		}
	    }
	}
	start += count;
	words -= count;
    }
}
1837
/* For debug/test only. */
/* Verify that no root space still holds a pointer into freed memory.
 * Run after free_oldspace() when 'dover' is set.  cs0/csz bound the C
 * stack and are kept only for the disabled stack check below. */
static void
verify_gc(void)
{
    lispobj *rs0 = (lispobj *) READ_ONLY_SPACE_START;
    lispobj *rsz = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
    lispobj *ss0 = (lispobj *) STATIC_SPACE_START;
    lispobj *ssz = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
    lispobj *bs0 = (lispobj *) BINDING_STACK_START;
    lispobj *bsz = (lispobj *) SymbolValue(BINDING_STACK_POINTER);
    lispobj *cs0 = (lispobj *) & rs0;
    lispobj *csz = (lispobj *) BOS;

    /* can't check stack easily because there may be non-valid
     * objects there (thats why we're doing this cgc stuff). In
     * particular there are raw return addresses which can be very
     * descriptorish looking!!!

     verify_space(cs0, csz-cs0);
     */
    verify_space(rs0, rsz - rs0);
    verify_space(ss0, ssz - ss0);
    verify_space(bs0, bsz - bs0);
}
/* Repair region headers that purify mistook for cons cells and moved
 * into static space: copy the two displaced words back over the
 * region header and zero the impostor cons.  See post_purify_fixup().
 */
static void
fixup_regions(struct region *region)
{
    do {
	lispobj header = (lispobj) OBJECT_AT(region)->header;

	/* A region header word should never be a static-space pointer;
	 * if it is, purify relocated our header words. */
	if (static_space_p(header)) {
	    /* Purify thought this header was a cons? Why? */
	    struct cons *wrong = (struct cons *) PTR(header);
	    struct cons *fixme = (struct cons *) region;

	    dprintf(1, ("\n--Fixing region header @ %x.", region));
	    fixme->car = wrong->car;	/* restore region header */
	    fixme->cdr = wrong->cdr;	/* restore prev pointer */
	    wrong->car = wrong->cdr = 0;
	}
	region = region->next;
    }
    while (region != NULL);
}
1882
/* Run fixup_regions() over both free lists and the given space's
 * region list after a purify, since any of them may contain headers
 * that purify corrupted.
 */
static void
post_purify_fixup(struct space *space)
{
    /* Purify may have messed up the region headers. This can happen
     * if there is a dead list pointer on the stack that now aims
     * at a region header (previously was valid memory). Purify attempts
     * to at least check for a valid object header but loses with lists.
     * This hack recovers the correct values and keeps us going. Can
     * this occur with other dead objects?
     */
    if (large_region_free_list)
	fixup_regions(large_region_free_list);
    if (small_region_free_list)
	fixup_regions(small_region_free_list);
    fixup_regions(space->regions);
}
1899
1900
static int dolog = 0;		/* log copy ops to file */
static int dover = 0;		/* hunt pointers to oldspace */

/* Top-level collection driver.  Phase order matters: flip spaces,
 * promote regions pinned by registers and the control stack(s), copy
 * from the precise roots, take the transitive closure over newspace,
 * fix up weak pointers, then free and compact what is left of
 * oldspace.
 */
void
cgc_collect_garbage(void)
{
    unsigned long allocated = bytes_allocated;

    dprintf(noise, ("GC\n"));
    if (dolog && !log)
	log = fopen("LOG.tmp", "w");

    /* Initialize the weak pointer list. */
    weak_pointers = NULL;

    dprintf(noise, ("[Flip Spaces]\n"));
    flip_spaces();
    preserve_interrupt_contexts();
    dprintf(noise, ("[Preserve Stack]\n"));
    preserve_stack();
    scavenge_thread_stacks();
    dprintf(noise, ("[Scavenge Roots]\n"));
    scavenge_roots();
    dprintf(noise, ("[Scavenge New]\n"));
    scavenge_newspace();
    /* Weak pointers can only be resolved once all strong references
     * have been traced. */
    scan_weak_pointers();
    dprintf(noise, ("[Free Oldspace]\n"));
    free_oldspace();
    if (dover) {
	dprintf(noise, ("[Checking]\n"));
	verify_gc();
    }
    dprintf(noise, ("[Compacting]\n"));
    compact_free_regions();
    /* The stack will be zeroed by scrub-control-stack in sub-gc which
       is more effecient. */
    /* zero_stack(); */
    if (log)
	fclose(log);
    log = NULL;
    dprintf(noise, (" %ld bytes copied.\n", (bytes_allocated - allocated)));
    dprintf(noise, (" %ld bytes (%ld pages) reclaimed.\n",
		    chunks_freed * CHUNK_BYTES, chunks_freed));
    bytes_allocated -= chunks_freed * CHUNK_BYTES;
    maybe_gc_called = 0;
}
1946
/* Drop the entire dynamic heap after purify has moved all live data
 * out of it: flip, repair purify-damaged region headers, free every
 * region, and compact.  No scavenging is needed.
 */
void
cgc_free_heap(void)
{
    /* Like above but just zap everything 'cause purify has
     * cleaned house!
     */
    unsigned long allocated = bytes_allocated;

    flip_spaces();
    post_purify_fixup(oldspace);
    free_oldspace();
    compact_free_regions();
#if 0				/* purify is currently running on the C stack so don't do this */
    zero_stack();
#endif
    bytes_allocated -= chunks_freed * CHUNK_BYTES;
}
1964
1965
1966 void
1967 cgc_init_collector(void)
1968 {
1969 int max_blocks;
1970
1971 heap_base = (void *) DYNAMIC_0_SPACE_START;
1972
1973 /* I could actually use both spaces here but just 1 for now */
1974 heap_end = (char *) heap_base + dynamic_space_size;
1975
1976 max_blocks = BLOCK_NUMBER(heap_end) - BLOCK_NUMBER(heap_base);
1977 if ((block_table_base = malloc(sizeof(struct cluster *) * max_blocks))
1978 != NULL) {
1979 memset(block_table_base, 0, sizeof(struct cluster *) * max_blocks);
1980
1981 block_table = (block_table_base - BLOCK_NUMBER(heap_base));
1982
1983 space_0.regions_tail = &space_0.regions;
1984 space_1.regions_tail = &space_1.regions;
1985
1986 newspace = &space_0;
1987 oldspace = &space_1;
1988 } else
1989 perror("malloc cgc block table");
1990 init_osc(); /* Object Storage Class table */
1991 }
1992
1993
void do_pending_interrupt(void);

/* Zero until the image switches from the simple bump allocator to cgc. */
int use_cgc_p = 0;

/* Allocate nbytes of dynamic space on behalf of compiled lisp code.
 * Before cgc is enabled this is a plain bump of ALLOCATION-POINTER;
 * afterwards it goes through cgc_alloc with a GC trigger check and
 * careful interrupt masking (cgc_alloc must not be re-entered from a
 * handler).  Returns the start of the fresh, aligned block.
 */
char *
alloc(int nbytes)
{
    /* Alloc is only called from lisp code to allocate a number of
       words, the cgc GC uses cgc_alloc directly as the checks of the
       heap size and is not needed and interrupts are allways disabled
       during a GC. */

    /* Assumes nbytes includes alignment. Python arranges for that
     * but the C startup code needed some help.
     */
#if 0
    int bytes = (nbytes + (ALIGN_BYTES - 1)) & ~(ALIGN_BYTES - 1);

    if (bytes != nbytes)
	fprintf(stderr, "Fixing unaligned allocation length %d.\n", nbytes);
    nbytes = bytes;
#endif
    if (!use_cgc_p) {
	/* Pre-cgc: simple pointer bump in the dynamic space. */
	char *current = (char *) SymbolValue(ALLOCATION_POINTER);
	char *nxtfree = current + nbytes;

	SetSymbolValue(ALLOCATION_POINTER, (lispobj) nxtfree);
	return current;
    } else {
	/* Lacking an interrupt driven scheme to notice when a GC might
	 * be wise, we add some more overhead to the allocator here
	 * before any needed state is acquired. Only need to do it once
	 * though because lisp will remember *need to collect garbage*
	 * and get to it when it can. */
	if (auto_gc_trigger	/* Only when enabled */
	    && bytes_allocated > auto_gc_trigger && !maybe_gc_called++)	/* Only once */
	    funcall0(SymbolFunction(MAYBE_GC));

	if (SymbolValue(INTERRUPTS_ENABLED) == NIL)
	    /* Interrupts are disable so no special care is needed */
	    return cgc_alloc(nbytes);
	else {
	    void *result;

	    /* Interrupts are enabled so set *interrupt-enabled* to nil
	       before calling cgc_alloc to prevent cgc_alloc from being
	       re-entered. */
	    SetSymbolValue(INTERRUPTS_ENABLED, NIL);

	    result = cgc_alloc(nbytes);

	    /* Restore *interrupts-enabled* */
	    SetSymbolValue(INTERRUPTS_ENABLED, T);

	    /* Check if an interrupt occured */
	    if (SymbolValue(INTERRUPT_PENDING) == T)
		/* Handle any interrupts that occured during cgc_alloc */
		do_pending_interrupt();

	    return result;
	}
    }
}
2056
/* Interface to history. */
/* Raise the allocation threshold at which the next alloc() will
 * invoke MAYBE-GC.  Note this ACCUMULATES onto the current trigger
 * rather than replacing it. */
void
set_auto_gc_trigger(unsigned long dynamic_usage)
{
    auto_gc_trigger += dynamic_usage;
}
2063
2064 void
2065 clear_auto_gc_trigger(void)
2066 {
2067 auto_gc_trigger = 0;
2068 }
2069
/* One-time GC initialization: delegate to the cgc collector's own
 * setup routine. */
void
gc_init(void)
{
    cgc_init_collector();
}
2075
/* Run a full collection via the cgc collector.
 *
 * SUB-GC already wraps WITHOUT-INTERRUPTS around this call; defining
 * REALLY_SAFE additionally blocks SIGINT at the signal-mask level for
 * the duration of the collection, at the cost of absolutely blocking
 * ^C until the GC finishes.
 */
void
collect_garbage(void)
{
    /* #define REALLY_SAFE */
#if defined REALLY_SAFE
    sigset_t block_set, saved_set;

    sigemptyset(&block_set);
    sigaddset(&block_set, SIGINT);
    sigprocmask(SIG_BLOCK, &block_set, &saved_set);
#endif

    cgc_collect_garbage();

#if defined REALLY_SAFE
    sigprocmask(SIG_SETMASK, &saved_set, NULL);
#endif
}
2096
2097 /* Some helpers for the debugger. */
2098
2099 /* Scan an area looking for an object which encloses the given
2100 pointer. Returns the object start on success or NULL on failure. */
2101 static lispobj *
2102 search_space(lispobj * start, size_t words, lispobj * pointer)
2103 {
2104 while (words > 0) {
2105 size_t count = 1;
2106 lispobj thing = *start;
2107
2108 /* If thing is an immediate then this is a cons */
2109 if (Pointerp(thing)
2110 || ((thing & 3) == 0) /* fixnum */
2111 ||(TypeOf(thing) == type_BaseChar)
2112 || (TypeOf(thing) == type_UnboundMarker))
2113 count = 2;
2114 else
2115 count = sizeOfObject((obj_t) start);
2116
2117 /* Check if the pointer is within this object? */
2118 if ((pointer >= start) && (pointer < (start + count))) {
2119 /* Found it. */
2120 /* fprintf(stderr,"* Found %x in %x %x\n",pointer, start, thing); */
2121 return (start);
2122 }
2123
2124 /* Round up the count */
2125 count = CEILING(count, 2);
2126
2127 start += count;
2128 words -= count;
2129 }
2130 return (NULL);
2131 }
2132
2133 static lispobj *
2134 search_read_only_space(lispobj * pointer)
2135 {
2136 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
2137 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
2138
2139 if ((pointer < start) || (pointer >= end))
2140 return NULL;
2141 return (search_space(start, (pointer + 2) - start, pointer));
2142 }
2143
2144 static lispobj *
2145 search_static_space(lispobj * pointer)
2146 {
2147 lispobj *start = (lispobj *) STATIC_SPACE_START;
2148 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
2149
2150 if ((pointer < start) || (pointer >= end))
2151 return NULL;
2152 return (search_space(start, (pointer + 2) - start, pointer));
2153 }
2154
2155 /* Find the code object for the given pc. Return NULL on failure */
2156 lispobj *
2157 component_ptr_from_pc(lispobj * pc)
2158 {
2159 lispobj *object = NULL;
2160
2161 if (object = search_read_only_space(pc));
2162 else
2163 object = search_static_space(pc);
2164
2165 /* Found anything? */
2166 if (object)
2167 /* Check if it is a code object. */
2168 if (TypeOf(*object) == type_CodeHeader)
2169 return (object);
2170
2171 return (NULL);
2172 }

  ViewVC Help
Powered by ViewVC 1.1.5