/* cgc.c -*- Mode: C; comment-column: 40; -*-
 *
 * Conservative Garbage Collector for CMUCL x86.
 *
 * This code is based on software written by William Lott, and
 * Public Domain code from Carnegie Mellon University, and has
 * been placed in the Public Domain.
 *
 * Received from William 27 Jul 95.
 *
 * Debug, FreeBSD hooks, and integration by Paul Werkowski
 *
 */
#include <stdio.h>
#include <assert.h>
#include <signal.h>
#include "os.h"			/* for SetSymbolValue */
#include "globals.h"		/* For dynamic_space_size */
#include "x86-validate.h"	/* for memory layout  */
#include "x86-lispregs.h"
#include "lisp.h"		/* for object defs */
#include "interrupt.h"		/* interrupt_handlers */
#include "internals.h"
#include "cgc.h"

#if !defined MIN
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#endif

#include <unistd.h>
#include <stdlib.h>
#include <string.h>		/* for memset/memmove */
#if defined unix
#include <sys/param.h>
#endif
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>


#define dprintf(t,exp) do { if (t) { printf exp; fflush(stdout); } } while (0)
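/* Note the doubled parens: the whole printf argument list is passed as
 * a single macro argument, e.g. dprintf(cgc_debug, ("r %x\n", r)).
 */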

/* Object representation details. The allocator/collector knows
 * almost nothing about lisp internals and is fairly general.
 */

#define ALIGN_BITS 3
#define ALIGN_BYTES (1<<ALIGN_BITS)
#define ALIGNEDP(addr) ((((int)addr)&(ALIGN_BYTES-1)) == 0)

/* Type of an object. */
typedef struct object {
    long header;
    struct object *data[1];
} *obj_t;

/* Just leave unused space */
#define NOTE_EMPTY(ptr,bytes) {}

/* Collector data structures */

#define BLOCK_BITS 16
#define BLOCK_BYTES (1<<BLOCK_BITS)
#define BLOCK_NUMBER(ptr) (((long)(ptr))>>BLOCK_BITS)
#define BLOCK_ADDRESS(num) ((void *)((num)<<BLOCK_BITS))

#define CHUNK_BITS 9
#define CHUNK_BYTES (1<<CHUNK_BITS)
#define CHUNK_NUMBER(ptr) (((long)(ptr))>>CHUNK_BITS)
#define CHUNK_ADDRESS(num) ((void *)((num)<<CHUNK_BITS))

#define BLOCK_CHUNKS (1<<(BLOCK_BITS-CHUNK_BITS))
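
/* With the values above: 64 KiB blocks and 512-byte chunks, so
 * BLOCK_CHUNKS == 128 chunks per block.
 */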


#define ROUNDDOWN(val,x) ((val)&~((x)-1))
#define ROUNDUP(val,x) ROUNDDOWN((val)+(x)-1,x)
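/* Both assume x is a power of two; e.g. ROUNDUP(513, CHUNK_BYTES) == 1024. */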

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if 0
#define gc_assert(ex) {if (!(ex)) gc_abort();}
#else
#define gc_assert(ex)
#endif

struct cluster {
    /* Link to the next cluster. */
    struct cluster *next;

    /* The number of blocks in this cluster. */
    int num_blocks;

    /* Pointer to the first region. */
    struct region *first_region;

    /* Table indexed by the chunk number of a pointer, minus the */
    /* chunk number of the first region, giving the number of chunks */
    /* back to the chunk holding the header of the region that spans */
    /* that pointer.  One byte may not be enough to get all the way */
    /* back, so after backing up that far, try again. */
    unsigned char region_offset[1];
};
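
/* Example: a region whose header lives at table index k and spans
 * n <= UCHAR_MAX chunks has region_offset[k+i] == i, so a lookup can
 * step straight back to the header.  For larger regions the entries
 * saturate at UCHAR_MAX, and find_region() keeps backing up 255
 * chunks at a time until the stored offset reaches zero.  (See
 * init_region() and find_region().)
 */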

/* The first word of this is arranged to look like a fixnum
 * so as not to confuse 'room'.
 */
struct region {
    unsigned
      res1:2, num_chunks:16, contains_small_objects:1, clean:1, hole:7;
    struct region **prev;
    struct region *next;
    struct space *space;
};

#define REGION_OVERHEAD ROUNDUP(sizeof(struct region), ALIGN_BYTES)


struct space {
    struct region *regions;
    struct region **regions_tail;
    char *alloc_ptr;
    char *alloc_end;
};
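
/* alloc_ptr/alloc_end delimit the unused tail of the space's current
 * small-object region; cgc_alloc() bump-allocates through it.
 */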

/* Chain of all the clusters. */
struct cluster *clusters = NULL;
static int num_clusters = 0;	/* for debugging */
int cgc_debug = 0;		/* maybe set from Lisp */

/* Table indexed by block number giving the cluster that block is part of. */
static struct cluster **block_table = NULL;

/* The actual allocated memory that block_table is offset from. */
static struct cluster **block_table_base = NULL;

/* The maximum bounds on the heap. */
static void *heap_base = NULL;
static void *heap_end = NULL;

/* The two dynamic spaces. */
static struct space space_0 = { NULL };
static struct space space_1 = { NULL };
/* Pointers to whichever dynamic space is currently newspace or oldspace. */
static struct space *newspace = NULL;
static struct space *oldspace = NULL;
/* Free lists of regions. */
static struct region *small_region_free_list = NULL;
static struct region *large_region_free_list = NULL;
static void move_to_newspace(struct region *region);

#if defined TESTING
static void
print_region(struct region *r)
{
    dprintf(1, ("[region %x %d <%x %x> %x]\n",
		r, r->num_chunks, r->prev, r->next, r->space));
}
static void
print_regions(struct region *r, char *str)
{
    printf("Regions %s:\n", str);
    for (; r != NULL; r = r->next)
	print_region(r);
}

static void
print_space(struct space *s)
{
    struct region *r = s->regions;

    dprintf(1, ("[space %x %s %s <%x - %x>]\n",
		s,
		(s == &space_0) ? "S0" : "S1",
		(s == newspace) ? "NewSpace" : "OldSpace",
		s->alloc_ptr, s->alloc_end));
    print_regions(r, "");
}

void
print_spaces(void)
{
    print_space(&space_0);
    print_space(&space_1);
    print_regions(large_region_free_list, "LRFL");
    print_regions(small_region_free_list, "SRFL");
}

void
print_cluster(struct cluster *cluster)
{
    printf("[cluster %x >%x %d]\n", cluster, cluster->next,
	   cluster->num_blocks);
    print_regions(cluster->first_region, "cluster");
}
void
print_clusters(void)
{
    struct cluster *cluster;

    for (cluster = clusters; cluster != NULL; cluster = cluster->next)
	print_cluster(cluster);
}
#endif /* TESTING */

/* Allocation/deallocation routines */

static void
init_region(struct region *region, int nchunks)
{
    int region_block = BLOCK_NUMBER(region);
    struct cluster *cluster = block_table[region_block];
    int offset = CHUNK_NUMBER(region) - CHUNK_NUMBER(cluster->first_region);
    int i;

    dprintf(0, ("init region %x %d\n", region, nchunks));
    *(long *) region = 0;	/* clear fields */
    region->num_chunks = nchunks;
    if (nchunks > UCHAR_MAX) {
	for (i = 0; i < UCHAR_MAX; i++)
	    cluster->region_offset[offset + i] = i;
	for (; i < nchunks; i++)
	    cluster->region_offset[offset + i] = UCHAR_MAX;
    } else {
	for (i = 0; i < nchunks; i++)
	    cluster->region_offset[offset + i] = i;
    }
}

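/* First-fit search of the large-region free list; an oversized free
 * region is split, with the tail left on the free list.
 */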
static struct region *
maybe_alloc_large_region(int nchunks)
{
    struct region *region, **prev;

    prev = &large_region_free_list;
    while ((region = *prev) != NULL) {
	if (region->num_chunks >= nchunks) {
	    if (region->num_chunks == nchunks)
		*prev = region->next;
	    else {
		struct region *new = (struct region *)
		    ((char *) region + nchunks * CHUNK_BYTES);

		init_region(new, region->num_chunks - nchunks);
		new->next = region->next;
		new->prev = NULL;
		new->space = NULL;
		*prev = new;
		region->num_chunks = nchunks;
	    }
	    region->next = NULL;
	    region->prev = NULL;
	    region->space = NULL;
	    return region;
	}
	prev = &region->next;
    }
    return NULL;
}


/* from os_zero */
static void
cgc_zero(os_vm_address_t addr, os_vm_size_t length)
{
    os_vm_address_t block_start = os_round_up_to_page(addr);
    os_vm_address_t end = addr + length;
    os_vm_size_t block_size;

    if (block_start > addr)
	memset((char *) addr, 0, MIN(block_start - addr, length));

    if (block_start < end) {
	length -= block_start - addr;

	block_size = os_trunc_size_to_page(length);

	if (block_size < length)
	    memset((char *) block_start + block_size, 0, length - block_size);

	if (block_size != 0) {
	    /* Now deallocate and allocate the block so that it */
	    /* faults in zero-filled. */

	    os_invalidate(block_start, block_size);
	    addr = os_validate(block_start, block_size);

	    if (addr == NULL || addr != block_start)
		fprintf(stderr,
			"cgc_zero: block moved, 0x%08x ==> 0x%08x!\n",
			block_start, addr);
	}
    }
}

static void
compact_cluster(struct cluster *cluster)
{
    int show = 0;
    struct region *region = cluster->first_region;
    struct region *end =
	(struct region *) ((char *) region + cluster->num_blocks * BLOCK_BYTES);
    int grown = 0;
    unsigned max_chunks = cluster->num_blocks * BLOCK_CHUNKS;
    struct region *large_additions = NULL;
    struct region **large_prev = &large_additions;
    struct region *small_additions = NULL;
    struct region **small_prev = &small_additions;

    dprintf(show, ("compact cluster %x\n", cluster));
    while (region < end) {
	struct region *next = (struct region *)
	    ((char *) region + region->num_chunks * CHUNK_BYTES);

	if (region->space != newspace) {	/* was == NULL */
	    if (next < end && next->space != newspace) {	/* was == NULL */
		gc_assert(region >= cluster->first_region);
		gc_assert(region->space == NULL);
		gc_assert(next->space == NULL);
		gc_assert(region->num_chunks > 0);
		gc_assert(next->num_chunks > 0);
		gc_assert((region->num_chunks + next->num_chunks) <=
			  max_chunks);
		region->num_chunks += next->num_chunks;
		grown = 1;
	    } else {
		if (grown) {
		    init_region(region, region->num_chunks);
		    region->space = NULL;
		    grown = 0;
		}
		{
		    int ovh = REGION_OVERHEAD;

		    cgc_zero((os_vm_address_t) ((char *) region + ovh),
			     (os_vm_size_t) (region->num_chunks * CHUNK_BYTES) -
			     ovh);
		}

		if (region->num_chunks == 1) {
		    *small_prev = region;
		    small_prev = &region->next;
		} else {
		    *large_prev = region;
		    large_prev = &region->next;
		}
		region = next;
	    }
	} else
	    region = next;
    }

    *large_prev = large_region_free_list;
    large_region_free_list = large_additions;
    *small_prev = small_region_free_list;
    small_region_free_list = small_additions;
}

static void
compact_free_regions(void)
{
    struct cluster *cluster;

    large_region_free_list = NULL;
    small_region_free_list = NULL;

    for (cluster = clusters; cluster != NULL; cluster = cluster->next)
	compact_cluster(cluster);
}

/* WL code arranged to allocate new space via the sbrk() mechanism.
 * However, I am going to start by allocating from the standard dynamic
 * space. The idea is to use the normal allocation scheme for initial
 * system build and switch to the cgc allocator when starting up a
 * saved image when dynamic space is hopefully clean.
 */
static struct region *
new_region(int nblocks)
{
    /* take from existing dynamic space */
    char *new = (char *) SymbolValue(ALLOCATION_POINTER);
    struct region *region =
	(struct region *) ROUNDUP((long) new, BLOCK_BYTES);
    int bn = BLOCK_NUMBER(region);

    new += (nblocks * BLOCK_BYTES + ((char *) region - new));
    SetSymbolValue(ALLOCATION_POINTER, (lispobj) new);
    return region;
}

static void
new_cluster(int min_blocks)
{
    int nblocks = min_blocks < 4 ? 4 : min_blocks;
    int nchunks = nblocks << (BLOCK_BITS - CHUNK_BITS);
    int i;
    struct cluster *cluster = malloc(sizeof(struct cluster) + nchunks - 1);
    struct region *region = new_region(nblocks);

    int bn = BLOCK_NUMBER(region);

    dprintf(cgc_debug, ("new cluster %x region@%x\n", cluster, region));
    for (i = 0; i < nblocks; i++)
	block_table[bn + i] = cluster;

    num_clusters++;
    cluster->next = clusters;
    clusters = cluster;
    cluster->num_blocks = nblocks;
    cluster->first_region = region;

    init_region(region, nchunks);

    region->next = large_region_free_list;
    large_region_free_list = region;
    region->prev = NULL;
    region->space = NULL;
}

unsigned long bytes_allocated = 0;	/* Seen by (dynamic-usage) */
static unsigned long auto_gc_trigger = 0;
static int maybe_gc_called = 0;

static struct region *
alloc_large_region(int nchunks)
{
    struct region *region;

    region = maybe_alloc_large_region(nchunks);

    if (region == NULL) {
	new_cluster((nchunks + BLOCK_CHUNKS - 1) >> (BLOCK_BITS - CHUNK_BITS));
	region = maybe_alloc_large_region(nchunks);
	gc_assert(region != NULL);
    }
    gc_assert(region->space == NULL);
    return region;
}

static struct region *
alloc_small_region(void)
{
    struct region *region = small_region_free_list;

    if (region == NULL)
	region = alloc_large_region(1);
    else
	small_region_free_list = region->next;
    region->next = NULL;
    region->prev = NULL;
    region->space = NULL;
    move_to_newspace(region);
    return region;
}

static int chunks_freed = 0;

static void
free_region(struct region *region)
{
    gc_assert(region->space && region->space == oldspace);
    gc_assert(region->num_chunks > 0);

    region->space = NULL;	/* for compact_cluster? */
    region->prev = NULL;	/* housekeeping I hope */
    chunks_freed += region->num_chunks;

    if (region->num_chunks == 1) {
	region->next = small_region_free_list;
	small_region_free_list = region;
    } else {
	region->next = large_region_free_list;
	large_region_free_list = region;
    }
}

static void *
alloc_large(int nbytes)
{
    int nchunks = (nbytes + REGION_OVERHEAD + CHUNK_BYTES - 1) >> CHUNK_BITS;
    struct region *region = alloc_large_region(nchunks);
    region->contains_small_objects = 0;
    region->next = NULL;
    region->prev = NULL;
    region->space = NULL;
    bytes_allocated += region->num_chunks * CHUNK_BYTES;
    move_to_newspace(region);
    return (char *) region + REGION_OVERHEAD;
}

void *
cgc_alloc(int nbytes)
{
    void *res;

    dprintf(0, ("alloc %d\n", nbytes));

    if (nbytes > (CHUNK_BYTES - REGION_OVERHEAD))
	res = alloc_large(nbytes);
    else {
	struct space *space = newspace;

	if ((space->alloc_ptr + nbytes) > space->alloc_end) {
	    struct region *region;

	    if (space->alloc_ptr != NULL) {
		int hole = space->alloc_end - space->alloc_ptr;

		if (hole >= ALIGN_BYTES)
		    /* This wastes the space, eg suppose one cons
		     * has been allocated then a request for
		     * a maximum sized small obj comes in. I'd like
		     * to remember that there is still a lot of
		     * room left in this region. Maybe I could actually
		     * use the small_region_free_list in some way.
		     */
		    NOTE_EMPTY(space->alloc_ptr, hole);
	    }
	    region = alloc_small_region();
	    region->contains_small_objects = 1;
	    space->alloc_ptr = (char *) region + REGION_OVERHEAD;
	    space->alloc_end = (char *) region + CHUNK_BYTES;
	    bytes_allocated += region->num_chunks * CHUNK_BYTES;
	}

	res = space->alloc_ptr;
	space->alloc_ptr += ROUNDUP(nbytes, ALIGN_BYTES);
    }

    return res;
}


static void
move_to_newspace(struct region *region)
{
    /* (maybe) unlink region from oldspace and add to tail of 
     * newspace regions. Don't attempt to move a region that
     * is already in newspace.
     */
    struct space *space = newspace;

    if (region->space == oldspace) {
	/* Remove region from list. The prev slot holds
	 * the address of the 'next' slot of the previous
	 * list entry, not a pointer to that region (why?)
	 */
	*region->prev = region->next;
	if (region->next)
	    region->next->prev = region->prev;
	if (region->space->regions_tail == &region->next)
	    region->space->regions_tail = region->prev;
    }
    /* Append to newspace unless it has already been promoted. */
    if (region->space != newspace) {
	region->prev = space->regions_tail;
	region->next = NULL;
	*space->regions_tail = region;
	space->regions_tail = &region->next;
	region->space = space;
    }
}

static struct region *
find_region(void *ptr)
{
    struct cluster *cluster;
    int cluster_chunk_num;
    int chunk_num;
    unsigned char delta;

    ptr = (void *) ((int) ptr & ~0x3);
    if (ptr < heap_base || ptr >= heap_end)
	return NULL;

    cluster = block_table[BLOCK_NUMBER(ptr)];
    if (cluster == NULL)
	return NULL;

    if (ptr < (void *) cluster->first_region)
	return NULL;

    cluster_chunk_num = CHUNK_NUMBER(cluster->first_region);
    chunk_num = CHUNK_NUMBER(ptr);

    while ((delta = cluster->region_offset[chunk_num - cluster_chunk_num]) != 0)
	chunk_num -= delta;

    return CHUNK_ADDRESS(chunk_num);
}

/* Interface to std collector */
static inline boolean
from_space_p(lispobj obj)
{
    struct region *region = find_region((void *) obj);

    return (region != NULL && region->space == oldspace);
}
static inline boolean
new_space_p(lispobj obj)
{
    struct region *region = find_region((void *) obj);

    return (region != NULL && region->space == newspace);
}
static inline boolean
static_space_p(lispobj obj)
{
    return (STATIC_SPACE_START < obj
	    && obj < SymbolValue(STATIC_SPACE_FREE_POINTER));
}

/* Predicate that returns true if an object is a pointer. */
#undef  POINTERP
#define POINTERP(obj) Pointerp((obj)->header)

/* Predicate that returns true if an object has been forwarded. */
#define FORWARDED(obj) ((obj_t)(obj)->header == (obj_t)0x1)

/* Returns the forwarding pointer for the given object. */
#define FORWARDING_PTR(obj) ((lispobj)(obj)->data[0])

/* Marks obj as forwarded to new */
#define DEPOSIT_FORWARDING_PTR(obj,new) \
 ((obj_t)(obj)->header = 0x1, (obj_t)(obj)->data[0] = (obj_t)new)
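
/* A minimal sketch (never compiled in) of how the three forwarding
 * macros above combine when transporting an object.  The helper name
 * is hypothetical; the real logic lives in scavenge_pointer() and
 * copy() below.
 */
#if 0
static void
transport_sketch(lispobj * addr)
{
    obj_t obj = (obj_t) PTR(*addr);

    if (FORWARDED(obj))
	*addr = FORWARDING_PTR(obj);	/* already moved: reuse the new copy */
    else {
	lispobj new = copy(*addr);	/* copy the object into newspace */

	DEPOSIT_FORWARDING_PTR(obj, new);
	*addr = new;
    }
}
#endif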

/* Returns an obj_t for the object starting at addr */
#define OBJECT_AT(addr) ((obj_t)(addr))

/* Returns the size (in bytes) of obj. */
#define OBJECT_SIZE(obj) (sizeOfObject((obj_t)obj)<<2)

/* Scavenges an object. */
#define SCAVENGE_OBJECT(obj) scavengex((lispobj*)obj)

#if 0
/* Makes a region of memory look like some kind of object. */
#define NOTE_EMPTY(ptr,bytes) \
    (((obj_t)ptr)->header = (((bytes+ALIGN_BYTES-1)>>ALIGN_BITS)<<8) | 1)
#endif

static unsigned long bytes_copied = 0;
#define HAVE_FASTCOPY
#if defined HAVE_FASTCOPY
#define COPYDUAL(a,b,c) fastcopy16(a,b,c)
void fastcopy16(void *, void *, size_t);
#else
#define COPYDUAL(a,b,c) memmove(a,b,c)
#endif
static inline lispobj
copy(lispobj taggedobj)
{
    obj_t source = (obj_t) PTR(taggedobj);
    int nbytes = OBJECT_SIZE(source);

    gc_assert(Pointerp(taggedobj));
    gc_assert(!(nbytes & (ALIGN_BYTES - 1)));
    {
	int lowtag = LowtagOf(taggedobj);
	obj_t newobj = cgc_alloc(nbytes);

	COPYDUAL(newobj, source, nbytes);
	bytes_copied += nbytes;
	return ((lispobj) newobj | lowtag);
    }
}

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
#define NWORDS(x,y) (CEILING((x),(y)) / (y))
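/* E.g. NWORDS(65, 32) == 3: 65 one-bit elements need three 32-bit words. */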

#define WEAK_POINTER_NWORDS \
	CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
static struct weak_pointer *weak_pointers;

/* Scavenging:
 * CMU CL objects can be classified as BOXED, UNBOXED or other.
 * Boxed objects have a header containing length and type followed
 * by LENGTH tagged object descriptors which may be pointers.
 * UNBOXED objects have a header but the data is other than
 * tagged descriptors, such as floats, bignums, saps or code.
 * Others (code) contain a mix of boxed and unboxed and some
 * (cons) are like BOXED but without header. The scavenger needs
 * to consider these different kinds of objects. I will use a
 * table indexed by type to detect the simple cases of boxed
 * or unboxed.
 */
#define IMMED_OR_LOSE(thing) gc_assert(sct[TypeOf(thing)].sc_kind == SC_IMMED)
static void scavenge_pointer(lispobj *);
static int noise = 0;

typedef struct {
    unsigned sc_kind:3, ve_l2bits:5;
} OSC_t;

OSC_t
make_OSC(int kind, int log2bits)
{
    OSC_t thing;

    thing.sc_kind = kind;
    thing.ve_l2bits = log2bits;
    return thing;
}
#define SETSCT(indx,kind,logbits) sct[indx] = make_OSC(kind,logbits)
#define SC_ISBOXED 1
#define SC_UNBOXED 2
#define SC_IMMED   3
#define SC_POINTER 4
#define SC_VECTOR  5
#define SC_STRING  6
#define SC_OTHER   7
#define SC_LOSER   0
static OSC_t sct[256];
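
/* sct[] maps the 8-bit type code of a header or immediate to its
 * storage class; for vectors, ve_l2bits is log2 of the bits per
 * element, which sizeOfObject() below uses to compute sizes.
 */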

int
sizeOfObject(obj_t obj)
{
    int obj_type = TypeOf(obj->header);
    OSC_t class = sct[obj_type];
    struct vector *vector;
    int length = 1;
    int nwords = 1;

    switch (class.sc_kind) {
      case SC_POINTER:
      case SC_IMMED:
	  return 1;
      case SC_ISBOXED:
      case SC_UNBOXED:
	  gc_assert(HeaderValue(obj->header) > 0);
	  nwords = length = HeaderValue(obj->header) + 1;
	  break;
      case SC_STRING:
      case SC_VECTOR:
	  {
	      int log2bits = class.ve_l2bits;
	      int bits_per_el = 1 << log2bits;
	      int extra = 0;
	      int els_per_word = 1 << (5 - log2bits);

	      if (log2bits > 5) {
		  els_per_word = 1;
		  extra = log2bits - 5;
	      }
	      length = ((struct vector *) obj)->length;
	      length = fixnum_value(length);	/* Zero Length IS valid */
	      length += (class.sc_kind == SC_STRING);
	      length <<= extra;
	      nwords = NWORDS(length, els_per_word);
	      nwords += 2;	/* header + length */
	  }
	  break;
      case SC_OTHER:
	  switch (obj_type) {
	    case type_CodeHeader:
		{
		    struct code *code;
		    int nheader_words, ncode_words;

		    code = (struct code *) obj;
		    ncode_words = fixnum_value(code->code_size);
		    nheader_words = HeaderValue(code->header);
		    nwords = ncode_words + nheader_words;
		} break;
	    default:
		fprintf(stderr, "GC losage: no size for other type %d\n",
			obj_type);
		gc_abort();
	  }
	  break;
      default:
	  fprintf(stderr, "GC losage: no size for other type %d\n", obj_type);
	  gc_abort();
    }
    return CEILING(nwords, 2);
}

/* Initialize the storage-class table. */
static void
init_osc(void)
{
    int i;

    for (i = 0; i < 256; i++)
	SETSCT(i, SC_LOSER, 0);
    for (i = 0; i < 32; i++) {
	SETSCT(type_EvenFixnum | (i << 3), SC_IMMED, 0);
	SETSCT(type_FunctionPointer | (i << 3), SC_POINTER, 0);
	/* OtherImmediate0 */
	SETSCT(type_ListPointer | (i << 3), SC_POINTER, 0);
	SETSCT(type_OddFixnum | (i << 3), SC_IMMED, 0);
	SETSCT(type_InstancePointer | (i << 3), SC_POINTER, 0);
	/* OtherImmediate1 */
	SETSCT(type_OtherPointer | (i << 3), SC_POINTER, 0);
    }
    SETSCT(type_Bignum, SC_UNBOXED, 0);
    SETSCT(type_Ratio, SC_ISBOXED, 0);
    SETSCT(type_SingleFloat, SC_UNBOXED, 0);
    SETSCT(type_DoubleFloat, SC_UNBOXED, 0);
#if defined type_ComplexSingleFloat
    SETSCT(type_ComplexSingleFloat, SC_UNBOXED, 0);
#endif
#if defined type_ComplexDoubleFloat
    SETSCT(type_ComplexDoubleFloat, SC_UNBOXED, 0);
#endif
    SETSCT(type_Complex, SC_ISBOXED, 0);
    SETSCT(type_SimpleArray, SC_ISBOXED, 0);
    SETSCT(type_SimpleString, SC_STRING, 3);
    SETSCT(type_SimpleBitVector, SC_VECTOR, 0);
    SETSCT(type_SimpleVector, SC_VECTOR, 5);
    SETSCT(type_SimpleArrayUnsignedByte2, SC_VECTOR, 1);
    SETSCT(type_SimpleArrayUnsignedByte4, SC_VECTOR, 2);
    SETSCT(type_SimpleArrayUnsignedByte8, SC_VECTOR, 3);
    SETSCT(type_SimpleArrayUnsignedByte16, SC_VECTOR, 4);
    SETSCT(type_SimpleArrayUnsignedByte32, SC_VECTOR, 5);
#if defined type_SimpleArraySignedByte8
    SETSCT(type_SimpleArraySignedByte8, SC_VECTOR, 3);
#endif
#if defined type_SimpleArraySignedByte16
    SETSCT(type_SimpleArraySignedByte16, SC_VECTOR, 4);
#endif
#if defined type_SimpleArraySignedByte30
    SETSCT(type_SimpleArraySignedByte30, SC_VECTOR, 5);
#endif
#if defined type_SimpleArraySignedByte32
    SETSCT(type_SimpleArraySignedByte32, SC_VECTOR, 5);
#endif
    SETSCT(type_SimpleArraySingleFloat, SC_VECTOR, 5);
    SETSCT(type_SimpleArrayDoubleFloat, SC_VECTOR, 6);
#if defined type_SimpleArrayComplexSingleFloat
    SETSCT(type_SimpleArrayComplexSingleFloat, SC_VECTOR, 6);
#endif
#if defined type_SimpleArrayComplexDoubleFloat
    SETSCT(type_SimpleArrayComplexDoubleFloat, SC_VECTOR, 7);
#endif
    SETSCT(type_ComplexString, SC_ISBOXED, 0);
    SETSCT(type_ComplexBitVector, SC_ISBOXED, 0);
    SETSCT(type_ComplexVector, SC_ISBOXED, 0);
    SETSCT(type_ComplexArray, SC_ISBOXED, 0);
    SETSCT(type_CodeHeader, SC_OTHER, 0);
    SETSCT(type_FunctionHeader, SC_OTHER, 0);
    SETSCT(type_ClosureFunctionHeader, SC_OTHER, 0);
    SETSCT(type_ReturnPcHeader, SC_OTHER, 0);
    SETSCT(type_ClosureHeader, SC_ISBOXED, 0);
    SETSCT(type_FuncallableInstanceHeader, SC_ISBOXED, 0);
    SETSCT(type_ByteCodeFunction, SC_ISBOXED, 0);
    SETSCT(type_ByteCodeClosure, SC_ISBOXED, 0);
    SETSCT(type_DylanFunctionHeader, SC_ISBOXED, 0);

    SETSCT(type_ValueCellHeader, SC_ISBOXED, 0);
    SETSCT(type_SymbolHeader, SC_ISBOXED, 0);
    SETSCT(type_BaseChar, SC_IMMED, 0);
    SETSCT(type_Sap, SC_UNBOXED, 0);
    SETSCT(type_UnboundMarker, SC_IMMED, 0);
    SETSCT(type_WeakPointer, SC_UNBOXED, 0);
    SETSCT(type_InstanceHeader, SC_ISBOXED, 0);
    SETSCT(type_Fdefn, SC_ISBOXED, 0);
}

static lispobj *scavenge(lispobj *, int);
static lispobj *scavenge_object(lispobj *);
static lispobj *scavengex(lispobj *);

static inline void
scavenge_1word_obj(lispobj * addr)
{
    if (Pointerp(*addr)) {
	if (*addr != NIL && *addr != T)
	    scavenge_pointer(addr);
    } else
	IMMED_OR_LOSE(*addr);
}
static int debug_code = 0;
static int
scav_code_header(lispobj * where)
{
    lispobj object = *where;
    struct code *code;
    int i, nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    dprintf(0, ("code: %x %x\n", where, object));
    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);
    /* Scavenge the boxed section of the code data block */
    /* NOTE: seeing a problem where the trace_table_offset slot
     * is a bogus list pointer instead of a fixnum such that 
     * junk gets moved to newspace which causes problems later.
     * Purify doesn't look at that slot (a bug?). Need
     * to figure out how it happens. Ans: from loading top-level
     * forms that init byte-compiled functions like "defun fcn".
     * Fix the loader to not do this and save some space!
     */
    for (i = 1; i < nheader_words; i++)
	scavenge_1word_obj(where + i);

    /* Scavenge the boxed section of each function object in the 
     * code data block.
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
	scavenge_1word_obj(&fheaderp->name);
	scavenge_1word_obj(&fheaderp->arglist);
	scavenge_1word_obj(&fheaderp->type);
	fheaderl = fheaderp->next;
    }
    return nwords;
}

#define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
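/* On the x86 port the function slot of a closure holds the raw
 * address of the first instruction rather than a tagged pointer;
 * RAW_ADDR_OFFSET is the known distance from that address back to the
 * tagged function object (see scav_closure_header() below).
 */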
#ifdef i386
static void
scavenge_fcn_header(struct function *object)
{
    struct function *fheader = object;
    unsigned long offset = HeaderValue(fheader->header) * 4;

    /* Ok, we don't transport code here, but we do need to
     * scavenge the constants and functions (of which this is one).
     * This should be done as part of scavenging a live code object
     * and we could now be trying to do CPR on a corpse!
     */
    struct code *code = (struct code *) ((unsigned long) fheader - offset);

    gc_assert(TypeOf(fheader->header) == type_FunctionHeader);
    scav_code_header((lispobj *) code);
}

static int docode = 0;		/* maybe not needed */
static int
scav_closure_header(struct closure *closure)
{
    /* Could also be a funcallable_instance. The x86 port has the
     * raw code address in the function slot, not a lisp object.
     * However, the function object is a known distance from the code.
     */
    lispobj fun;
    int i, words;

    gc_assert(ALIGNEDP(closure));
    words = HeaderValue(closure->header);
    fun = closure->function - RAW_ADDR_OFFSET;
    /* This needs to be done to get at live code. I now have no
     * way to know if this has already been scavenged so I assume
     * that it hasn't. Code that has been seen by purify is
     * supposed RO and doesn't (shouldn't) need to be looked at
     * so this maybe really redundant.
     *
     * I have seen one case where FI was incomplete with function
     * and lexenv slots == 0! Is this a bug?
     *
     * Update, it appears this is not needed. I will disable execution
     * by default but leave the code here in case something breaks.
     */
    if (docode && static_space_p(closure->function))
	scavenge_fcn_header((struct function *) PTR(fun));
    else			/* "normal" */
	scavenge_1word_obj(&fun);

    /* Now the boxed part of the closure header. */
    for (i = 0; i < words - 1; i++)
	scavenge_1word_obj(&closure->info[i]);

    return CEILING(words + 1, 2);
}
static int fnoise = 0;		/* experimental */
static int
scav_fdefn(lispobj * where)
{