/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 * $Header: /Volumes/share2/src/cmucl/cvs2git/cvsroot/src/lisp/gencgc.c,v 1.72 2006/07/18 23:28:48 rtoy Exp $
 *
 */
#include <string.h>
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

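/*
 * Illustrative sketch (kept under #if 0, not compiled): on sparc the
 * pseudo-atomic flags live in the low lowtag bits of
 * current_dynamic_space_free_pointer, so the macros above just mask
 * them in and out.  The address below and the function name
 * example_pseudo_atomic_packing are made up for the example.
 */
#if 0
static void
example_pseudo_atomic_packing(void)
{
    /* Pretend the free pointer currently sits at 0x40001000. */
    current_dynamic_space_free_pointer = (lispobj *) 0x40001000;

    set_pseudo_atomic_atomic();	/* flag bit set: the word is now 0x40001004 */
    /* get_alloc_pointer() masks the flag bits back off and still yields
       0x40001000, the real allocation pointer. */
    clr_pseudo_atomic_atomic();	/* the word is 0x40001000 again */
}
#endif
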
#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value) 
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

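/*
 * Illustrative sketch (kept under #if 0, not compiled): the macros above
 * let allocation be bracketed in a pseudo-atomic section, so a GC
 * triggered by a signal never sees a half-updated region.
 * example_inline_alloc is a made-up name, and this is only a hedged
 * outline of the fast path; the real one also checks the region end
 * address before committing the pointer bump.
 */
#if 0
static char *
example_inline_alloc(int nbytes)
{
    char *result;

    set_pseudo_atomic_atomic();
    result = (char *) get_current_region_free();
    set_current_region_free((lispobj) (result + nbytes));
    clr_pseudo_atomic_atomic();

    if (get_pseudo_atomic_interrupted()) {
	/* A signal arrived while we were pseudo-atomic; the deferred
	   handler would be run here. */
	clr_pseudo_atomic_interrupted();
    }
    return result;
}
#endif
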
/* Define for activating assertions.  */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects.  */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where <
		 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
		"static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where <
		 (lispobj *) (READ_ONLY_SPACE_START +
			      READ_ONLY_SPACE_SIZE)) space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts.  Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent.  */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)		\
  do {				\
    if (!(ex)) gc_abort ();     \
  } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif

/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;
/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;
/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif
/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;
/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif
/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif
/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;
/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;

/*
 * The total bytes allocated. Seen by (dynamic-usage)
 */
unsigned long bytes_allocated = 0;
/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */

volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;
/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;

/*
 * GC structures and variables.
 */
/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;
/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;
    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }
    return -1;
}

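/*
 * Illustrative sketch (kept under #if 0, not compiled): page_address and
 * find_page_index invert each other for any address inside the dynamic
 * space.  The page number 42 and the name example_page_mapping are made
 * up for the example.
 */
#if 0
static void
example_page_mapping(void)
{
    int page = 42;
    char *addr = page_address(page);

    /* Every byte of the page maps back to the same index ... */
    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + PAGE_SIZE - 1) == page);
    /* ... and an address outside the dynamic space gives -1. */
    gc_assert(find_page_index((void *) 0) == -1);
}
#endif
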
/*
 * A structure to hold the state of a generation.
 */
struct generation {
    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC */
    int gc_trigger;

    /* To calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];
/* Statistics about a generation, extracted from the generations
   array.  This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;

/*
 * Misc. heap functions.
 */
/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;
    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

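/*
 * Illustrative sketch (kept under #if 0, not compiled): the counting
 * loops above all share one idiom -- mmask selects the flag bits that
 * matter and mflags gives the value those bits must have, so a page
 * matches exactly when PAGE_FLAGS(page, mmask) == mflags.  The
 * generation number 2 and the helper name below are made up.
 */
#if 0
static boolean
example_page_is_unboxed_gen2(int page)
{
    int mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    int mflags = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | 2;

    /* True when the page is allocated, holds unboxed data, and belongs
       to generation 2. */
    return PAGE_FLAGS(page, mmask) == mflags;
}
#endif
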
/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif
/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}

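/*
 * Illustrative sketch (kept under #if 0, not compiled): if 30 MB have
 * been consed into a generation since its last GC
 * (cum_sum_bytes_allocated) and it currently holds 10 MB
 * (bytes_allocated), gen_av_mem_age returns 3.0.  The check below is a
 * hedged reading of how these fields gate collection later in the file:
 * a generation is only worth collecting once it is over its trigger and
 * its contents are old enough on average.  example_worth_collecting is
 * a made-up name.
 */
#if 0
static boolean
example_worth_collecting(int gen)
{
    return generations[gen].bytes_allocated > generations[gen].gc_trigger
	&& gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}
#endif
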
/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp, so
     * they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "          Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
	    "   Gen  Boxed Unboxed  LB   LUB    Alloc    Waste    Trigger   WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within the
		     * given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK)
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);
    fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}
void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.
 * Many features of memory use are noted on a page wise basis,
 * E.g. the generation; so if a region starts within an existing
 * allocated page it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, E.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, E.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the pages tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used returning unused
 * space. Further, it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so don't need
 * scavenging. Further they can't contain pointers to younger
 * generations so WP is not needed.  By allocating pages to unboxed
 * objects the whole page never needs scavenging or write protecting.
 */
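
/*
 * Illustrative sketch (kept under #if 0, not compiled): allocating from
 * an open region is just a pointer bump against the end address; only
 * when the region is exhausted does the slow path (gc_alloc_new_region
 * and friends below) have to run.  example_region_alloc is a made-up
 * name, but the fields are those of struct alloc_region.
 */
#if 0
static void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *result = region->free_pointer;

	region->free_pointer = new_free;	/* fast path: bump */
	return result;
    }
    return NULL;	/* slow path: a new region must be opened first */
}
#endif
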
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;
extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		"  You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);
    exit(1);
#endif
}
/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So it
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region are set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif
    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;
    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;
    do {
	first_page = restart_page;
	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */

	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;
	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}
	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size.  It
	 * tries to keeps going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;