/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing.  That is, the key might be using EQ or EQL for
 * the test.  This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE     0x80000000
#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)
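
/*
 * A minimal sketch, not part of the collector, of how the
 * pseudo-atomic macros above are meant to be used: allocation runs
 * inside a pseudo-atomic section so that a signal arriving
 * mid-allocation is deferred instead of delivered.  The deferred
 * interrupt hook named below is hypothetical.
 */
#if 0
static void
example_pseudo_atomic_allocation(void)
{
    set_pseudo_atomic_atomic();

    /* ... bump the free pointer and initialise the new object ... */

    clr_pseudo_atomic_atomic();

    /* A signal that arrived while we were pseudo-atomic was only
       recorded; deal with it now. */
    if (get_pseudo_atomic_interrupted()) {
	clr_pseudo_atomic_interrupted();
	/* handle_pending_interrupt();  -- hypothetical hook */
    }
}
#endif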

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
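
/*
 * Worked example (values assumed, for illustration only): with a
 * 3-bit lowtag (lowtag_Mask == 7), pseudo_atomic_Value == 4 and
 * pseudo_atomic_InterruptedValue == 1, a free pointer of 0x40001230
 * is stored as 0x40001234 inside a pseudo-atomic section, and as
 * 0x40001235 if a signal arrives during that section.
 * get_alloc_pointer masks with ~lowtag_Mask, so it still reads
 * 0x40001230.
 */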

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value) 
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions.  */

#if defined(x86) && defined(SOLARIS)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects.  */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) control_stack_end))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + dynamic_space_size))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where <
		 (lispobj *) (STATIC_SPACE_START + static_space_size)) space =
		"static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where <
		 (lispobj *) (READ_ONLY_SPACE_START +
			      read_only_space_size)) space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts.  Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent.  */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (control_stack_end))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)		\
  do {				\
    if (!(ex)) gc_abort ();     \
  } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif
/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;
/*
 * If true, then some debugging information is printed when scavenging
 * static (malloc'ed) arrays.
 */
boolean debug_static_array_p = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;
/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif
/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/* How to have a page zero-filled. */
enum gencgc_unmap_mode {
    /* Unmap and mmap the region to get it zeroed */
    MODE_MAP,

    /* Memset the region to 0 */
    MODE_MEMSET,

    /*
     * Call madvise to allow the kernel to free the memory if needed.
     * But when the region needs to be used, we will zero it if
     * necessary
     */
    MODE_MADVISE,

    /*
     * Like madvise, except we don't actually call madvise; we just
     * lazily zero the region when needed.
     */
    MODE_LAZY,
};

/*
 * Control how freed regions should be zeroed.  Default to MODE_MEMSET
 * for all systems since tests indicate that it is much faster than
 * unmapping and re-mapping it to zero the region.  See enum
 * gencgc_unmap_mode for the other possible options.
 *
 * XXX: Choose the appropriate mode for each OS/arch.
 *
 * MODE_MAP enables unmapping of a page and re-mmapping it to have it
 * zero filled.  Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(DARWIN) || defined(__linux__) || defined(sparc)
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_LAZY;
#else
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_MEMSET;
#endif
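
/*
 * A rough sketch, not the collector's actual code, of what each mode
 * implies when a freed region must read as zeros again.  os_validate
 * and os_invalidate are the os.h mapping primitives; madvise assumes
 * <sys/mman.h>.
 */
#if 0
static void
example_zero_region(char *addr, size_t length)
{
    switch (gencgc_unmap_zero) {
      case MODE_MAP:
	  /* Unmap and re-map: the kernel supplies fresh zero pages. */
	  os_invalidate((os_vm_address_t) addr, length);
	  os_validate((os_vm_address_t) addr, length);
	  break;
      case MODE_MEMSET:
	  /* Zero eagerly in user space. */
	  memset(addr, 0, length);
	  break;
      case MODE_MADVISE:
	  /* Let the kernel reclaim the pages; zero lazily on reuse. */
	  madvise(addr, length, MADV_DONTNEED);
	  break;
      case MODE_LAZY:
	  /* Do nothing now; pages are zeroed on demand when reused. */
	  break;
    }
}
#endif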
/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif
/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif
/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * GC_PAGE_SIZE;
/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;

/*
 * The total bytes allocated. Seen by (dynamic-usage)
 */
unsigned long bytes_allocated = 0;
/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */

volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;
/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;

/*
 * GC structures and variables.
 */
/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;
/*
 * Calculate the start address for the given page number.
 */
static inline char *
page_address(int page_num)
{
    return heap_base + GC_PAGE_SIZE * page_num;
}
/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
static inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / GC_PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}
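
/*
 * Illustration: for any address inside the dynamic space the two
 * helpers above are inverses, i.e.
 *
 *	int page = find_page_index(addr);	(not -1 here)
 *	char *start = page_address(page);
 *	assert(start <= (char *) addr
 *	       && (char *) addr < start + GC_PAGE_SIZE);
 */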
/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0.  When such a store occurs
 * this routine will be automatically invoked by the page fault
 * handler.  If passed an address outside of the dynamic space, this
 * routine will return immediately with a value of 0.  Otherwise, the
 * page belonging to the address is made writable, the protection
 * change is recorded in the garbage collector page table, and a value
 * of 1 is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
	 return 0;
    }

    /* The page should have been marked write protected */
    if (!PAGE_WRITE_PROTECTED(page_index))
	 fprintf(stderr,
		 "*** Page fault in page not marked as write protected\n");

    /* Un-protect the page */
    os_protect((os_vm_address_t) page_address(page_index), GC_PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
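
/*
 * A hedged sketch of the expected caller: the SIGSEGV handler first
 * offers the faulting address to gc_write_barrier and treats the
 * fault as a genuine error only if the barrier declines it.  The
 * handler shown is hypothetical; how the faulting address is obtained
 * varies by OS.
 */
#if 0
static void
example_sigsegv_handler(int signal, siginfo_t *info, void *context)
{
    void *fault_addr = info->si_addr;	/* Needs <signal.h>. */

    if (gc_write_barrier(fault_addr))
	return;		/* Page was unprotected; the store can be retried. */

    lose("Unhandled SIGSEGV at %p\n", fault_addr);
}
#endif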
/*
 * A structure to hold the state of a generation.
 */
#define MEM_AGE_SHIFT 16
#define MEM_AGE_SCALE (1 << MEM_AGE_SHIFT)

struct generation {
    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC */
    int gc_trigger;

    /* To calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     *
     * The age is represented as a fixed-point integer between 0 and
     * MEM_AGE_SCALE - 1, corresponding to an age of 0 to (just less
     * than) 1.
     */
    int min_av_mem_age;
};
/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];
/* Statistics about a generation, extracted from the generations
   array.  This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;

    /* The minimum average memory age before a GC is triggered. */
    int min_av_mem_age;
};

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated.  So make this available (non-static).
 */
int last_free_page;

static void scan_weak_tables(void);
static void scan_weak_objects(void);
/*
 * Misc. heap functions.
 */
/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;
    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;
    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif
/*
 * Return the average age of the memory in a generation.
 */
static int
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0;

    return (((long long) generations[gen].cum_sum_bytes_allocated) << MEM_AGE_SHIFT) /
	generations[gen].bytes_allocated;
}
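
/*
 * Worked example: with MEM_AGE_SHIFT == 16, a generation with
 * bytes_allocated == 4 MB and cum_sum_bytes_allocated == 2 MB yields
 * (2 MB << 16) / 4 MB == 32768, i.e. an average age of
 * 32768 / MEM_AGE_SCALE == 0.5.
 */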

void
save_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode == SSE2) {
        sse_save(state);
    } else {
        fpu_save(state);
    }
#else
    fpu_save(state);
#endif    
}

void
restore_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode == SSE2) {
        sse_restore(state);
    } else {
        fpu_restore(state);
    }
#else
    fpu_restore(state);
#endif
}

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

    FPU_STATE(fpu_state);
    
    /*
     * This code uses the FP instructions which may be setup for Lisp so
     * they need to be saved and reset for C.
     */
    save_fpu_state(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "          Page count (%d KB)\n", GC_PAGE_SIZE / 1024);
    fprintf(stderr,
	    "   Gen  Boxed Unboxed  LB   LUB    Alloc    Waste    Trigger   WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within the
		     * given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK)
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		GC_PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		(double)gen_av_mem_age(i) / MEM_AGE_SCALE);
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);
    restore_fpu_state(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}
void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age * MEM_AGE_SCALE;
    }
}
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so don't need
 * scavenging. Further they can't contain pointers to younger
 * generations so WP is not needed.  By allocating pages to unboxed
 * objects the whole page never needs scavenging or write protecting.
 */
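
/*
 * A minimal sketch of the fast path described above, assuming the
 * free_pointer and end_addr fields of struct alloc_region; the slow
 * path named here is hypothetical.  Allocation is just a pointer bump
 * plus a limit check.
 */
#if 0
static void *
example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *result = region->free_pointer;

	region->free_pointer = new_free;	/* Fast path: bump and go. */
	return result;
    }

    return gc_alloc_slow_path(nbytes);	/* Hypothetical refill path. */
}
#endif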
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
#if 0
dtc's avatar
dtc committed
/*
 * X hack. Current Lisp code uses the following. Needs copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;
extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		"  You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);
    exit(1);
#endif
}
/*
 * Enable debug messages for MODE_MADVISE and MODE_LAZY
 */
boolean gencgc_debug_madvise = FALSE;
static inline void
handle_madvise_first_page(int first_page)
{
    int flags = page_table[first_page].flags;
        
    if (gencgc_debug_madvise) {
        fprintf(stderr, "first_page = %d, FLAGS = %x, orig = %d",
                first_page, flags, page_table[first_page].bytes_used);
    }
    
    if ((flags & PAGE_MADVISE_MASK) && !PAGE_ALLOCATED(first_page)) {
        int *page_start = (int *) page_address(first_page);
        
        if (gencgc_debug_madvise) {
            fprintf(stderr, ": marker = %x", *page_start);
        }
        if (*page_start != 0) {
            memset(page_start, 0, GC_PAGE_SIZE);
            page_table[first_page].flags &= ~PAGE_MADVISE_MASK;
        }
    }
    if (gencgc_debug_madvise) {
        fprintf(stderr, "\n");
    }
}

static void
handle_madvise_other_pages(int first_page, int last_page)
{
    int i;
    
    for (i = first_page + 1; i <= last_page; ++i) {
        if (page_table[i].flags & PAGE_MADVISE_MASK) {
            int *page_start = (int *) page_address(i);

            if (gencgc_debug_madvise) {
                fprintf(stderr, "MADVISE page %d, FLAGS = %x: marker %x\n",
                        i, page_table[i].flags, *page_start);
            }
            if (*page_start != 0) {
                memset(page_start, 0, GC_PAGE_SIZE);
                page_table[i].flags &= ~PAGE_MADVISE_MASK;
            }
        }
    }
}
/*
 * Find a new region with room for at least the given number of bytes.