/*
* Generational Conservative Garbage Collector for CMUCL x86.
*
* This code was written by Douglas T. Crosher, based on Public Domain
* codes from Carnegie Mellon University. This code has been placed in
* the public domain, and is provided 'as is'.
*
 * $Header: /Volumes/share2/src/cmucl/cvs2git/cvsroot/src/lisp/gencgc.c,v 1.108 2010/07/19 23:08:37 rtoy Exp $
 */
#include <limits.h>
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "gencgc.h"
/*
* This value in a hash table hash-vector means that the key uses
* EQ-based hashing. That is, the key might be using EQ or EQL for
* the test. This MUST match the value used in hash-new.lisp!
*/
#define EQ_BASED_HASH_VALUE 0x80000000
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
__FILE__, __LINE__)
#define set_alloc_pointer(value) \
SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
#define set_current_region_free(value) \
SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
SymbolValue(CURRENT_REGION_FREE_POINTER)
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#elif defined(sparc)
/*
* current_dynamic_space_free_pointer contains the pseudo-atomic
* stuff, so we need to preserve those bits when we give it a value.
* This value better not have any bits set there either!
*/
/*
* On sparc, we don't need to set the alloc_pointer in the code here
* because the alloc pointer (current_dynamic_space_free_pointer) is
* the same as *current-region-free-pointer* and is stored in
* alloc-tn.
*/
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
(current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
#define get_current_region_free() \
((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
(current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
#define get_current_region_free() \
((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#error gencgc is not supported on this platform
/* Define for activating assertions. */
#define GC_ASSERTIONS 1
#endif
/* Check for references to stack-allocated objects. */
#ifdef GC_ASSERTIONS
static void *invalid_stack_start, *invalid_stack_end;
/*
 * Complain about a reference to a stack-allocated (control-stack)
 * object found at *where.  If the referenced object lies in the
 * region known to be dead (invalid_stack_start..invalid_stack_end)
 * this is fatal; otherwise it is only reported.
 *
 * NOTE(review): the scraped copy contained stray line-number junk in
 * this function and was missing the closing brace; both repaired.
 */
static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	/* Classify where the referencing slot itself lives. */
	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START +
					 READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts. Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent. */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if it the reference is from the control stack,
	       because that will happen, and that's ok. Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context. */
	}
#endif
	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
}
#endif /* GC_ASSERTIONS */
/*
 * gc_assert(ex): abort via gc_abort() when the expression is false.
 * Compiles away to a no-op when GC_ASSERTIONS is not defined.
 * (The "#else/#endif" were missing in the scraped copy, leaving two
 * conflicting definitions of the macro.)
 */
#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
        if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif
/*
* The number of generations, an extra is added to this for use as a temp.
*/
#define NUM_GENERATIONS 6
/* Debugging variables. */
/*
* The verbose level. All non-error messages are disabled at level 0;
* and only a few rare messages are printed at level 1.
*/
unsigned gencgc_verbose = 0;
/*
* To enable the use of page protection to help avoid the scavenging
* of pages that don't have pointers to younger generations.
*/
boolean enable_page_protection = TRUE;
/*
* Hunt for pointers to old-space, when GCing generations >= verify_gen.
* Set to NUM_GENERATIONS to disable.
*/
int verify_gens = NUM_GENERATIONS;
/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;
/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
/* Default off; flip the "#if 0" to enable the check on Darwin. */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif
/*
* Enable the printing of a note when code objects are found in the
* dynamic space during a heap verify.
*/
boolean verify_dynamic_code_check = FALSE;
/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;
/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif
/*
* Enable checking that newly allocated regions are zero filled.
*/
/* Zero-fill checking defaults off; enable on Darwin via the "#if 0". */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif
/*
* Enable checking that free pages are zero filled during gc_free_heap
* called after purify.
*/
/* Default off; flip the "#if 0" to enable the check on Darwin. */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif
unsigned large_object_size = 4 * PAGE_SIZE;
/*
* Enable the filtering of stack/register pointers. This could reduce
* the number of invalid pointers accepted. It will probably degrades
* interrupt safety during object initialisation.
*/
boolean enable_pointer_filter = TRUE;
/*
* The total bytes allocated. Seen by (dynamic-usage)
*/
unsigned long bytes_allocated = 0;
/*
* The total amount of bytes ever allocated. Not decreased by GC.
*/
volatile unsigned long long bytes_allocated_sum = 0;
/*
* GC trigger; a value of 0xffffffff represents disabled.
*/
unsigned long auto_gc_trigger = 0xffffffff;
/*
* Number of pages to reserve for heap overflow. We want some space
* available on the heap when we are close to a heap overflow, so we
* can handle the overflow. But how much do we really need? I (rtoy)
* think 256 pages is probably a decent amount. (That's 1 MB for x86,
* 2 MB for sparc, which has 8K pages.)
*/
unsigned long reserved_heap_pages = 256;
/*
* The src. and dest. generations. Set before a GC starts scavenging.
*/
static int from_space;
static int new_space;
/*
* Number of pages within the dynamic heap, setup from the size of the
* dynamic space.
*/
unsigned dynamic_space_pages;
struct page *page_table;
/*
* Heap base, needed for mapping addresses to page structures.
*/
/*
* Calculate the start address for the given page number.
*/
page_address(int page_num)
return heap_base + PAGE_SIZE * page_num;
/*
* Find the page index within the page_table for the given address.
* Returns -1 on failure.
*/
find_page_index(void *addr)
int index = (char *) addr - heap_base;
if (index >= 0) {
index = (unsigned int) index / PAGE_SIZE;
if (index < dynamic_space_pages)
return index;
}
/*
* This routine implements a write barrier used to record stores into
* to boxed regions outside of generation 0. When such a store occurs
* this routine will be automatically invoked by the page fault
* handler. If passed an address outside of the dynamic space, this
* routine will return immediately with a value of 0. Otherwise, the
* page belonging to the address is made writable, the protection
* change is recorded in the garbage collector page table, and a value
* of 1 is returned.
*/
int
gc_write_barrier(void *addr)
{
    int page = find_page_index(addr);

    /* Faults outside the dynamic space are not ours to handle. */
    if (page < 0)
	return 0;

    /* The page should have been marked write protected */
    if (!PAGE_WRITE_PROTECTED(page))
	fprintf(stderr,
		"*** Page fault in page not marked as write protected\n");

    /* Make the page writable again and record in the GC page table
       that its protection was cleared. */
    os_protect((os_vm_address_t) page_address(page), PAGE_SIZE,
	       OS_VM_PROT_ALL);
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
struct generation {
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
/* The first page that gc_alloc checks on its next call. */
int alloc_start_page;
/* The first page that gc_alloc_unboxed checks on its next call. */
int alloc_unboxed_start_page;
/*
* The first page that gc_alloc_large (boxed) considers on its next call.
* Although it always allocates after the boxed_region.
*/
int alloc_large_start_page;
/*
* The first page that gc_alloc_large (unboxed) considers on its next call.
* Although it always allocates after the current_unboxed_region.
*/
int alloc_large_unboxed_start_page;
/* The bytes allocate to this generation. */
int bytes_allocated;
/* The number of bytes at which to trigger a GC */
int gc_trigger;
/* To calculate a new level for gc_trigger */
int bytes_consed_between_gc;
/* The number of GCs since the last raise. */
int num_gc;
/*
* The average age at after which a GC will raise objects to the
* next generation.
*/
int trigger_age;
/*
* The cumulative sum of the bytes allocated to this generation. It
* is cleared after a GC on this generation, and update before new
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC.
*/
int cum_sum_bytes_allocated;
/*
* A minimum average memory age before a GC will occur helps prevent
* a GC when a large number of new live objects have been added, in
* which case a GC could be a waste of time.
*/
double min_av_mem_age;
/*
* An array of generation structures. There needs to be one more
* generation structure than actual generations as the oldest
* generations is temporarily raised then lowered.
*/
static struct generation generations[NUM_GENERATIONS + 1];
/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 * (The closing "};" was missing in the scraped copy.)
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};
/*
* The oldest generation that will currently be GCed by default.
* Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
*
* The default of (NUM_GENERATIONS - 1) enables GC on all generations.
*
* Setting this to 0 effectively disables the generational nature of
* the GC. In some applications generational GC may not be useful
* because there are no long-lived objects.
*
* An intermediate value could be handy after moving long-lived data
* into an older generation so an unnecessary GC of this long-lived
* data can be avoided.
*/
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;
static void scan_weak_tables(void);
static void scan_weak_objects(void);
/*
* Count the number of write protected pages within the given generation.
*/
static int
count_write_protect_generation_pages(int generation)
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
| PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mflags)
cnt++;
return cnt;
/*
* Count the number of pages within the given generation.
*/
static int
count_generation_pages(int generation)
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mflags)
cnt++;
return cnt;
static int
count_dont_move_pages(void)
int i;
int cnt = 0;
int mmask;
mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mmask)
cnt++;
return cnt;
/*
* Work through the pages and add up the number of bytes used for the
* given generation.
*/
static int
generation_bytes_allocated(int generation)
int i;
int bytes_allocated = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++) {
if (PAGE_FLAGS(i, mmask) == mflags)
bytes_allocated += page_table[i].bytes_used;
}
return bytes_allocated;
/*
* Return the average age of the memory in a generation.
*/
static double
gen_av_mem_age(int gen)
if (generations[gen].bytes_allocated == 0)
return 0.0;
return (double) generations[gen].cum_sum_bytes_allocated /
(double) generations[gen].bytes_allocated;
/*
* The verbose argument controls how much to print out:
* 0 for normal level of detail; 1 for debugging.
*/
void
print_generation_stats(int verbose)
#define FPU_STATE_SIZE 27
/*
* Need 512 byte area, aligned on a 16-byte boundary. So allocate
* 512+16 bytes of space and let the routine adjust use the
* appropriate alignment.
*/
#define SSE_STATE_SIZE ((512+16)/4)
int fpu_state[FPU_STATE_SIZE];
extern void sse_save(void *);
extern void sse_restore(void *);
#elif defined(sparc)
/*
* 32 (single-precision) FP registers, and the FP state register.
* But Sparc V9 has 32 double-precision registers (equivalent to 64
* single-precision, but can't be accessed), so we leave enough room
* for that.
*/
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
long long fpu_state[FPU_STATE_SIZE];
#define FPU_STATE_SIZE 32
long long fpu_state[FPU_STATE_SIZE];
#endif
/*
* This code uses the FP instructions which may be setup for Lisp so
* they need to the saved and reset for C.
*/
fpu_save(fpu_state);
#if defined(i386) || defined(__x86_64)
if (fpu_mode == SSE2) {
sse_save(sse_state);
}
#endif
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
/* Number of generations to print out. */
if (verbose)
gens = NUM_GENERATIONS + 1;
else
gens = NUM_GENERATIONS;
/* Print the heap stats */
fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
fprintf(stderr,
" Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
for (i = 0; i < gens; i++) {
int j;
int boxed_cnt = 0;
int unboxed_cnt = 0;
int large_boxed_cnt = 0;
int large_unboxed_cnt = 0;
for (j = 0; j < last_free_page; j++) {
int flags = page_table[j].flags;
if ((flags & PAGE_GENERATION_MASK) == i) {
if (flags & PAGE_ALLOCATED_MASK) {
/*
* Count the number of boxed and unboxed pages within the
* given generation.
*/
if (flags & PAGE_UNBOXED_MASK)
if (flags & PAGE_LARGE_OBJECT_MASK)
large_unboxed_cnt++;
else
unboxed_cnt++;
else if (flags & PAGE_LARGE_OBJECT_MASK)
large_boxed_cnt++;
else
boxed_cnt++;
}
}
gc_assert(generations[i].bytes_allocated ==
generation_bytes_allocated(i));
fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
generations[i].bytes_allocated,
PAGE_SIZE * count_generation_pages(i) -
generations[i].bytes_allocated, generations[i].gc_trigger,
count_write_protect_generation_pages(i), generations[i].num_gc,
gen_av_mem_age(i));
}
fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
fpu_restore(fpu_state);
#if defined(i386) || defined(__x86_64)
if (fpu_mode == SSE2) {
sse_restore(sse_state);
}
#endif
/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.  Writes into *stats; out-of-range gen is silently ignored.
 * (The function's closing brace was missing in the scraped copy.)
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}
void
set_gc_trigger(int gen, int trigger)
if (gen <= NUM_GENERATIONS) {
generations[gen].gc_trigger = trigger;
}
void
set_trigger_age(int gen, int trigger_age)
if (gen <= NUM_GENERATIONS) {
generations[gen].trigger_age = trigger_age;
}
void
set_min_mem_age(int gen, double min_mem_age)
if (gen <= NUM_GENERATIONS) {
generations[gen].min_av_mem_age = min_mem_age;
}
/*
* Allocation routines.
*
*
* To support quick and inline allocation, regions of memory can be
* allocated and then allocated from with just a free pointer and a
* check against an end address.
*
* Since objects can be allocated to spaces with different properties
* e.g. boxed/unboxed, generation, ages; there may need to be many
* allocation regions.
*
* Each allocation region may be start within a partly used page.
* Many features of memory use are noted on a page wise basis,
* E.g. the generation; so if a region starts within an existing
* allocated page it must be consistent with this page.
*
* During the scavenging of the newspace, objects will be transported
* into an allocation region, and pointers updated to point to this
* allocation region. It is possible that these pointers will be
* scavenged again before the allocation region is closed, E.g. due to
* trans_list which jumps all over the place to cleanup the list. It
* is important to be able to determine properties of all objects
* pointed to when scavenging, E.g to detect pointers to the
* oldspace. Thus it's important that the allocation regions have the
* correct properties set when allocated, and not just set when
* closed. The region allocation routines return regions with the
* specified properties, and grab all the pages, setting there
* properties appropriately, except that the amount used is not known.
*
* These regions are used to support quicker allocation using just a
* free pointer. The actual space used by the region is not reflected
* in the pages tables until it is closed. It can't be scavenged until
* closed.
*
* When finished with the region it should be closed, which will
* update the page tables for the actual space used returning unused
* space. Further it may be noted in the new regions which is
* necessary when scavenging the newspace.
*
* Large objects may be allocated directly without an allocation
* region, the page tables are updated immediately.
*
* Unboxed objects don't contain points to other objects so don't need
* scavenging. Further they can't contain pointers to younger
* generations so WP is not needed. By allocating pages to unboxed
* objects the whole page never needs scavenging or write protecting.
*/
/*
* Only using two regions at present, both are for the current
* newspace generation.
*/
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
/*
* X hack. current lisp code uses the following. Need coping in/out.
*/
void *current_region_free_pointer;
void *current_region_end_addr;
/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;
extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);
/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
unsigned long heap_size_mb;
if (msg) {
fprintf(stderr, msg, size);
}
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif
/* Figure out how many MB of heap we have */
heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
heap_size_mb);
/* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
if (reserved_heap_pages == 0) {
fprintf(stderr, "\n Returning to top-level.\n");
do_dynamic_space_overflow_error();
} else {
fprintf(stderr,
" You can control heap size with the -dynamic-space-size commandline option.\n");
do_dynamic_space_overflow_warning();
print_generation_stats(1);
#endif
}
/*
* Find a new region with room for at least the given number of bytes.
*
* It starts looking at the current generations alloc_start_page. So
* may pick up from the previous region if there is enough space. This
* keeps the allocation contiguous when scavenging the newspace.
*
* The alloc_region should have been closed by a call to
* gc_alloc_update_page_tables, and will thus be in an empty state.
*
* To assist the scavenging functions, write protected pages are not
* used. Free pages should not be write protected.
*
* It is critical to the conservative GC that the start of regions be
* known. To help achieve this only small regions are allocated at a
* time.
*
* During scavenging, pointers may be found that point within the
* current region and the page generation must be set so pointers to
* the from space can be recognised. So the generation of pages in
* the region are set to gc_alloc_generation. To prevent another
* allocation call using the same pages, all the pages in the region
* are allocated, although they will initially be empty.
*/
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
int first_page;
int last_page;
int region_size;
int restart_page;
int bytes_found;
int num_pages;
int i;
int mmask, mflags;
/* Shut up some compiler warnings */
last_page = bytes_found = 0;
fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
nbytes, gc_alloc_generation);
/* Check that the region is in a reset state. */
gc_assert(alloc_region->first_page == 0
&& alloc_region->last_page == -1
&& alloc_region->free_pointer == alloc_region->end_addr);
if (unboxed)
restart_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
else
restart_page = generations[gc_alloc_generation].alloc_start_page;
/*
* Search for a contiguous free region of at least nbytes with the
* given properties: boxed/unboxed, generation. First setting up the
* mask and matching flags.
*/
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
| gc_alloc_generation;
do {
first_page = restart_page;
/*
* First search for a page with at least 32 bytes free, that is
* not write protected, or marked dont_move.
*/
while (first_page < dynamic_space_pages) {
int flags = page_table[first_page].flags;
if (!(flags & PAGE_ALLOCATED_MASK)
|| ((flags & mmask) == mflags &&
page_table[first_page].bytes_used < PAGE_SIZE - 32))
break;
first_page++;
}
/* Check for a failure */
if (first_page >= dynamic_space_pages - reserved_heap_pages) {
handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
nbytes);