/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 */
#include <limits.h>
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "gencgc.h"
/*
* This value in a hash table hash-vector means that the key uses
* EQ-based hashing. That is, the key might be using EQ or EQL for
* the test. This MUST match the value used in hash-new.lisp!
*/
#define EQ_BASED_HASH_VALUE 0x80000000
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
__FILE__, __LINE__)
#define set_alloc_pointer(value) \
SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
#define set_current_region_free(value) \
SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
SymbolValue(CURRENT_REGION_FREE_POINTER)
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#elif defined(sparc)
/*
* current_dynamic_space_free_pointer contains the pseudo-atomic
* stuff, so we need to preserve those bits when we give it a value.
* This value better not have any bits set there either!
*/
/*
* On sparc, we don't need to set the alloc_pointer in the code here
* because the alloc pointer (current_dynamic_space_free_pointer) is
* the same as *current-region-free-pointer* and is stored in
* alloc-tn.
*/
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
(current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
#define get_current_region_free() \
((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
/*
 * NOTE(review): stray line-number artifacts removed here; the original
 * source lines for this region are missing from this excerpt.
 */
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
(current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
#define set_current_region_free(value) \
current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
#define get_current_region_free() \
((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
#define set_current_region_end(value) \
SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#error gencgc is not supported on this platform
/* Define for activating assertions. */
/* Check for references to stack-allocated objects. */
static void *invalid_stack_start, *invalid_stack_end;
/*
 * Check whether OBJ, stored at location WHERE, points into the control
 * stack.  If it points into the known-invalid stack region, an escaped
 * stack-allocated object has been found and we abort via lose();
 * otherwise a diagnostic is printed (except for benign cases).
 */
static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    /* NOTE(review): the lower-bound test was lost from this
	       excerpt and is reconstructed from the symmetric test in
	       the #ifndef i386 branch below -- confirm. */
	    (p >= (void *) control_stack
	     && p < (void *) control_stack_end))) {
	char *space;

	/* Classify the location WHERE the reference was found. */
	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + dynamic_space_size))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where <
		 (lispobj *) (STATIC_SPACE_START + static_space_size))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where <
		 (lispobj *) (READ_ONLY_SPACE_START + read_only_space_size))
	    space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts.  Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent. */
	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) control_stack
		  && where < (lispobj *) (control_stack_end))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif
	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
}
#if defined(x86) && defined(SOLARIS)
#define DEFAULT_GC_ASSERT_LEVEL 1
#else
#define DEFAULT_GC_ASSERT_LEVEL 0
#endif
int gc_assert_level = DEFAULT_GC_ASSERT_LEVEL;
#define gc_assert(ex) \
do { \
if (!(ex)) gc_abort (); \
} while (0)
/*
* The number of generations, an extra is added to this for use as a temp.
*/
#define NUM_GENERATIONS 6
/* Debugging variables. */
/*
* The verbose level. All non-error messages are disabled at level 0;
* and only a few rare messages are printed at level 1.
*/
unsigned gencgc_verbose = 0;
/*
* If true, then some debugging information is printed when scavenging
* static (malloc'ed) arrays.
*/
boolean debug_static_array_p = 0;
/*
* To enable the use of page protection to help avoid the scavenging
* of pages that don't have pointers to younger generations.
*/
boolean enable_page_protection = TRUE;
/*
* Hunt for pointers to old-space, when GCing generations >= verify_gen.
* Set to NUM_GENERATIONS to disable.
*/
int verify_gens = NUM_GENERATIONS;
/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
int pre_verify_gen_0 = FALSE;
* Enable checking for bad pointers after gc_free_heap called from purify.
#if 0 && defined(DARWIN)
int verify_after_free_heap = TRUE;
int verify_after_free_heap = FALSE;
/*
* Enable the printing of a note when code objects are found in the
* dynamic space during a heap verify.
*/
boolean verify_dynamic_code_check = FALSE;
/*
* Enable the checking of code objects for fixup errors after they are
* transported. (Only used for x86.)
boolean check_code_fixups = FALSE;
/*
* How to have a page zero-filled. When a page is freed, we need to
* either zero it immediately or mark it as needing to be zero-filled
* when it is allocated later.
*/
/*
 * How to have a page zero-filled when it is freed: either zero it
 * immediately or mark it as needing to be zero-filled when it is
 * allocated later.  (The MODE_MAP and MODE_LAZY enumerator names are
 * reconstructed from their uses elsewhere in this file.)
 */
enum gencgc_unmap_mode {
    /*
     * Unmap and mmap the region to get it zeroed when the page
     * is freed.
     */
    MODE_MAP,
    /*
     * Memset the region to 0 when it is freed.
     */
    MODE_MEMSET,
    /*
     * Call madvise to allow the kernel to free the memory if needed.
     * But when the region needs to be allocated, we will zero it if
     * necessary.
     */
    MODE_MADVISE,
    /*
     * Like madvise, except we don't actually call madvise and lazily
     * zero the region when it is allocated again.
     */
    MODE_LAZY
};
* Control how freed regions should be zeroed. Default to MODE_LAZY
* for all systems since tests indicate that it is much faster than
* unmapping and re-mapping it to zero the region. See enum
* gencgc_unmap_made for other possible options.
*
* XXX: Choose the appopriate mode for each OS/arch.
*
* To enable unmapping of a page and re-mmaping it to have it zero filled.
* Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
* don't unmap.
#if defined(DARWIN) || defined(__linux__) || defined(sparc)
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_LAZY;
#else
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_MEMSET;
/*
* Enable checking that newly allocated regions are zero filled.
*/
/* Both flags default to FALSE; the DARWIN arm is compiled out (#if 0). */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif
/*
* Enable checking that free pages are zero filled during gc_free_heap
* called after purify.
*/
/* Defaults to FALSE; the DARWIN arm is compiled out (#if 0). */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * For now, enable the zero check if gencgc_zero_check is true or if
 * gencgc_unmap_zero is MODE_LAZY or MODE_MADVISE. XXX: Remove this
 * additional condition when we feel that gencgc_unmap_zero is good
 * enough.
 */
#define DO_GENCGC_ZERO_CHECK (gencgc_zero_check)
/*
* Only to the zero check during free_heap if both
* gencgc_zero_check_during_free_heap is true and gencgc_unmap_zero is
* MODE_MAP or MODE_MEMSET because in all other modes, unallocated
* pages are known not to contain zeroes.
*/
#define DO_GENCGC_ZERO_CHECK_DURING_FREE_HEAP (gencgc_zero_check_during_free_heap)
unsigned large_object_size = 4 * GC_PAGE_SIZE;
/*
* Enable the filtering of stack/register pointers. This could reduce
* the number of invalid pointers accepted. It will probably degrades
* interrupt safety during object initialisation.
*/
boolean enable_pointer_filter = TRUE;
/*
* The total bytes allocated. Seen by (dynamic-usage)
*/
unsigned long bytes_allocated = 0;
/*
* The total amount of bytes ever allocated. Not decreased by GC.
*/
volatile unsigned long long bytes_allocated_sum = 0;
/*
* GC trigger; a value of 0xffffffff represents disabled.
*/
unsigned long auto_gc_trigger = 0xffffffff;
/*
* Number of pages to reserve for heap overflow. We want some space
* available on the heap when we are close to a heap overflow, so we
* can handle the overflow. But how much do we really need? I (rtoy)
* think 256 pages is probably a decent amount. (That's 1 MB for x86,
* 2 MB for sparc, which has 8K pages.)
*/
unsigned long reserved_heap_pages = 256;
/*
* The src. and dest. generations. Set before a GC starts scavenging.
*/
static int from_space;
static int new_space;
/*
* Number of pages within the dynamic heap, setup from the size of the
* dynamic space.
*/
unsigned dynamic_space_pages;
struct page *page_table;
/*
* Heap base, needed for mapping addresses to page structures.
*/
/*
* Calculate the start address for the given page number.
*/
page_address(int page_num)
return heap_base + GC_PAGE_SIZE * page_num;
/*
* Find the page index within the page_table for the given address.
* Returns -1 on failure.
*/
find_page_index(void *addr)
int index = (char *) addr - heap_base;
index = (unsigned int) index / GC_PAGE_SIZE;
if (index < dynamic_space_pages)
return index;
}
/*
 * NOTE(review): stray line-number artifacts removed here; the original
 * source lines for this region are missing from this excerpt.
 */
/*
* This routine implements a write barrier used to record stores into
* to boxed regions outside of generation 0. When such a store occurs
* this routine will be automatically invoked by the page fault
* handler. If passed an address outside of the dynamic space, this
* routine will return immediately with a value of 0. Otherwise, the
* page belonging to the address is made writable, the protection
* change is recorded in the garbage collector page table, and a value
* of 1 is returned.
*/
int
gc_write_barrier(void *addr)
{
    int page = find_page_index(addr);

    /* Faults outside the dynamic space are not ours to handle. */
    if (page == -1)
	return 0;

    /* The page should have been marked write protected */
    if (!PAGE_WRITE_PROTECTED(page))
	fprintf(stderr,
		"*** Page fault in page not marked as write protected\n");

    /* Un-protect the page */
    os_protect((os_vm_address_t) page_address(page), GC_PAGE_SIZE,
	       OS_VM_PROT_ALL);
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;

    return 1;
}
#define MEM_AGE_SHIFT 16
#define MEM_AGE_SCALE (1 << MEM_AGE_SHIFT)
struct generation {
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
/* The first page that gc_alloc checks on its next call. */
int alloc_start_page;
/* The first page that gc_alloc_unboxed checks on its next call. */
int alloc_unboxed_start_page;
/*
* The first page that gc_alloc_large (boxed) considers on its next call.
* Although it always allocates after the boxed_region.
*/
int alloc_large_start_page;
/*
* The first page that gc_alloc_large (unboxed) considers on its next call.
* Although it always allocates after the current_unboxed_region.
*/
int alloc_large_unboxed_start_page;
/* The bytes allocate to this generation. */
int bytes_allocated;
/* The number of bytes at which to trigger a GC */
int gc_trigger;
/* To calculate a new level for gc_trigger */
int bytes_consed_between_gc;
/* The number of GCs since the last raise. */
int num_gc;
/*
* The average age at after which a GC will raise objects to the
* next generation.
*/
int trigger_age;
/*
* The cumulative sum of the bytes allocated to this generation. It
* is cleared after a GC on this generation, and update before new
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC.
*/
int cum_sum_bytes_allocated;
/*
* A minimum average memory age before a GC will occur helps prevent
* a GC when a large number of new live objects have been added, in
* which case a GC could be a waste of time.
*
* The age is represented as an integer between 0 and 32767
* corresponding to an age of 0 to (just less than) 1.
int min_av_mem_age;
/*
* An array of generation structures. There needs to be one more
* generation structure than actual generations as the oldest
* generations is temporarily raised then lowered.
*/
static struct generation generations[NUM_GENERATIONS + 1];
/* Statistics about a generation, extracted from the generations
array. This gets returned to Lisp.
*/
/*
 * Mirror of the statistics fields of struct generation; filled in by
 * get_generation_stats() for return to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    int min_av_mem_age;
};
/*
* The oldest generation that will currently be GCed by default.
* Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
*
* A value of NUM_GENERATIONS - 1 enables GC on all generations; the
* default is less.
*
* Setting this to 0 effectively disables the generational nature of
* the GC. In some applications generational GC may not be useful
* because there are no long-lived objects.
*
* An intermediate value could be handy after moving long-lived data
* into an older generation so an unnecessary GC of this long-lived
* data can be avoided.
*/
#define DEFAULT_OLDEST_GEN_TO_GC 3
unsigned int gencgc_oldest_gen_to_gc = DEFAULT_OLDEST_GEN_TO_GC;
/*
* The maximum free page in the heap is maintained and used to update
* ALLOCATION_POINTER which is used by the room function to limit its
* search of the heap. XX Gencgc obviously needs to be better
* integrated with the lisp code.
*
* Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;
static void scan_weak_tables(void);
static void scan_weak_objects(void);
/*
* Count the number of write protected pages within the given generation.
*/
static int
count_write_protect_generation_pages(int generation)
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
| PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mflags)
cnt++;
return cnt;
/*
* Count the number of pages within the given generation.
*/
static int
count_generation_pages(int generation)
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mflags)
cnt++;
return cnt;
static int
count_dont_move_pages(void)
int i;
int cnt = 0;
int mmask;
mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
for (i = 0; i < last_free_page; i++)
if (PAGE_FLAGS(i, mmask) == mmask)
cnt++;
return cnt;
/*
* Work through the pages and add up the number of bytes used for the
* given generation.
*/
static int
generation_bytes_allocated(int generation)
int i;
int bytes_allocated = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++) {
if (PAGE_FLAGS(i, mmask) == mflags)
bytes_allocated += page_table[i].bytes_used;
}
return bytes_allocated;
/*
* Return the average age of the memory in a generation.
*/
static int
gen_av_mem_age(int gen)
if (generations[gen].bytes_allocated == 0)
return 0;
return (((long long) generations[gen].cum_sum_bytes_allocated) << MEM_AGE_SHIFT) /
generations[gen].bytes_allocated;
/*
 * NOTE(review): stray line-number artifacts removed here; the original
 * source lines for this region are missing from this excerpt.
 */
/* Save the FPU state into *state (SSE2 registers included when active). */
void
save_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode != SSE2) {
	fpu_save(state);
    } else {
	sse_save(state);
    }
#else
    fpu_save(state);
#endif
}
/*
 * Restore the FPU state previously captured by save_fpu_state().
 * (The trailing #endif and closing brace were lost from this excerpt
 * and are restored here, mirroring save_fpu_state.)
 */
void
restore_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode == SSE2) {
	sse_restore(state);
    } else {
	fpu_restore(state);
    }
#else
    fpu_restore(state);
#endif
}
/*
* The verbose argument controls how much to print out:
* 0 for normal level of detail; 1 for debugging.
*/
/*
 * Print per-generation page counts and allocation statistics to stderr.
 *
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging (also prints the extra
 * scratch generation).
 *
 * NOTE(review): the interior of this function was damaged in this
 * excerpt (stray line-number artifacts, missing braces); reconstructed
 * from the surviving statements -- confirm against the original.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

    FPU_STATE(fpu_state);

    /*
     * This code uses the FP instructions which may be setup for Lisp so
     * they need to the saved and reset for C.
     */
    save_fpu_state(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, " Page count (%d KB)\n", GC_PAGE_SIZE / 1024);
    fprintf(stderr,
	    " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	/*
	 * Count the number of boxed and unboxed pages within the
	 * given generation.
	 */
	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i
		&& (flags & PAGE_ALLOCATED_MASK)) {
		if (flags & PAGE_UNBOXED_MASK) {
		    if (flags & PAGE_LARGE_OBJECT_MASK)
			large_unboxed_cnt++;
		    else
			unboxed_cnt++;
		} else if (flags & PAGE_LARGE_OBJECT_MASK)
		    large_boxed_cnt++;
		else
		    boxed_cnt++;
	    }
	}

	if (gc_assert_level > 0) {
	    gc_assert(generations[i].bytes_allocated ==
		      generation_bytes_allocated(i));
	}

	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		GC_PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		(double) gen_av_mem_age(i) / MEM_AGE_SCALE);
    }

    /* bytes_allocated is unsigned long, so %lu is the correct
       conversion (was %ld). */
    fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);

    restore_fpu_state(fpu_state);
}
/* Get statistics that are kept "on the fly" out of the generation
array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    /* Copy the statistics for generation GEN into *stats; out-of-range
       generations are silently ignored (original behavior).  The
       function's closing brace was lost from this excerpt and is
       restored here. */
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}
void
set_gc_trigger(int gen, int trigger)
if (gen <= NUM_GENERATIONS) {
generations[gen].gc_trigger = trigger;
}
void
set_trigger_age(int gen, int trigger_age)
if (gen <= NUM_GENERATIONS) {
generations[gen].trigger_age = trigger_age;
}
void
set_min_mem_age(int gen, double min_mem_age)
if (gen <= NUM_GENERATIONS) {
generations[gen].min_av_mem_age = min_mem_age * MEM_AGE_SCALE;
/*
 * NOTE(review): stray line-number artifacts removed here; the original
 * source lines for this region are missing from this excerpt.
 */
/*
* Allocation routines.
*
*
* To support quick and inline allocation, regions of memory can be
* allocated and then allocated from with just a free pointer and a
* check against an end address.
*
* Since objects can be allocated to spaces with different properties
* e.g. boxed/unboxed, generation, ages; there may need to be many
* allocation regions.
*
* Each allocation region may be start within a partly used page.
* Many features of memory use are noted on a page wise basis,
* E.g. the generation; so if a region starts within an existing
* allocated page it must be consistent with this page.
*
* During the scavenging of the newspace, objects will be transported
* into an allocation region, and pointers updated to point to this
* allocation region. It is possible that these pointers will be
* scavenged again before the allocation region is closed, E.g. due to
* trans_list which jumps all over the place to cleanup the list. It
* is important to be able to determine properties of all objects
* pointed to when scavenging, E.g to detect pointers to the
* oldspace. Thus it's important that the allocation regions have the
* correct properties set when allocated, and not just set when
* closed. The region allocation routines return regions with the
* specified properties, and grab all the pages, setting there
* properties appropriately, except that the amount used is not known.
*
* These regions are used to support quicker allocation using just a
* free pointer. The actual space used by the region is not reflected
* in the pages tables until it is closed. It can't be scavenged until
* closed.
*
* When finished with the region it should be closed, which will
* update the page tables for the actual space used returning unused
* space. Further it may be noted in the new regions which is
* necessary when scavenging the newspace.
*
* Large objects may be allocated directly without an allocation
* region, the page tables are updated immediately.
*
* Unboxed objects don't contain points to other objects so don't need
* scavenging. Further they can't contain pointers to younger
* generations so WP is not needed. By allocating pages to unboxed
* objects the whole page never needs scavenging or write protecting.
*/
/*
* Only using two regions at present, both are for the current
* newspace generation.
*/
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
/*
* X hack. current lisp code uses the following. Need coping in/out.
*/
void *current_region_free_pointer;
void *current_region_end_addr;
/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;
extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);
/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
unsigned long heap_size_mb;
if (msg) {
fprintf(stderr, msg, size);
}
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif
/* Figure out how many MB of heap we have */
heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
heap_size_mb);
/* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
if (reserved_heap_pages == 0) {
fprintf(stderr, "\n Returning to top-level.\n");
do_dynamic_space_overflow_error();
} else {
fprintf(stderr,
" You can control heap size with the -dynamic-space-size commandline option.\n");
do_dynamic_space_overflow_warning();
print_generation_stats(1);
#endif
}
/*
 * Enables debug messages for MODE_MADVISE and MODE_LAZY
 */
boolean gencgc_debug_madvise = FALSE;
handle_madvise_first_page(int first_page)
{
int flags = page_table[first_page].flags;
if (gencgc_debug_madvise) {
fprintf(stderr, "first_page = %d, FLAGS = %x, orig = %d",
first_page, flags, page_table[first_page].bytes_used);
}
if (!PAGE_ALLOCATED(first_page)) {
int *page_start = (int *) page_address(first_page);
if (gencgc_debug_madvise) {
fprintf(stderr, ": marker = %x", *page_start);
}
if (*page_start != 0) {
memset(page_start, 0, GC_PAGE_SIZE);
}
}
if (gencgc_debug_madvise) {
fprintf(stderr, "\n");
}