/*
* Generational Conservative Garbage Collector for CMUCL x86.
*
* This code was written by Douglas T. Crosher, based on Public Domain
* codes from Carnegie Mellon University. This code has been placed in
* the public domain, and is provided 'as is'.
*
* $Header: /Volumes/share2/src/cmucl/cvs2git/cvsroot/src/lisp/gencgc.c,v 1.22 2000/10/21 12:50:42 dtc Exp $
*/
#include <stdio.h>
#include <signal.h>
#include "lisp.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "gencgc.h"
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
__FILE__, __LINE__)
#if 0  /* Set to 1 to enable the GC invariant assertions. */
#define gc_assert(ex) do { \
    if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif
/*
* The number of generations, an extra is added to this for use as a temp.
*/
#define NUM_GENERATIONS 6
/* Debugging variables. */
/*
* The verbose level. All non-error messages are disabled at level 0;
* and only a few rare messages are printed at level 1.
*/
unsigned gencgc_verbose = 0;
/*
* To enable the use of page protection to help avoid the scavenging
* of pages that don't have pointers to younger generations.
*/
boolean enable_page_protection = TRUE;
/*
* Hunt for pointers to old-space, when GCing generations >= verify_gens.
* Set to NUM_GENERATIONS to disable.
*/
int verify_gens = NUM_GENERATIONS;
/*
* Enable a pre-scan verify of generation 0 before it's GCed.
*/
boolean pre_verify_gen_0 = FALSE;
/*
* Enable checking for bad pointers after gc_free_heap is called from purify.
*/
boolean verify_after_free_heap = FALSE;
/*
* Enable the printing of a note when code objects are found in the
* dynamic space during a heap verify.
*/
boolean verify_dynamic_code_check = FALSE;
/*
* Enable the checking of code objects for fixup errors after they are
* transported.
*/
boolean check_code_fixups = FALSE;
/*
* To enable unmapping of a page and re-mmapping it to have it zero filled.
* Note: this can waste a lot of swap on FreeBSD so don't unmap.
*/
#if defined(__FreeBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif
/*
* Enable checking that newly allocated regions are zero filled.
*/
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
/*
* Enable checking that free pages are zero filled during gc_free_heap
* called after purify.
*/
boolean gencgc_zero_check_during_free_heap = FALSE;
unsigned large_object_size = 4 * PAGE_SIZE;
/*
* Enable the filtering of stack/register pointers. This could reduce
* the number of invalid pointers accepted. It will probably degrade
* interrupt safety during object initialisation.
*/
boolean enable_pointer_filter = TRUE;
/*
* The total bytes allocated. Seen by (dynamic-usage)
*/
unsigned long bytes_allocated = 0;
/*
* GC trigger; a value of 0xffffffff represents disabled.
*/
unsigned long auto_gc_trigger = 0xffffffff;
/*
* The src. and dest. generations. Set before a GC starts scavenging.
*/
static int from_space;
static int new_space;
/*
* An array of page structures is statically allocated.
* This helps quickly map between an address and its page structure.
* NUM_PAGES is set from the size of the dynamic space.
*/
struct page page_table[NUM_PAGES];
/*
* Heap base, needed for mapping addresses to page structures.
*/
static void *heap_base = NULL;
/*
* Calculate the start address for the given page number.
*/
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}
/*
* Find the page index within the page_table for the given address.
* Returns -1 on failure.
*/
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < NUM_PAGES)
            return index;
    }

    return -1;
}
struct generation {
/* The first page that gc_alloc checks on its next call. */
int alloc_start_page;
/* The first page that gc_alloc_unboxed checks on its next call. */
int alloc_unboxed_start_page;
/*
* The first page that gc_alloc_large (boxed) considers on its next call.
* Although it always allocates after the boxed_region.
*/
int alloc_large_start_page;
/*
* The first page that gc_alloc_large (unboxed) considers on its next call.
* Although it always allocates after the current_unboxed_region.
*/
int alloc_large_unboxed_start_page;
/* The bytes allocated to this generation. */
int bytes_allocated;
/* The number of bytes at which to trigger a GC */
int gc_trigger;
/* To calculate a new level for gc_trigger */
int bytes_consed_between_gc;
/* The number of GCs since the last raise. */
int num_gc;
/*
* The average age after which a GC will raise objects to the
* next generation.
*/
int trigger_age;
/*
* The cumulative sum of the bytes allocated to this generation. It
* is cleared after a GC on this generation, and updated before new
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC.
*/
int cum_sum_bytes_allocated;
/*
* A minimum average memory age before a GC will occur helps prevent
* a GC when a large number of new live objects have been added, in
* which case a GC could be a waste of time.
*/
double min_av_mem_age;
};
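/*
 * Illustrative sketch, not part of the original source: one way the
 * fields above might combine into a "should this generation be
 * collected?" test. The helper name example_gen_wants_gc is
 * hypothetical; the real decision logic lives in the GC driver
 * elsewhere in this file.
 */
#if 0
static int example_gen_wants_gc(struct generation *gen)
{
    /* Average age of the memory in this generation since its last GC. */
    double av_age = (gen->bytes_allocated == 0) ? 0.0
        : (double) gen->cum_sum_bytes_allocated / (double) gen->bytes_allocated;

    /*
     * Collect only once enough bytes have been consed to pass the
     * trigger and the memory is, on average, old enough to be worth
     * collecting.
     */
    return gen->bytes_allocated > gen->gc_trigger
        && av_age >= gen->min_av_mem_age;
}
#endif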
/*
* An array of generation structures. There needs to be one more
* generation structure than actual generations as the oldest
* generation is temporarily raised then lowered.
*/
static struct generation generations[NUM_GENERATIONS + 1];
/*
* The oldest generation that will currently be GCed by default.
* Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
*
* The default of (NUM_GENERATIONS - 1) enables GC on all generations.
*
* Setting this to 0 effectively disables the generational nature of
* the GC. In some applications generational GC may not be useful
* because there are no long-lived objects.
*
* An intermediate value could be handy after moving long-lived data
* into an older generation so an unnecessary GC of this long-lived
* data can be avoided.
*/
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
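/*
 * Illustrative sketch, not part of the original source: with
 * NUM_GENERATIONS set to 6, an intermediate setting such as the one
 * below confines routine collections to the younger generations once
 * long-lived data has been moved into an older one. The function name
 * is hypothetical.
 */
#if 0
static void example_tenure_long_lived_data(void)
{
    /* Long-lived data now lives in generation 3 or above; leave it alone. */
    gencgc_oldest_gen_to_gc = 2;
}
#endif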
/*
* The maximum free page in the heap is maintained and used to update
* ALLOCATION_POINTER which is used by the room function to limit its
* search of the heap. XX Gencgc obviously needs to be better
* integrated with the lisp code.
*/
static int last_free_page;
/*
* Count the number of write protected pages within the given generation.
*/
static int count_write_protect_generation_pages(int generation)
{
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
| PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mflags)
        cnt++;

return cnt;
}
/*
* Count the number of pages within the given generation.
*/
static int count_generation_pages(int generation)
{
int i;
int cnt = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mflags)
        cnt++;

return cnt;
}
/*
* Count the number of dont_move pages.
*/
static int count_dont_move_pages(void)
{
int i;
int cnt = 0;
int mmask;
mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mmask)
        cnt++;

return cnt;
}
/*
* Work through the pages and add up the number of bytes used for the
* given generation.
*/
static int generation_bytes_allocated (int generation)
{
int i;
int bytes_allocated = 0;
int mmask, mflags;
mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | generation;
for (i = 0; i < last_free_page; i++) {
if (PAGE_FLAGS(i, mmask) == mflags)
bytes_allocated += page_table[i].bytes_used;
}

return bytes_allocated;
}
/*
* Return the average age of the memory in a generation.
*/
static double gen_av_mem_age(int gen)
{
if (generations[gen].bytes_allocated == 0)
return 0.0;
return (double) generations[gen].cum_sum_bytes_allocated /
(double) generations[gen].bytes_allocated;
}
/*
* The verbose argument controls how much to print out:
* 0 for normal level of detail; 1 for debugging.
*/
static void print_generation_stats(int verbose)
{
    int i, gens;
    int fpu_state[27];

    /*
     * This code uses the FP instructions which may be set up for Lisp so
     * they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;
/* Print the heap stats */
fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
for (i = 0; i < gens; i++) {
int j;
int boxed_cnt = 0;
int unboxed_cnt = 0;
int large_boxed_cnt = 0;
int large_unboxed_cnt = 0;
for (j = 0; j < last_free_page; j++) {
int flags = page_table[j].flags;
if ((flags & PAGE_GENERATION_MASK) == i) {
if (flags & PAGE_ALLOCATED_MASK) {
/*
* Count the number of boxed and unboxed pages within the
* given generation.
*/
if (flags & PAGE_UNBOXED_MASK)
    if (flags & PAGE_LARGE_OBJECT_MASK)
        large_unboxed_cnt++;
    else
        unboxed_cnt++;
else
    if (flags & PAGE_LARGE_OBJECT_MASK)
        large_boxed_cnt++;
    else
        boxed_cnt++;
}
}
}
gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
generations[i].bytes_allocated,
PAGE_SIZE * count_generation_pages(i) -
generations[i].bytes_allocated,
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
gen_av_mem_age(i));
}
/*
* Allocation routines.
*
*
* To support quick and inline allocation, regions of memory can be
* allocated and then allocated from with just a free pointer and a
* check against an end address.
*
* Since objects can be allocated to spaces with different properties
* e.g. boxed/unboxed, generation, ages; there may need to be many
* allocation regions.
*
* Each allocation region may start within a partly used page. Many
* features of memory use are noted on a page-wise basis, e.g. the
* generation; so if a region starts within an existing allocated page
* it must be consistent with this page.
*
* During the scavenging of the newspace, objects will be transported
* into an allocation region, and pointers updated to point to this
* allocation region. It is possible that these pointers will be
* scavenged again before the allocation region is closed, E.g. due to
* trans_list which jumps all over the place to cleanup the list. It
* is important to be able to determine properties of all objects
* pointed to when scavenging, E.g to detect pointers to the
* oldspace. Thus it's important that the allocation regions have the
* correct properties set when allocated, and not just set when
* closed. The region allocation routines return regions with the
* specified properties, and grab all the pages, setting their
* properties appropriately, except that the amount used is not known.
*
* These regions are used to support quicker allocation using just a
* free pointer. The actual space used by the region is not reflected
* in the pages tables until it is closed. It can't be scavenged until
* closed.
*
* When finished with the region it should be closed, which will
* update the page tables for the actual space used, returning unused
* space. Further, the region may be noted in new_areas, which is
* necessary when scavenging the newspace.
*
* Large objects may be allocated directly without an allocation
* region; the page tables are updated immediately.
*
* Unboxed objects don't contain pointers to other objects, so they
* don't need scavenging. Further, they can't contain pointers to
* younger generations, so WP is not needed. By allocating pages to
* unboxed objects, the whole page never needs scavenging or write
* protecting.
*/
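/*
 * Illustrative sketch, not part of the original source: the inline
 * fast path described above, allocating from a region with just a
 * free pointer and a check against the end address. The helper name
 * example_inline_alloc is hypothetical; only the alloc_region fields
 * it touches (free_pointer, end_addr) are real.
 */
#if 0
static inline void *example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    /* Fast path: the request fits before the region's end address. */
    if (new_free <= (char *) region->end_addr) {
        void *object = region->free_pointer;
        region->free_pointer = new_free;
        return object;
    }

    /*
     * Slow path: the region is exhausted. The caller would close it
     * with gc_alloc_update_page_tables and open a fresh one with
     * gc_alloc_new_region before retrying.
     */
    return NULL;
}
#endif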
/*
* Only using two regions at present, both are for the current
* newspace generation.
*/
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
/*
* X hack. Current Lisp code uses the following. Need copying in/out.
*/
void *current_region_free_pointer;
void *current_region_end_addr;
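/*
 * Illustrative sketch, not part of the original source: the kind of
 * copying in/out the note above refers to -- mirroring the boxed
 * region's pointers into the globals the Lisp code allocates through,
 * and folding any inline allocation back in before the GC uses the
 * region. Both helper names are hypothetical.
 */
#if 0
static void example_copy_region_out(void)
{
    current_region_free_pointer = boxed_region.free_pointer;
    current_region_end_addr = boxed_region.end_addr;
}

static void example_copy_region_in(void)
{
    boxed_region.free_pointer = current_region_free_pointer;
}
#endif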
/* The generation currently being allocated to. X */
static int gc_alloc_generation;
/*
* Find a new region with room for at least the given number of bytes.
*
* It starts looking at the current generation's alloc_start_page. So
* may pick up from the previous region if there is enough space. This
* keeps the allocation contiguous when scavenging the newspace.
*
* The alloc_region should have been closed by a call to
* gc_alloc_update_page_tables, and will thus be in an empty state.
*
* To assist the scavenging functions, write protected pages are not
* used. Free pages should not be write protected.
*
* It is critical to the conservative GC that the start of regions be
* known. To help achieve this only small regions are allocated at a
* time.
*
* During scavenging, pointers may be found that point within the
* current region and the page generation must be set so pointers to
* the from space can be recognised. So the generation of pages in
* the region are set to gc_alloc_generation. To prevent another
* allocation call using the same pages, all the pages in the region
* are allocated, although they will initially be empty.
*/
static void gc_alloc_new_region(int nbytes, int unboxed,
struct alloc_region *alloc_region)
{
int first_page;
int last_page;
int region_size;
int restart_page;
int bytes_found;
int num_pages;
int i;
int mmask, mflags;
#if 0
fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
nbytes, gc_alloc_generation);
#endif
/* Check that the region is in a reset state. */
gc_assert(alloc_region->first_page == 0
&& alloc_region->last_page == -1
&& alloc_region->free_pointer == alloc_region->end_addr);
if (unboxed)
restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
else
restart_page = generations[gc_alloc_generation].alloc_start_page;
/*
* Search for a contiguous free region of at least nbytes with the
* given properties: boxed/unboxed, generation. First setting up the
* mask and matching flags.
*/
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
| gc_alloc_generation;
do {
first_page = restart_page;
/*
* First search for a page with at least 32 bytes free, that is
* not write protected, or marked dont_move.
*/
while (first_page < NUM_PAGES) {
int flags = page_table[first_page].flags;
if (!(flags & PAGE_ALLOCATED_MASK)
|| ((flags & mmask) == mflags &&
page_table[first_page].bytes_used < PAGE_SIZE - 32))
    break;
first_page++;
}
/* Check for a failure */
if (first_page >= NUM_PAGES) {
fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
print_generation_stats(1);
exit(1);
}
gc_assert(!PAGE_WRITE_PROTECTED(first_page));
#if 0
fprintf(stderr, " first_page=%d bytes_used=%d\n",
first_page, page_table[first_page].bytes_used);
#endif
/*
* Now search forward to calculate the available region size. It
* tries to keep going until nbytes are found and the number of
* pages is greater than some level. This helps keep down the
* number of pages in a region.
*/
last_page = first_page;
bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
num_pages = 1;
while ((bytes_found < nbytes || num_pages < 2)
&& last_page < NUM_PAGES - 1
&& !PAGE_ALLOCATED(last_page + 1)) {
last_page++;
num_pages++;
bytes_found += PAGE_SIZE;
gc_assert(!PAGE_WRITE_PROTECTED(last_page));
}

region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
+ PAGE_SIZE * (last_page - first_page);
gc_assert(bytes_found == region_size);
#if 0
fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
last_page, bytes_found, num_pages);
#endif
restart_page = last_page + 1;
} while (restart_page < NUM_PAGES && bytes_found < nbytes);
/* Check for a failure */
if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
print_generation_stats(1);
exit(1);
}
#if 0
fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
gc_alloc_generation, bytes_found, first_page, last_page,
page_address(first_page));
#endif
/* Setup the alloc_region. */
alloc_region->first_page = first_page;
alloc_region->last_page = last_page;
alloc_region->start_addr = page_table[first_page].bytes_used
+ page_address(first_page);
alloc_region->free_pointer = alloc_region->start_addr;
alloc_region->end_addr = alloc_region->start_addr + bytes_found;
if (gencgc_zero_check) {
int *p;
for(p = (int *)alloc_region->start_addr;
p < (int *)alloc_region->end_addr; p++)
if (*p != 0)
    fprintf(stderr, "** new region not zero @ %x\n", p);
}
/* Setup the pages. */
/* The first page may have already been in use. */
if (page_table[first_page].bytes_used == 0) {
PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
page_table[first_page].first_object_offset = 0;
}
gc_assert(PAGE_ALLOCATED(first_page));
gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
gc_assert(!PAGE_LARGE_OBJECT(first_page));
for (i = first_page + 1; i <= last_page; i++) {
PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
| gc_alloc_generation);
/*
* This may not be necessary for unboxed regions (think it was
* broken before!)
*/
page_table[i].first_object_offset =
alloc_region->start_addr - page_address(i);
}
/* Bump up the last_free_page */
if (last_page + 1 > last_free_page) {
last_free_page = last_page + 1;
SetSymbolValue(ALLOCATION_POINTER,
(lispobj) ((char *) heap_base +
PAGE_SIZE * last_free_page));
}
}
/*
* If the record_new_objects flag is 2 then all new regions created
* are recorded.
*
* If it's 1 then it is only recorded if the first page of the
* current region is <= new_areas_ignore_page. This helps avoid
* unnecessary recording when doing a full scavenge pass.
*
* The new_area structure holds the page, byte offset, and size of
* new regions of objects. Each new area is placed in the array of
* these structures pointed to by new_areas; new_areas_index holds the
* offset into new_areas.
*
* If new_area overflows NUM_NEW_AREAS then it stops adding them. The
* later code must detect this and handle it, probably by doing a full
* scavenge of a generation.
*/
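/*
 * Illustrative sketch, not part of the original source: how a
 * scavenging pass might drive the recording modes described above.
 * The function name and current_page parameter are hypothetical.
 */
#if 0
static void example_scavenge_with_recording(int current_page)
{
    /* Record only areas whose first page is <= the page being scanned. */
    record_new_objects = 1;
    new_areas_ignore_page = current_page;

    /* ... scavenge; add_new_area may be called for each new region ... */

    /* Then record every new region, e.g. while scavenging the recorded areas. */
    record_new_objects = 2;
}
#endif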
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
int page;
int offset;
int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;

int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
unsigned new_area_start,c;
int i;
/* Ignore if full */
if (new_areas_index >= NUM_NEW_AREAS)
return;
switch (record_new_objects) {
case 0:
return;
case 1:
if (first_page > new_areas_ignore_page)
return;
break;
case 2:
break;
default:
gc_abort();
}
new_area_start = PAGE_SIZE * first_page + offset;
/*
* Search backwards for a prior area that this follows from. If
* found this will save adding a new area.
*/
for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
+ (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
if (new_area_start == area_end) {
#if 0
fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
i, (*new_areas)[i].page, (*new_areas)[i].offset,
(*new_areas)[i].size, first_page, offset, size);
#endif
(*new_areas)[i].size += size;
return;
}
}
#if 0
fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
#endif
(*new_areas)[new_areas_index].page = first_page;
(*new_areas)[new_areas_index].offset = offset;
(*new_areas)[new_areas_index].size = size;
new_areas_index++;
#if 0
fprintf(stderr, " new_area %d page %d offset %d size %d\n",
new_areas_index, first_page, offset, size);
#endif
/* Note the max new_areas used. */
if (new_areas_index > max_new_areas)
max_new_areas = new_areas_index;
}
/*
* Update the tables for the alloc_region. The region may be added to
* the new_areas.
*
* When done, the alloc_region is set up so that the next quick alloc
* will fail safely and thus a new region will be allocated. Further
* it is safe to try and re-update the page table of this reset
* alloc_region.
*/
void gc_alloc_update_page_tables(int unboxed,
struct alloc_region *alloc_region)
{
int more;
int first_page;
int next_page;
int bytes_used;
int orig_first_page_bytes_used;
int region_size;
int byte_cnt;
#if 0
fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
gc_alloc_generation);
#endif
first_page = alloc_region->first_page;
/* Catch an unused alloc_region. */
if (first_page == 0 && alloc_region->last_page == -1)
    return;

next_page = first_page + 1;

/* Skip if no bytes were allocated */
if (alloc_region->free_pointer != alloc_region->start_addr) {
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == page_address(first_page) +
page_table[first_page].bytes_used);
/* All the pages used need to be updated */
/* Update the first page. */
#if 0
fprintf(stderr, "0");
#endif
/* If the page was free then setup the gen, and first_object_offset. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
gc_assert(PAGE_ALLOCATED(first_page));
gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
gc_assert(!PAGE_LARGE_OBJECT(first_page));
/*
* Calc. the number of bytes used in this page. This is not always
* the number of new bytes, unless it was free.
*/
byte_cnt = 0;
more = 0;
bytes_used = alloc_region->free_pointer - page_address(first_page);
if (bytes_used > PAGE_SIZE) {
bytes_used = PAGE_SIZE;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
byte_cnt += bytes_used;
/*
* All the rest of the pages should be free. Need to set their
* first_object_offset pointer to the start of the region, and set
* the bytes_used.
*/
while (more) {
    gc_assert(PAGE_ALLOCATED(next_page));
gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
gc_assert(!PAGE_LARGE_OBJECT(next_page));
gc_assert(page_table[next_page].first_object_offset ==
alloc_region->start_addr - page_address(next_page));
/* Calc. the number of bytes used in this page. */
more = 0;
bytes_used = alloc_region->free_pointer - page_address(next_page);
if (bytes_used > PAGE_SIZE) {
bytes_used = PAGE_SIZE;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
byte_cnt += bytes_used;

next_page++;
}

region_size = alloc_region->free_pointer - alloc_region->start_addr;
bytes_allocated += region_size;
generations[gc_alloc_generation].bytes_allocated += region_size;
gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
/*
* Set the generation's alloc restart page to the last page of
* the region.
*/
if (unboxed)
    generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
else
    generations[gc_alloc_generation].alloc_start_page = next_page - 1;
/* Add the region to the new_areas if requested. */
if (!unboxed)
add_new_area(first_page, orig_first_page_bytes_used, region_size);
#if 0
fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
region_size, gc_alloc_generation);
#endif
}
else
    /*
     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
     */
if (page_table[first_page].bytes_used == 0)
page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
gc_assert(page_table[next_page].bytes_used == 0);
page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
next_page++;
}
/* Reset the alloc_region. */
alloc_region->first_page = 0;
alloc_region->last_page = -1;
alloc_region->start_addr = page_address(0);
alloc_region->free_pointer = page_address(0);
alloc_region->end_addr = page_address(0);
}
static inline void *gc_quick_alloc(int nbytes);
/*
* Allocate a possibly large object.
*/
static void *gc_alloc_large(int nbytes, int unboxed,
struct alloc_region *alloc_region)
{
int first_page;
int last_page;
int region_size;
int restart_page;
int bytes_found;
int num_pages;
int orig_first_page_bytes_used;
int byte_cnt;
int more;
int bytes_used;
int next_page;
int large = (nbytes >= large_object_size);
int mmask, mflags;
#if 0
if (nbytes > 200000)
fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif
#if 0
fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
nbytes, gc_alloc_generation);
#endif
/*
* If the object is small, and there is room in the current region
* then allocate it in the current region.
*/
if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
return gc_quick_alloc(nbytes);
/*
* Search for a contiguous free region of at least nbytes. If it's a
* large object then align it on a page boundary by searching for a
* free page.
*/
/*
* To allow the allocation of small objects without the danger of
* using a page in the current boxed region, the search starts after
* the current boxed free region. XX could probably keep a page
* index ahead of the current region and bump it up here to save a
* lot of re-scanning.
*/
if (unboxed)
restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
else
restart_page = generations[gc_alloc_generation].alloc_large_start_page;
if (restart_page <= alloc_region->last_page)
    restart_page = alloc_region->last_page + 1;

/* Setup the mask and matching flags. */
mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK