/* "Newer" / "Older" — version-browser artifacts, not part of the source. */
/*
* OS-dependent routines. This file (along with os.h) exports an
* OS-independent interface to the operating system VM facilities.
* Surprisingly, this interface looks a lot like the Mach interface
* (but simpler in some places). For some operating systems, a subset
* of these functions will have to be emulated.
*
* This is the SunOS version.
* March 1991, Miles Bader <miles@cogsci.ed.ack.uk> & ted <ted@edu.NMSU>
*
*/
/* #define DEBUG */
#include <stdio.h>
#include <signal.h>
#include <sys/file.h>
#define OS_PROTERR SEGV_PROT
#define OS_MAPERR SEGV_NOMAP
#define OS_HASERRNO(code) (SEGV_CODE(code)==SEGV_OBJERR)
#define OS_ERRNO(code) SEGV_ERRNO(code)
extern int errno;
/* To get dynamic_0_space and friends */
#include "globals.h"
/* To get memory map */
#include "sparc-validate.h"
/* block size must be larger than the system page size */
#define SPARSE_BLOCK_SIZE (1<<15)
#define SPARSE_SIZE_MASK (SPARSE_BLOCK_SIZE-1)
#define PROT_DEFAULT OS_VM_PROT_ALL
#define OFFSET_NONE ((os_vm_offset_t)(~0))
#define EMPTYFILE "/tmp/empty"
#define ZEROFILE "/dev/zero"
#define INITIAL_MAX_SEGS 32
#define GROW_MAX_SEGS 16
extern char *getenv();
/* ---------------------------------------------------------------- */
#define ADJ_OFFSET(off,adj) (((off)==OFFSET_NONE) ? OFFSET_NONE : ((off)+(adj)))
/* Logical page size exported to the rest of the system; set in os_init. */
long os_vm_page_size = (-1);
/* Actual hardware/OS page size (getpagesize); may be smaller than os_vm_page_size. */
static long os_real_page_size = (-1);
/* Descriptor for one contiguous region this layer knows it has mapped.
 * NOTE(review): this struct is truncated in this copy — the fields
 * protection, mapped_fd, file_offset and the closing "} *segments;" are
 * missing (they are referenced throughout the file, e.g. seg->protection). */
static struct segment {
os_vm_address_t start; /* note: start & length are expected to be on page */
os_vm_size_t length; /* boundaries */
/* Count of live segment descriptors and current table capacity. */
static int n_segments = 0, max_segments = 0;
/* fds backing anonymous (/dev/zero) and demand-zero (empty file) mappings. */
static int zero_fd = (-1), empty_fd = (-1);
/* Address of the most recent page fault; used to detect repeated faults. */
static os_vm_address_t last_fault = 0;
/* Slack added when os_vm_page_size is forced up to OS_VM_DEFAULT_PAGESIZE. */
static os_vm_size_t real_page_size_difference = 0;
/*
 * Early, pre-argument-processing initialization hook.  Nothing to do on
 * this platform.  NOTE(review): an identical definition appears again
 * later in this copy of the file (a corruption artifact) — the file will
 * not compile until one of them is removed.
 */
void
os_init0(const char *argv[], const char *envp[])
{}
/*
 * Fatal-error helper for os_init: format a message and (presumably) bail.
 * NOTE(review): body is truncated here — "buf" is undeclared in what is
 * visible and the function has no braces or exit/perror call.
 */
static void
os_init_bailout(arg)
char *arg;
sprintf(buf, "os_init: %s", arg);
/* NOTE(review): duplicate of the os_init0 defined above — corruption artifact. */
void
os_init0(const char *argv[], const char *envp[])
{}
/*
 * NOTE(review): the following is the interior of os_init() — its signature
 * and opening brace are missing from this copy.  It opens the backing
 * files, allocates the initial segment table, and normalizes the page size.
 */
char *empty_file = getenv("CMUCL_EMPTYFILE");
if (empty_file == NULL)
empty_file = EMPTYFILE;
/* O_CREAT with no mode argument — assumes caller's umask; TODO confirm. */
empty_fd = open(empty_file, O_RDONLY | O_CREAT);
if (empty_fd < 0)
zero_fd = open(ZEROFILE, O_RDONLY);
if (zero_fd < 0)
max_segments = INITIAL_MAX_SEGS;
segments = (struct segment *) malloc(sizeof(struct segment) * max_segments);
if (segments == NULL) {
fprintf(stderr, "os_init: Couldn't allocate %d segment descriptors\n",
if (os_vm_page_size > OS_VM_DEFAULT_PAGESIZE) {
fprintf(stderr, "os_init: Pagesize too large (%d > %d)\n",
os_vm_page_size, OS_VM_DEFAULT_PAGESIZE);
/*
* we do this because there are apparently dependencies on
* the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
* but since the OS doesn't know we're using this restriction,
* we have to grovel around a bit to enforce it, thus anything
* that uses real_page_size_difference.
*/
real_page_size_difference = OS_VM_DEFAULT_PAGESIZE - os_vm_page_size;
os_vm_page_size = OS_VM_DEFAULT_PAGESIZE;
}
}
/* ---------------------------------------------------------------- */
/*
 * Touch every SPARSE_BLOCK_SIZE-sized block in [addr, addr+len) of SEG so
 * the pages are faulted in (and, if writable, dirtied) before their
 * protection or mapping is changed.  A segment with protection 0 has
 * nothing accessible to touch, so it is skipped.
 * NOTE(review): truncated — the opening brace after the signature and the
 * closing braces of the loop/function are missing from this copy.
 */
void
seg_force_resident(struct segment *seg, os_vm_address_t addr, os_vm_size_t len)
int prot = seg->protection;
if (prot != 0) {
os_vm_address_t end = addr + len, touch = addr;
while (touch < end) {
/* read faults the page in ... */
int contents = (*(char *) touch);
/* ... and a write-back dirties it when writes are allowed */
if (prot & OS_VM_PROT_WRITE)
(*(char *) touch) = contents;
/* advance to the start of the next sparse block */
touch =
(os_vm_address_t) (((long) touch + SPARSE_BLOCK_SIZE) &
~SPARSE_SIZE_MASK);
/*
 * Insert a new segment descriptor for [addr, addr+len), keeping the table
 * sorted by start address; grows the table by GROW_MAX_SEGS when full.
 * Does NOT attempt to merge with neighbors (see seg_create for that).
 * NOTE(review): truncated — the K&R declaration for file_offset, the
 * opening brace, local declarations (new_segs, n, seg), the insertion-loop
 * braces, n_segments++ and the return are missing from this copy.
 * NOTE(review): the fprintf below formats the int max_segments with %s —
 * a format-string bug even in the original.
 */
static struct segment *
seg_create_nomerge(addr, len, protection, mapped_fd, file_offset)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int mapped_fd;
if (n_segments == max_segments) {
max_segments += GROW_MAX_SEGS;
new_segs = (struct segment *)
realloc(segments, max_segments * sizeof(struct segment));
if (new_segs == NULL) {
fprintf(stderr,
"seg_create_nomerge: Couldn't grow segment descriptor table to %s segments\n",
max_segments);
max_segments -= GROW_MAX_SEGS;
segments = new_segs;
/* find the insertion point: first segment starting above addr */
for (n = n_segments, seg = segments; n > 0; n--, seg++)
if (addr < seg->start) {
/* shift the tail of the table up one slot to open a hole */
seg = (&segments[n_segments]);
while (n-- > 0) {
seg[0] = seg[-1];
seg->start = addr;
seg->length = len;
seg->protection = protection;
seg->mapped_fd = mapped_fd;
seg->file_offset = file_offset;
/*
 * Linear-scan variant of seg_find.
 * NOTE(review): the "#if 0" (or similar) that should pair with the stray
 * "#else" below is missing from this copy, as are both function bodies'
 * braces and return statements.
 */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
for (n = n_segments, seg = segments; n > 0; n--, seg++)
if (seg->start <= addr && seg->start + seg->length > addr)
#else
/* returns the first segment containing addr */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
/* binary search over the start-address-sorted segment table */
struct segment *lo = segments, *hi = segments + n_segments;
while (hi > lo) {
struct segment *mid = lo + ((hi - lo) >> 1);
os_vm_address_t start = mid->start;
if (addr >= start && addr < start + mid->length)
else if (addr < start)
hi = mid;
/* returns TRUE if the range from addr to addr+len intersects with any segment */
/* NOTE(review): truncated — opening brace, local declarations (n, seg),
 * the TRUE/FALSE returns and closing brace are missing from this copy.
 * The scan relies on the table being sorted by start address: once a
 * segment starts at or beyond END, nothing later can intersect. */
static boolean
collides_with_seg_p(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
os_vm_address_t end = addr + len;
for (n = n_segments, seg = segments; n > 0; n--, seg++)
if (seg->start >= end)
else if (seg->start + seg->length > addr)
#if 0 /* WAY to SLOW */
/* returns TRUE if the range from addr to addr+len is a valid mapping
 * (that we don't know about) */
/* NOTE(review): dead code (disabled by #if 0) and truncated — the
 * returns, closing braces and the matching #endif are missing from this
 * copy.  Also note the loop condition tests "addr < addr + len", which
 * never changes — the loop variable p is what advances. */
static boolean
mem_in_use(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
os_vm_address_t p;
for (p = addr; addr < addr + len; p += os_real_page_size) {
char c;
/* mincore succeeds (or fails with something other than ENOMEM) iff the
 * page is mapped */
if (mincore((caddr_t) p, os_real_page_size, &c) == 0 || errno != ENOMEM)
/* TRUE when SEG is the last (highest-address) entry in the table. */
#define seg_last_p(seg) (((seg)-segments)>=n_segments-1)
/*
 * Remove SEG from the table by sliding every later descriptor down one
 * slot.  NOTE(review): truncated — opening brace, declaration of n,
 * seg++ advance, n_segments-- and closing braces are missing.
 */
static void
seg_destroy(seg)
struct segment *seg;
for (n = seg - segments + 1; n < n_segments; n++) {
seg[0] = seg[1];
/*
 * Coalesce SEG with its immediate successor when they are adjacent in
 * memory and identical in protection, backing fd, and (adjusted) file
 * offset.  NOTE(review): truncated — the opening brace after the
 * declarations and the #endif for the DEBUG fprintf are missing here.
 */
static void
seg_try_merge_next(seg)
struct segment *seg;
struct segment *nseg = seg + 1;
if (!seg_last_p(seg)
&& seg->start + seg->length == nseg->start
&& seg->protection == nseg->protection
&& seg->mapped_fd == nseg->mapped_fd
&& ADJ_OFFSET(seg->file_offset, seg->length) == nseg->file_offset) {
/* can merge with the next segment */
#ifdef DEBUG
fprintf(stderr,
";;; seg_try_merge: Merged 0x%08x[0x%08x] with 0x%08x[0x%08x]\n",
seg->start, seg->length, nseg->start, nseg->length);
if (((long) nseg->start & SPARSE_SIZE_MASK) != 0) {
/*
* if not on a block boundary, we have to ensure both parts
* of a common block are in a known state
*/
seg_force_resident(seg, nseg->start - 1, 1);
seg_force_resident(nseg, nseg->start, 1);
seg->length += nseg->length;
seg_destroy(nseg);
}
}
/*
 * Try to merge seg with adjacent segments.
 */
/* NOTE(review): truncated — the opening brace and the call
 * "seg_try_merge_next(seg);" that should follow the first condition are
 * missing from this copy; as shown, the two ifs would mis-nest. */
static void
seg_try_merge_adjacent(seg)
struct segment *seg;
if (!seg_last_p(seg))
if (seg > segments)
seg_try_merge_next(seg - 1);
static struct segment *
seg_create(addr, len, protection, mapped_fd, file_offset)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int mapped_fd;
struct segment *seg =
seg_create_nomerge(addr, len, protection, mapped_fd, file_offset);
if (seg != NULL)
seg_try_merge_adjacent(seg);
return seg;
}
/*
 * Change the attributes of the given range of an existing segment, and return
 * a segment corresponding to the new bit.
 */
/* NOTE(review): heavily truncated — the K&R declaration for file_offset,
 * the opening brace, several loop/if bodies (segment absorption via
 * seg_destroy, DEBUG #endifs, the protection argument to the
 * seg_create_nomerge "second part" call) are missing from this copy.
 * Overall shape: if the new attributes differ from SEG's, the range may
 * absorb following segments, and SEG is split into up to three pieces
 * (head keeping old attributes, middle with the new ones, tail with old). */
static struct segment *
seg_change_range(seg, addr, len, protection, mapped_fd, file_offset)
struct segment *seg;
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int mapped_fd;
os_vm_address_t end = addr + len;
if (protection != seg->protection
|| mapped_fd != seg->mapped_fd
|| file_offset != ADJ_OFFSET(seg->file_offset, addr - seg->start)) {
os_vm_size_t old_len = seg->length, seg_offset = (addr - seg->start);
/* the range runs past this segment: absorb whole following segments,
 * trim a partially-covered one */
if (old_len < len + seg_offset) {
struct segment *next = seg + 1;
#ifdef DEBUG
fprintf(stderr,
";;; seg_change_range: region 0x%08x[0x%08x] overflows 0x%08x[0x%08x]\n",
addr, len, seg->start, old_len);
while (!seg_last_p(seg) && next->start + next->length <= end) {
#ifdef DEBUG
fprintf(stderr,
";;; seg_change_range: merging extra segment 0x%08x[0x%08x]\n",
next->start, next->length);
if (!seg_last_p(seg) && next->start < end) {
next->length -= end - next->start;
next->start = end;
old_len = next->start - seg->start;
} else
old_len = len + seg_offset;
#ifdef DEBUG
fprintf(stderr,
";;; seg_change_range: extended first seg to 0x%08x[0x%08x]\n",
seg->start, old_len);
if (seg_offset + len < old_len) {
/* add second part of old segment */
seg_create_nomerge(end,
old_len - (seg_offset + len),
ADJ_OFFSET(seg->file_offset, seg_offset + len));
#ifdef DEBUG
fprintf(stderr,
";;; seg_change_range: Split off end of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
seg->start, old_len, end, old_len - (seg_offset + len));
if (seg_offset == 0) {
/* range starts at the segment start: retype SEG in place */
seg->length = len;
seg->protection = protection;
seg->mapped_fd = mapped_fd;
seg->file_offset = file_offset;
} else {
/* keep the head with old attributes, then insert the new middle */
seg->length = seg_offset;
#ifdef DEBUG
fprintf(stderr,
";;; seg_change_range: Split off beginning of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
seg->start, old_len, seg->start, seg_offset);
#endif
/* add new middle segment for new protected region */
seg =
seg_create_nomerge(addr, len, protection, mapped_fd,
file_offset);
}
return seg;
}
/* ---------------------------------------------------------------- */
/*
 * Low-level mmap wrapper.  Sparse (large) regions are initially mapped
 * with minimal protection so pages can be faulted in lazily by the SEGV
 * handler; when ADDR is NULL the kernel chooses the address and we
 * over-allocate so the result can be trimmed to our page alignment.
 * NOTE(review): truncated — the opening brace, the "real" declaration,
 * error returns, and notably the fd argument of the mmap call (between
 * the flags and offset arguments) are missing from this copy.
 */
static os_vm_address_t
mapin(addr, len, protection, map_fd, offset, is_readable)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int map_fd;
long offset;
int is_readable;
boolean sparse = (len >= SPARSE_BLOCK_SIZE);
/* file offsets must be page-aligned and past the first page */
if (offset != OFFSET_NONE
&& (offset < os_vm_page_size || (offset & (os_vm_page_size - 1)) != 0)) {
fprintf(stderr,
"mapin: file offset (%d) not multiple of pagesize (%d)\n",
offset, os_vm_page_size);
if (addr == NULL)
len += real_page_size_difference; /* futz around to get an aligned region */
last_fault = 0;
real = (os_vm_address_t)
mmap((caddr_t) addr,
(long) len,
sparse ? (is_readable ? PROT_READ | PROT_EXEC : 0) : protection,
(addr == NULL ? 0 : MAP_FIXED) | MAP_PRIVATE,
(off_t) (offset == OFFSET_NONE ? 0 : offset));
if ((long) real == -1) {
/*
* now play around with what the os gave us to make it align by
* our standards (which is why we overallocated)
*/
os_vm_size_t overflow;
addr = os_round_up_to_page(real);
if (addr != real)
munmap((caddr_t) real, addr - real);
overflow = real_page_size_difference - (addr - real);
if (overflow != 0)
munmap((caddr_t) (addr + len - real_page_size_difference),
overflow);
/*
 * mapin() plus bookkeeping: record the resulting region in the segment
 * table (updating an existing overlapping segment, else creating one).
 * On bookkeeping failure the mapping is torn down again.
 * NOTE(review): truncated — the opening brace, the "else" that should
 * separate the seg_change_range and seg_create calls, the failure return,
 * and the "#ifdef DEBUG" that should pair with the #endif below are
 * missing from this copy.
 */
static os_vm_address_t
map_and_remember(addr, len, protection, map_fd, offset, is_readable)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int map_fd;
long offset;
int is_readable;
os_vm_address_t real =
mapin(addr, len, protection, map_fd, offset, is_readable);
if (real != NULL) {
struct segment *seg = seg_find(real);
if (seg != NULL)
seg = seg_change_range(seg, real, len, protection, map_fd, offset);
seg = seg_create(real, len, protection, map_fd, offset);
if (seg == NULL) {
/* couldn't record it — undo the mapping rather than leak it */
munmap((caddr_t) real, len);
fprintf(stderr,
";;; map_and_remember: 0x%08x[0x%08x] offset: %d, mapped to: %d\n",
real, len, offset, map_fd);
#endif
return real;
}
/* ---------------------------------------------------------------- */
/*
 * Public entry: reserve [addr, addr+len) (page-rounded) as zero-filled
 * memory, refusing an explicit address that collides with a known segment.
 * NOTE(review): truncated — the opening brace, the "#ifdef DEBUG" guard
 * around the trace fprintf, and the NULL return on collision are missing
 * from this copy.
 */
os_vm_address_t
os_validate(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
addr = os_trunc_to_page(addr);
len = os_round_up_size_to_page(len);
fprintf(stderr, ";;; os_validate: 0x%08x[0x%08x]\n", addr, len);
if (addr != NULL && collides_with_seg_p(addr, len))
return map_and_remember(addr, len, PROT_DEFAULT, zero_fd, OFFSET_NONE,
FALSE);
/*
 * Public entry: release [addr, addr+len) — drop it from the segment table
 * (protection 0, no backing) and munmap it.
 * NOTE(review): truncated — the opening brace, the "#ifdef DEBUG" guard
 * around the trace fprintf, the munmap-failure handling, and the closing
 * braces are missing from this copy.
 */
void
os_invalidate(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
struct segment *seg = seg_find(addr);
addr = os_trunc_to_page(addr);
len = os_round_up_size_to_page(len);
fprintf(stderr, ";;; os_invalidate: 0x%08x[0x%08x]\n", addr, len);
if (seg == NULL)
fprintf(stderr, "os_invalidate: Unknown segment: 0x%08x[0x%08x]\n",
addr, len);
else {
seg = seg_change_range(seg, addr, len, 0, 0, OFFSET_NONE);
if (seg != NULL)
last_fault = 0;
if (munmap((caddr_t) addr, len) != 0)
/*
 * Public entry: map LEN bytes of FD at file OFFSET to ADDR (page-rounded)
 * with default protection, recording the mapping in the segment table.
 * NOTE(review): truncated — the opening brace, the "#ifdef DEBUG" guard
 * around the trace fprintf, and the closing brace are missing.
 */
os_vm_address_t
os_map(fd, offset, addr, len)
int fd;
int offset;
os_vm_address_t addr;
long len;
addr = os_trunc_to_page(addr);
len = os_round_up_size_to_page(len);
fprintf(stderr, ";;; os_map: 0x%08x[0x%08x]\n", addr, len);
return map_and_remember(addr, len, PROT_DEFAULT, fd, offset, TRUE);
/*
 * Flush the instruction cache for [address, address+length) after code
 * has been written there.
 * NOTE(review): this body uses Mach kernel primitives
 * (vm_machine_attribute, task_self, mach_error) even though the file
 * header says "SunOS version" — presumably pasted from the Mach variant
 * or originally under a conditional; verify against the real source.
 * Truncated: opening and closing braces are missing.
 */
void
os_flush_icache(address, length)
os_vm_address_t address;
os_vm_size_t length;
vm_machine_attribute_val_t flush;
kern_return_t kr;
flush = MATTR_VAL_ICACHE_FLUSH;
kr = vm_machine_attribute(task_self(), address, length,
MATTR_CACHE, &flush);
if (kr != KERN_SUCCESS)
mach_error("Could not flush the instruction cache", kr);
/*
 * Public entry: change the protection of [addr, addr+len) (page-rounded).
 * Pages are forced resident first so that the lazily-faulted sparse
 * mapping is in a known state before mprotect.
 * NOTE(review): truncated — the opening brace, the "#ifdef DEBUG" guard
 * around the trace fprintf, the mprotect-failure handling, and several
 * closing braces are missing from this copy.
 */
void
os_protect(addr, len, prot)
os_vm_address_t addr;
os_vm_size_t len;
int prot;
struct segment *seg = seg_find(addr);
addr = os_trunc_to_page(addr);
len = os_round_up_size_to_page(len);
fprintf(stderr, ";;; os_protect: 0x%08x[0x%08x]\n", addr, len);
if (seg != NULL) {
int old_prot = seg->protection;
if (prot != old_prot) {
/*
* oooooh, sick: we have to make sure all the pages being protected have
* faulted in, so they're in a known state...
*/
seg_force_resident(seg, addr, len);
seg_change_range(seg, addr, len, prot, seg->mapped_fd,
seg->file_offset);
if (mprotect((caddr_t) addr, (long) len, prot) != 0)
} else
fprintf(stderr, "os_protect: Unknown segment: 0x%08x[0x%08x]\n", addr,
len);
boolean
valid_addr(test)
os_vm_address_t test;
return seg_find(test) != NULL;
}
/* ---------------------------------------------------------------- */
/*
 * Called from the SEGV handler: unblock signals and give the interrupt
 * machinery a chance to run a GC for this fault.  Returns TRUE if a GC
 * was actually performed.
 */
static boolean
maybe_gc(HANDLER_ARGS)
{
    /*
     * It's necessary to enable recursive SEGVs, since the handler is
     * used for multiple things (e.g., both gc-trigger & faulting in pages).
     * We check against recursive gc's though...
     */
    boolean did_gc;
    static int already_trying = 0;	/* was implicit int */

    /*
     * Restored guard: the flag was set and cleared below but never
     * tested, so the promised recursion check was a no-op and a fault
     * during GC would re-enter interrupt_maybe_gc.
     */
    if (already_trying)
	return FALSE;

    /* allow nested SEGVs to be delivered while the GC runs */
    sigprocmask(SIG_SETMASK, &context->uc_sigmask, 0);

    already_trying = TRUE;
    did_gc = interrupt_maybe_gc(signal, code, context);
    already_trying = FALSE;

    return did_gc;
}
/*
* The primary point of catching segmentation violations is to allow
* read only memory to be re-mapped with more permissions when a write
* is attempted. this greatly decreases the residency of the program
* in swap space since read only areas don't take up room
*
* Running into the gc trigger page will also end up here...
*/
/*
 * SIGSEGV handler.  Three jobs: (1) on a protection fault inside a known
 * segment, remap the faulting sparse block with its full permissions
 * (lazy fault-in); (2) detect the GC trigger page and run a GC; (3) hand
 * anything else to the generic Lisp interrupt machinery.  A repeated
 * fault at the same address (last_fault) means the remap didn't help —
 * treat it as a real violation.
 * NOTE(review): heavily truncated — the opening brace, several if/else
 * bodies, the #ifdef matching the stray #endif, the length/protection/fd
 * arguments of the remapping mmap call, a seg==NULL branch, and the
 * fprintf(stderr, ...) wrapper around the "No mapping fault" string are
 * all missing from this copy.
 */
void
segv_handler(HANDLER_ARGS, caddr_t addr)
SAVE_CONTEXT();
if (CODE(code) == OS_PROTERR) { /* allow writes to this chunk */
struct segment *seg = seg_find(addr);
/* second fault at the same address: remapping didn't fix it */
if ((caddr_t) last_fault == addr) {
if (seg != NULL && maybe_gc(signal, code, context))
/* a *real* protection fault */
fprintf(stderr,
"segv_handler: Real protection violation: 0x%08x\n",
addr);
interrupt_handle_now(signal, code, context);
} else
last_fault = (os_vm_address_t) addr;
/* compute the sparse block containing the fault, clipped to the segment */
os_vm_address_t block =
(os_vm_address_t) ((long) addr & ~SPARSE_SIZE_MASK);
os_vm_size_t length = SPARSE_BLOCK_SIZE;
if (block < seg->start) {
length -= (seg->start - block);
block = seg->start;
if (block + length > seg->start + seg->length)
length = seg->start + seg->length - block;
if (munmap((caddr_t) block, length) == -1)
perror("segv_handler: munmap");
#endif
/* and remap it with more permissions */
err = (int)
mmap((caddr_t) block,
MAP_PRIVATE | MAP_FIXED,
seg->file_offset == OFFSET_NONE
? 0 : seg->file_offset + (block - seg->start));
interrupt_handle_now(signal, code, context);
} else {
fprintf(stderr, "segv_handler: 0x%08x not in any segment\n", addr);
interrupt_handle_now(signal, code, context);
}
}
/*
* note that we check for a gc-trigger hit even if it's not a PROT error
*/
else if (!maybe_gc(signal, code, context)) {
static int nomap_count = 0;
if (CODE(code) == OS_MAPERR) {
if (nomap_count == 0) {
"segv_handler: No mapping fault: 0x%08x\n", addr);
/*
* There should be higher-level protection against stack
* overflow somewhere, but at least this prevents infinite
* puking of error messages...
*/
fprintf(stderr,
"segv_handler: Recursive no mapping fault (stack overflow?)\n");
exit(-1);
}
} else if (OS_HASERRNO(code)) {
errno = OS_ERRNO(code);
interrupt_handle_now(signal, code, context);
if (CODE(code) == OS_MAPERR)
/*
 * Hook our SEGV handler into the low-level interrupt dispatch.
 * NOTE(review): truncated — the "void" return-type line, opening brace,
 * and closing brace are missing from this copy.
 */
os_install_interrupt_handlers(void)
interrupt_install_low_level_handler(SIGSEGV, segv_handler);
/*
 * Round ADDR up to the next SPARSE_BLOCK_SIZE boundary (identity when
 * already aligned).
 */
os_vm_address_t round_up_sparse_size(os_vm_address_t addr)
{				/* restored: opening brace missing in this copy */
    return (addr + SPARSE_BLOCK_SIZE - 1) & ~SPARSE_SIZE_MASK;
}
/*
* An array of the start of the spaces which should have holes placed
* after them. Must not include the dynamic spaces because the size
* of the dynamic space can be controlled from the command line.
*/
/* Entries must stay in one-to-one correspondence with space_size[] below;
 * make_holes() indexes both arrays in parallel. */
static os_vm_address_t spaces[] = {
READ_ONLY_SPACE_START, STATIC_SPACE_START,
BINDING_STACK_START, CONTROL_STACK_START
};
/*
 * The corresponding array for the size of each space. Be sure that
 * the spaces and holes don't overlap! The sizes MUST be on
 * SPARSE_BLOCK_SIZE boundaries.
 */
static unsigned long space_size[] = {
READ_ONLY_SPACE_SIZE, STATIC_SPACE_SIZE,
BINDING_STACK_SIZE, CONTROL_STACK_SIZE
};
/*
* The size of the hole to make. It should be strictly smaller than
* SPARSE_BLOCK_SIZE.
*/
#define HOLE_SIZE 0x2000
/*
 * Place an inaccessible HOLE_SIZE guard region immediately after each
 * fixed space and after both dynamic spaces, so that running off the end
 * of a space faults instead of silently corrupting the next one.
 * NOTE(review): truncated — the opening brace, the closing brace of the
 * for loop, the closing braces of the dynamic-space if blocks, and the
 * final function brace are missing from this copy.
 * NOTE(review): HOLE_SIZE (an int constant) is printed with %ld, and one
 * message says "hold" where "hole" is meant — runtime strings left as-is.
 */
void
make_holes(void)
int k;
os_vm_address_t hole;
/* Make holes of the appropriate size for desired spaces */
for (k = 0; k < sizeof(spaces) / sizeof(spaces[0]); ++k) {
hole = spaces[k] + space_size[k];
if (os_validate(hole, HOLE_SIZE) == NULL) {
fprintf(stderr,
"ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
HOLE_SIZE, (unsigned long) hole);
exit(1);
}
/* Make it inaccessible */
os_protect(hole, HOLE_SIZE, 0);
/* Round up the dynamic_space_size to the nearest SPARSE_BLOCK_SIZE */
dynamic_space_size = round_up_sparse_size(dynamic_space_size);
/* Now make a hole for the dynamic spaces */
hole = dynamic_space_size + (os_vm_address_t) dynamic_0_space;
if (os_validate(hole, HOLE_SIZE) == NULL) {
fprintf(stderr,
"ensure_space: Failed to validate hold of %ld bytes at 0x%08X\n",
HOLE_SIZE, (unsigned long) hole);
exit(1);
os_protect(hole, HOLE_SIZE, 0);
hole = dynamic_space_size + (os_vm_address_t) dynamic_1_space;
if (os_validate(hole, HOLE_SIZE) == NULL) {
fprintf(stderr,
"ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
HOLE_SIZE, (unsigned long) hole);
exit(1);
os_protect(hole, HOLE_SIZE, 0);