/[cmucl]/src/lisp/sunos-os.c
ViewVC logotype

Contents of /src/lisp/sunos-os.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.13 - (show annotations)
Thu Sep 1 05:18:26 2011 UTC (2 years, 7 months ago) by rtoy
Branch: MAIN
CVS Tags: GIT-CONVERSION, snapshot-2011-09, HEAD
Changes since 1.12: +5 -1 lines
File MIME type: text/plain
Add os_init0 to allow for some really early OS inits.

On Linux, os_init can re-exec lisp to set up the correct personality.
Not normally a problem, but if any output happens before os_init is
called, the output appears to happen twice.  So add os_init0 to do
this early on, before any output.  This is a bit of a kludge.

lisp/lisp.c:
o Call os_init0 early in main.

lisp/Linux-os.c:
o Move the personality stuff from os_init to os_init0.

lisp/Darwin-os.c:
lisp/FreeBSD-os.c:
lisp/NetBSD-os.c:
lisp/OpenBSD-os.c:
lisp/hpux-os.c:
lisp/irix-os.c:
lisp/mach-os.c:
lisp/osf1-os.c:
lisp/solaris-os.c:
lisp/sunos-os.c:
o Add dummy implementation of os_init0.  These OSes don't (currently)
  need anything special.

lisp/os.h:
o Declare os_init0.
1 /*
2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/sunos-os.c,v 1.13 2011/09/01 05:18:26 rtoy Exp $
3 *
4 * OS-dependent routines. This file (along with os.h) exports an
5 * OS-independent interface to the operating system VM facilities.
6 * Surprisingly, this interface looks a lot like the Mach interface
7 * (but simpler in some places). For some operating systems, a subset
8 * of these functions will have to be emulated.
9 *
10 * This is the SunOS version.
11 * March 1991, Miles Bader <miles@cogsci.ed.ack.uk> & ted <ted@edu.NMSU>
12 *
13 */
14
15 /* #define DEBUG */
16
17 #include <stdio.h>
18
19 #include <signal.h>
20 #include <sys/file.h>
21
22 #define OS_PROTERR SEGV_PROT
23 #define OS_MAPERR SEGV_NOMAP
24 #define OS_HASERRNO(code) (SEGV_CODE(code)==SEGV_OBJERR)
25 #define OS_ERRNO(code) SEGV_ERRNO(code)
26 extern int errno;
27
28 #include "os.h"
29 /* To get dynamic_0_space and friends */
30 #include "globals.h"
31 /* To get memory map */
32 #include "sparc-validate.h"
33
34 /* block size must be larger than the system page size */
35 #define SPARSE_BLOCK_SIZE (1<<15)
36 #define SPARSE_SIZE_MASK (SPARSE_BLOCK_SIZE-1)
37
38 #define PROT_DEFAULT OS_VM_PROT_ALL
39
40 #define OFFSET_NONE ((os_vm_offset_t)(~0))
41
42 #define EMPTYFILE "/tmp/empty"
43 #define ZEROFILE "/dev/zero"
44
45 #define INITIAL_MAX_SEGS 32
46 #define GROW_MAX_SEGS 16
47
48 extern char *getenv();
49
50 /* ---------------------------------------------------------------- */
51
52 #define ADJ_OFFSET(off,adj) (((off)==OFFSET_NONE) ? OFFSET_NONE : ((off)+(adj)))
53
/* Lisp-visible page size; forced up to OS_VM_DEFAULT_PAGESIZE by os_init. */
long os_vm_page_size = (-1);
/* Actual hardware page size, as reported by getpagesize() in os_init. */
static long os_real_page_size = (-1);

/*
 * Bookkeeping record for one mmap'ed region.  The table below is kept
 * sorted by start address (see seg_create_nomerge) and entries never
 * overlap.
 */
static struct segment {
    os_vm_address_t start;	/* note: start & length are expected to be on page */
    os_vm_size_t length;	/* boundaries */
    long file_offset;		/* offset into mapped_fd, or OFFSET_NONE */
    short mapped_fd;		/* fd backing this mapping (zero_fd, empty_fd, ...) */
    short protection;		/* OS_VM_PROT_* bits currently in effect */
} *segments;

/* Live entry count / allocated capacity of the segments table. */
static int n_segments = 0, max_segments = 0;

/* Descriptors for /dev/zero and the (immediately unlinked) empty file. */
static int zero_fd = (-1), empty_fd = (-1);

/* Address of the last protection fault; lets segv_handler detect a
 * repeated fault at the same address (GC trigger or real violation). */
static os_vm_address_t last_fault = 0;
/* OS_VM_DEFAULT_PAGESIZE - real page size; alignment slop used by mapin. */
static os_vm_size_t real_page_size_difference = 0;
71
/*
 * Print "os_init: <arg>: <strerror(errno)>" to stderr and terminate.
 * Used for unrecoverable failures while setting up the VM layer.
 */
static void
os_init_bailout(char *arg)
{
    char buf[500];

    /* snprintf, not sprintf: an over-long arg (e.g. a user-supplied
     * CMUCL_EMPTYFILE path) must not overflow buf */
    snprintf(buf, sizeof(buf), "os_init: %s", arg);
    perror(buf);
    exit(1);
}
82
/*
 * Early OS initialization hook, invoked before any output is produced
 * (some platforms re-exec here).  SunOS needs nothing this early, so
 * this is a deliberate no-op.
 */
void
os_init0(const char *argv[], const char *envp[])
{
    /* nothing to do on SunOS */
}
86
87 void
88 os_init(const char *argv[], const char *envp[])
89 {
90 char *empty_file = getenv("CMUCL_EMPTYFILE");
91
92 if (empty_file == NULL)
93 empty_file = EMPTYFILE;
94
95 empty_fd = open(empty_file, O_RDONLY | O_CREAT);
96 if (empty_fd < 0)
97 os_init_bailout(empty_file);
98 unlink(empty_file);
99
100 zero_fd = open(ZEROFILE, O_RDONLY);
101 if (zero_fd < 0)
102 os_init_bailout(ZEROFILE);
103
104 os_vm_page_size = os_real_page_size = getpagesize();
105
106 max_segments = INITIAL_MAX_SEGS;
107 segments = (struct segment *) malloc(sizeof(struct segment) * max_segments);
108
109 if (segments == NULL) {
110 fprintf(stderr, "os_init: Couldn't allocate %d segment descriptors\n",
111 max_segments);
112 exit(1);
113 }
114
115 if (os_vm_page_size > OS_VM_DEFAULT_PAGESIZE) {
116 fprintf(stderr, "os_init: Pagesize too large (%d > %d)\n",
117 os_vm_page_size, OS_VM_DEFAULT_PAGESIZE);
118 exit(1);
119 } else {
120 /*
121 * we do this because there are apparently dependencies on
122 * the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
123 * but since the OS doesn't know we're using this restriction,
124 * we have to grovel around a bit to enforce it, thus anything
125 * that uses real_page_size_difference.
126 */
127 real_page_size_difference = OS_VM_DEFAULT_PAGESIZE - os_vm_page_size;
128 os_vm_page_size = OS_VM_DEFAULT_PAGESIZE;
129 }
130 }
131
132 /* ---------------------------------------------------------------- */
133
/*
 * Touch one byte in every SPARSE_BLOCK_SIZE-aligned block of
 * [addr, addr+len) so the pages are faulted in and in a known state
 * (e.g. before a protection change or a segment merge).  Readable pages
 * are read; writable pages are rewritten with the value just read so a
 * private dirty copy exists.  No-op for unmapped (protection == 0)
 * segments.
 */
void
seg_force_resident(struct segment *seg, os_vm_address_t addr, os_vm_size_t len)
{
    int prot = seg->protection;

    if (prot != 0) {
	os_vm_address_t end = addr + len, touch = addr;

	while (touch < end) {
	    /* the read itself faults the page in via segv_handler */
	    int contents = (*(char *) touch);

	    if (prot & OS_VM_PROT_WRITE)
		/* write back the same byte to force a private copy */
		(*(char *) touch) = contents;
	    /* advance to the start of the next sparse block */
	    touch =
		(os_vm_address_t) (((long) touch + SPARSE_BLOCK_SIZE) &
				   ~SPARSE_SIZE_MASK);
	}
    }
}
153
154 static struct segment *
155 seg_create_nomerge(addr, len, protection, mapped_fd, file_offset)
156 os_vm_address_t addr;
157 os_vm_size_t len;
158 int protection;
159 int mapped_fd;
160 {
161 int n;
162 struct segment *seg;
163
164 if (len == 0)
165 return NULL;
166
167 if (n_segments == max_segments) {
168 struct segment *new_segs;
169
170 max_segments += GROW_MAX_SEGS;
171
172 new_segs = (struct segment *)
173 realloc(segments, max_segments * sizeof(struct segment));
174
175 if (new_segs == NULL) {
176 fprintf(stderr,
177 "seg_create_nomerge: Couldn't grow segment descriptor table to %s segments\n",
178 max_segments);
179 max_segments -= GROW_MAX_SEGS;
180 return NULL;
181 }
182
183 segments = new_segs;
184 }
185
186 for (n = n_segments, seg = segments; n > 0; n--, seg++)
187 if (addr < seg->start) {
188 seg = (&segments[n_segments]);
189 while (n-- > 0) {
190 seg[0] = seg[-1];
191 seg--;
192 }
193 break;
194 }
195
196 n_segments++;
197
198 seg->start = addr;
199 seg->length = len;
200 seg->protection = protection;
201 seg->mapped_fd = mapped_fd;
202 seg->file_offset = file_offset;
203
204 return seg;
205 }
206
#if 1
/* returns the first segment containing addr (linear scan over the
 * address-sorted table), or NULL if addr lies in no known segment */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
{
    int n;
    struct segment *seg;

    for (n = n_segments, seg = segments; n > 0; n--, seg++)
	if (seg->start <= addr && seg->start + seg->length > addr)
	    return seg;

    return NULL;
}
#else
/* returns the first segment containing addr
 * (alternative implementation, currently compiled out) */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
{
    /* does a binary search over the address-sorted table */
    struct segment *lo = segments, *hi = segments + n_segments;

    while (hi > lo) {
	struct segment *mid = lo + ((hi - lo) >> 1);
	os_vm_address_t start = mid->start;

	if (addr >= start && addr < start + mid->length)
	    return mid;
	else if (addr < start)
	    hi = mid;
	else
	    lo = mid + 1;
    }

    return NULL;
}
#endif
246
247 /* returns TRUE if the range from addr to addr+len intersects with any segment */
248 static boolean
249 collides_with_seg_p(addr, len)
250 os_vm_address_t addr;
251 os_vm_size_t len;
252 {
253 int n;
254 struct segment *seg;
255 os_vm_address_t end = addr + len;
256
257 for (n = n_segments, seg = segments; n > 0; n--, seg++)
258 if (seg->start >= end)
259 return FALSE;
260 else if (seg->start + seg->length > addr)
261 return TRUE;
262
263 return FALSE;
264 }
265
#if 0				/* WAY to SLOW */
/* returns TRUE if the range from addr to addr+len is a valid mapping
 * (that we don't know about) */
static boolean
mem_in_use(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    os_vm_address_t p;

    /* loop condition fixed: the original tested "addr < addr + len",
     * which never changes, instead of advancing cursor p toward the end */
    for (p = addr; p < addr + len; p += os_real_page_size) {
	char c;

	/* mincore succeeding — or failing with anything but ENOMEM —
	 * means the page is mapped */
	if (mincore((caddr_t) p, os_real_page_size, &c) == 0 || errno != ENOMEM)
	    return TRUE;
    }
    return FALSE;
}
#endif
285
286 #define seg_last_p(seg) (((seg)-segments)>=n_segments-1)
287
288 static void
289 seg_destroy(seg)
290 struct segment *seg;
291 {
292 if (seg != NULL) {
293 int n;
294
295 for (n = seg - segments + 1; n < n_segments; n++) {
296 seg[0] = seg[1];
297 seg++;
298 }
299
300 n_segments--;
301 }
302 }
303
/*
 * If seg and the segment after it are contiguous in memory and share
 * the same protection, backing fd, and contiguous file offsets,
 * coalesce them into a single table entry.  No-op otherwise.
 */
static void
seg_try_merge_next(seg)
struct segment *seg;
{
    struct segment *nseg = seg + 1;

    if (!seg_last_p(seg)
	&& seg->start + seg->length == nseg->start
	&& seg->protection == nseg->protection
	&& seg->mapped_fd == nseg->mapped_fd
	&& ADJ_OFFSET(seg->file_offset, seg->length) == nseg->file_offset) {
	/* can merge with the next segment */
#ifdef DEBUG
	fprintf(stderr,
		";;; seg_try_merge: Merged 0x%08x[0x%08x] with 0x%08x[0x%08x]\n",
		seg->start, seg->length, nseg->start, nseg->length);
#endif

	if (((long) nseg->start & SPARSE_SIZE_MASK) != 0) {
	    /*
	     * if not on a block boundary, we have to ensure both parts
	     * of a common block are in a known state
	     */
	    seg_force_resident(seg, nseg->start - 1, 1);
	    seg_force_resident(nseg, nseg->start, 1);
	}

	seg->length += nseg->length;
	seg_destroy(nseg);
    }
}
335
336
337 /*
338 * Try to merge seg with adjacent segments.
339 */
340 static void
341 seg_try_merge_adjacent(seg)
342 struct segment *seg;
343 {
344 if (!seg_last_p(seg))
345 seg_try_merge_next(seg);
346 if (seg > segments)
347 seg_try_merge_next(seg - 1);
348 }
349
350 static struct segment *
351 seg_create(addr, len, protection, mapped_fd, file_offset)
352 os_vm_address_t addr;
353 os_vm_size_t len;
354 int protection;
355 int mapped_fd;
356 {
357 struct segment *seg =
358
359 seg_create_nomerge(addr, len, protection, mapped_fd, file_offset);
360 if (seg != NULL)
361 seg_try_merge_adjacent(seg);
362 return seg;
363 }
364
/*
 * Change the attributes of the given range of an existing segment, and return
 * a segment corresponding to the new bit.  May destroy following segments
 * the range swallows, trim a partially-covered successor, and split the
 * original segment into up to three pieces (head / changed middle / tail).
 *
 * NOTE(review): file_offset appears in the K&R parameter list without a
 * declaration, so it defaults to int while callers pass long — harmless
 * on 32-bit SPARC, but confirm before any 64-bit port.
 */
static struct segment *
seg_change_range(seg, addr, len, protection, mapped_fd, file_offset)
struct segment *seg;
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int mapped_fd;
{
    os_vm_address_t end = addr + len;

    if (len == 0)
	return NULL;

    /* nothing to do unless some attribute actually changes */
    if (protection != seg->protection
	|| mapped_fd != seg->mapped_fd
	|| file_offset != ADJ_OFFSET(seg->file_offset, addr - seg->start)) {
	os_vm_size_t old_len = seg->length, seg_offset = (addr - seg->start);

	/* the range runs past the end of this segment: absorb whatever
	 * following segments it covers */
	if (old_len < len + seg_offset) {
	    struct segment *next = seg + 1;

#ifdef DEBUG
	    fprintf(stderr,
		    ";;; seg_change_range: region 0x%08x[0x%08x] overflows 0x%08x[0x%08x]\n",
		    addr, len, seg->start, old_len);
#endif

	    /* destroy successors fully covered by the new range; note
	     * seg_destroy shifts the table down, so `next` keeps naming
	     * the current successor on each iteration */
	    while (!seg_last_p(seg) && next->start + next->length <= end) {
#ifdef DEBUG
		fprintf(stderr,
			";;; seg_change_range: merging extra segment 0x%08x[0x%08x]\n",
			next->start, next->length);
#endif
		seg_destroy(next);
	    }

	    /* trim a partially-covered successor so it starts at end */
	    if (!seg_last_p(seg) && next->start < end) {
		next->length -= end - next->start;
		next->start = end;
		old_len = next->start - seg->start;
	    } else
		old_len = len + seg_offset;

#ifdef DEBUG
	    fprintf(stderr,
		    ";;; seg_change_range: extended first seg to 0x%08x[0x%08x]\n",
		    seg->start, old_len);
#endif
	}

	/* the old segment extends past the changed range: split off the
	 * unchanged tail as its own entry */
	if (seg_offset + len < old_len) {
	    /* add second part of old segment */
	    seg_create_nomerge(end,
			       old_len - (seg_offset + len),
			       seg->protection,
			       seg->mapped_fd,
			       ADJ_OFFSET(seg->file_offset, seg_offset + len));

#ifdef DEBUG
	    fprintf(stderr,
		    ";;; seg_change_range: Split off end of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
		    seg->start, old_len, end, old_len - (seg_offset + len));
#endif
	}

	if (seg_offset == 0) {
	    /* the change starts at the segment's start: reuse the entry */
	    seg->length = len;
	    seg->protection = protection;
	    seg->mapped_fd = mapped_fd;
	    seg->file_offset = file_offset;
	} else {
	    /* adjust first part of remaining old segment */
	    seg->length = seg_offset;

#ifdef DEBUG
	    fprintf(stderr,
		    ";;; seg_change_range: Split off beginning of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
		    seg->start, old_len, seg->start, seg_offset);
#endif

	    /* add new middle segment for new protected region */
	    seg =
		seg_create_nomerge(addr, len, protection, mapped_fd,
				   file_offset);
	}

	seg_try_merge_adjacent(seg);

	last_fault = 0;
    }

    return seg;
}
462
463 /* ---------------------------------------------------------------- */
464
465 static os_vm_address_t
466 mapin(addr, len, protection, map_fd, offset, is_readable)
467 os_vm_address_t addr;
468 os_vm_size_t len;
469 int protection;
470 int map_fd;
471 long offset;
472 int is_readable;
473 {
474 os_vm_address_t real;
475 boolean sparse = (len >= SPARSE_BLOCK_SIZE);
476
477 if (offset != OFFSET_NONE
478 && (offset < os_vm_page_size || (offset & (os_vm_page_size - 1)) != 0)) {
479 fprintf(stderr,
480 "mapin: file offset (%d) not multiple of pagesize (%d)\n",
481 offset, os_vm_page_size);
482 }
483
484 if (addr == NULL)
485 len += real_page_size_difference; /* futz around to get an aligned region */
486
487 last_fault = 0;
488 real = (os_vm_address_t)
489 mmap((caddr_t) addr,
490 (long) len,
491 sparse ? (is_readable ? PROT_READ | PROT_EXEC : 0) : protection,
492 (addr == NULL ? 0 : MAP_FIXED) | MAP_PRIVATE,
493 (is_readable || !sparse) ? map_fd : empty_fd,
494 (off_t) (offset == OFFSET_NONE ? 0 : offset));
495
496 if ((long) real == -1) {
497 perror("mapin: mmap");
498 return NULL;
499 }
500
501 if (addr == NULL) {
502 /*
503 * now play around with what the os gave us to make it align by
504 * our standards (which is why we overallocated)
505 */
506 os_vm_size_t overflow;
507
508 addr = os_round_up_to_page(real);
509 if (addr != real)
510 munmap((caddr_t) real, addr - real);
511
512 overflow = real_page_size_difference - (addr - real);
513 if (overflow != 0)
514 munmap((caddr_t) (addr + len - real_page_size_difference),
515 overflow);
516
517 real = addr;
518 }
519
520
521 return real;
522 }
523
/*
 * mmap the range via mapin and record it in the segment table — either
 * by updating an existing segment that contains the new address, or by
 * creating a fresh entry.  If the bookkeeping fails, the new mapping is
 * unmapped again so no untracked mapping survives.  Returns the mapped
 * address, or NULL on failure.
 */
static os_vm_address_t
map_and_remember(addr, len, protection, map_fd, offset, is_readable)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int map_fd;
long offset;
int is_readable;
{
    os_vm_address_t real =

	mapin(addr, len, protection, map_fd, offset, is_readable);

    if (real != NULL) {
	struct segment *seg = seg_find(real);

	if (seg != NULL)
	    seg = seg_change_range(seg, real, len, protection, map_fd, offset);
	else
	    seg = seg_create(real, len, protection, map_fd, offset);

	if (seg == NULL) {
	    /* bookkeeping failed: undo the mapping we just made */
	    munmap((caddr_t) real, len);
	    return NULL;
	}
    }
#ifdef DEBUG
    fprintf(stderr,
	    ";;; map_and_remember: 0x%08x[0x%08x] offset: %d, mapped to: %d\n",
	    real, len, offset, map_fd);
#endif

    return real;
}
558
559 /* ---------------------------------------------------------------- */
560
561 os_vm_address_t
562 os_validate(addr, len)
563 os_vm_address_t addr;
564 os_vm_size_t len;
565 {
566 addr = os_trunc_to_page(addr);
567 len = os_round_up_size_to_page(len);
568
569 #ifdef DEBUG
570 fprintf(stderr, ";;; os_validate: 0x%08x[0x%08x]\n", addr, len);
571 #endif
572
573 if (addr != NULL && collides_with_seg_p(addr, len))
574 return NULL;
575
576 return map_and_remember(addr, len, PROT_DEFAULT, zero_fd, OFFSET_NONE,
577 FALSE);
578 }
579
/*
 * Release the mapping for [addr, addr+len): carve the range out of the
 * segment table, then munmap it.  Complains to stderr if the address is
 * not inside any known segment.
 */
void
os_invalidate(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    /* NOTE(review): seg_find runs on the raw addr, *before* the page
     * truncation below — appears to assume callers pass page-aligned
     * addresses (os_protect has the same pattern); confirm */
    struct segment *seg = seg_find(addr);

    addr = os_trunc_to_page(addr);
    len = os_round_up_size_to_page(len);

#ifdef DEBUG
    fprintf(stderr, ";;; os_invalidate: 0x%08x[0x%08x]\n", addr, len);
#endif

    if (seg == NULL)
	fprintf(stderr, "os_invalidate: Unknown segment: 0x%08x[0x%08x]\n",
		addr, len);
    else {
	/* reduce the bookkeeping entry to exactly this range, then drop it */
	seg = seg_change_range(seg, addr, len, 0, 0, OFFSET_NONE);
	if (seg != NULL)
	    seg_destroy(seg);

	last_fault = 0;
	if (munmap((caddr_t) addr, len) != 0)
	    perror("os_invalidate: munmap");
    }
}
607
608 os_vm_address_t
609 os_map(fd, offset, addr, len)
610 int fd;
611 int offset;
612 os_vm_address_t addr;
613 long len;
614 {
615 addr = os_trunc_to_page(addr);
616 len = os_round_up_size_to_page(len);
617
618 #ifdef DEBUG
619 fprintf(stderr, ";;; os_map: 0x%08x[0x%08x]\n", addr, len);
620 #endif
621
622 return map_and_remember(addr, len, PROT_DEFAULT, fd, offset, TRUE);
623 }
624
/*
 * Flush the instruction cache for [address, address+length).  Only the
 * MACH/mips configuration has (or needs) an implementation here; on
 * SunOS/SPARC this compiles to a no-op.
 */
void
os_flush_icache(address, length)
os_vm_address_t address;
os_vm_size_t length;
{
#if defined(MACH) && defined(mips)
    vm_machine_attribute_val_t flush;
    kern_return_t kr;

    flush = MATTR_VAL_ICACHE_FLUSH;

    kr = vm_machine_attribute(task_self(), address, length,
			      MATTR_CACHE, &flush);
    if (kr != KERN_SUCCESS)
	mach_error("Could not flush the instruction cache", kr);
#endif
}
642
643 void
644 os_protect(addr, len, prot)
645 os_vm_address_t addr;
646 os_vm_size_t len;
647 int prot;
648 {
649 struct segment *seg = seg_find(addr);
650
651 addr = os_trunc_to_page(addr);
652 len = os_round_up_size_to_page(len);
653
654 #ifdef DEBUG
655 fprintf(stderr, ";;; os_protect: 0x%08x[0x%08x]\n", addr, len);
656 #endif
657
658 if (seg != NULL) {
659 int old_prot = seg->protection;
660
661 if (prot != old_prot) {
662 /*
663 * oooooh, sick: we have to make sure all the pages being protected have
664 * faulted in, so they're in a known state...
665 */
666 seg_force_resident(seg, addr, len);
667
668 seg_change_range(seg, addr, len, prot, seg->mapped_fd,
669 seg->file_offset);
670
671 if (mprotect((caddr_t) addr, (long) len, prot) != 0)
672 perror("os_unprotect: mprotect");
673 }
674 } else
675 fprintf(stderr, "os_protect: Unknown segment: 0x%08x[0x%08x]\n", addr,
676 len);
677 }
678
679 boolean
680 valid_addr(test)
681 os_vm_address_t test;
682 {
683 return seg_find(test) != NULL;
684 }
685
686 /* ---------------------------------------------------------------- */
687
688 static boolean
689 maybe_gc(HANDLER_ARGS)
690 {
691 /*
692 * It's necessary to enable recursive SEGVs, since the handle is
693 * used for multiple things (e.g., both gc-trigger & faulting in pages).
694 * We check against recursive gc's though...
695 */
696
697 boolean did_gc;
698 static already_trying = 0;
699
700 if (already_trying)
701 return FALSE;
702
703 SAVE_CONTEXT();
704
705 sigprocmask(SIG_SETMASK, &context->uc_sigmask, 0);
706
707 already_trying = TRUE;
708 did_gc = interrupt_maybe_gc(signal, code, context);
709 already_trying = FALSE;
710
711 return did_gc;
712 }
713
/*
 * The primary point of catching segmentation violations is to allow
 * read only memory to be re-mapped with more permissions when a write
 * is attempted. this greatly decreases the residency of the program
 * in swap space since read only areas don't take up room
 *
 * Running into the gc trigger page will also end up here...
 */
void
segv_handler(HANDLER_ARGS, caddr_t addr)
{
    SAVE_CONTEXT();

    if (CODE(code) == OS_PROTERR) {	/* allow writes to this chunk */
	struct segment *seg = seg_find(addr);

	/* a second fault at the same address means remapping didn't
	 * cure it: try a GC, otherwise treat it as a real violation */
	if ((caddr_t) last_fault == addr) {
	    if (seg != NULL && maybe_gc(signal, code, context))
		/* we just garbage collected */
		return;
	    else {
		/* a *real* protection fault */
		fprintf(stderr,
			"segv_handler: Real protection violation: 0x%08x\n",
			addr);
		interrupt_handle_now(signal, code, context);
	    }
	} else
	    last_fault = (os_vm_address_t) addr;

	if (seg != NULL) {
	    int err;

	    /* round down to a page */
	    os_vm_address_t block =

		(os_vm_address_t) ((long) addr & ~SPARSE_SIZE_MASK);
	    os_vm_size_t length = SPARSE_BLOCK_SIZE;

	    /* clip the sparse block to the segment's boundaries */
	    if (block < seg->start) {
		length -= (seg->start - block);
		block = seg->start;
	    }
	    if (block + length > seg->start + seg->length)
		length = seg->start + seg->length - block;

#if 0
	    /* unmap it. probably redundant. */
	    if (munmap((caddr_t) block, length) == -1)
		perror("segv_handler: munmap");
#endif

	    /* and remap it with more permissions */
	    err = (int)
		mmap((caddr_t) block,
		     length,
		     seg->protection,
		     MAP_PRIVATE | MAP_FIXED,
		     seg->mapped_fd,
		     seg->file_offset == OFFSET_NONE
		     ? 0 : seg->file_offset + (block - seg->start));

	    if (err == -1) {
		perror("segv_handler: mmap");
		interrupt_handle_now(signal, code, context);
	    }
	} else {
	    fprintf(stderr, "segv_handler: 0x%08x not in any segment\n", addr);
	    interrupt_handle_now(signal, code, context);
	}
    }
    /*
     * note that we check for a gc-trigger hit even if it's not a PROT error
     */
    else if (!maybe_gc(signal, code, context)) {
	static int nomap_count = 0;

	if (CODE(code) == OS_MAPERR) {
	    if (nomap_count == 0) {
		fprintf(stderr,
			"segv_handler: No mapping fault: 0x%08x\n", addr);
		nomap_count++;
	    } else {
		/*
		 * There should be higher-level protection against stack
		 * overflow somewhere, but at least this prevents infinite
		 * puking of error messages...
		 */
		fprintf(stderr,
			"segv_handler: Recursive no mapping fault (stack overflow?)\n");
		exit(-1);
	    }
	} else if (OS_HASERRNO(code)) {
	    /* propagate the OS error code so perror prints something useful */
	    errno = OS_ERRNO(code);
	    perror("segv_handler: Object error");
	}

	interrupt_handle_now(signal, code, context);

	if (CODE(code) == OS_MAPERR)
	    nomap_count--;
    }
}
817
/*
 * Install segv_handler as the low-level SIGSEGV handler; it services
 * both lazy page-in faults and GC-trigger hits.
 */
void
os_install_interrupt_handlers(void)
{
    interrupt_install_low_level_handler(SIGSEGV, segv_handler);
}
823
824 os_vm_address_t round_up_sparse_size(os_vm_address_t addr)
825 {
826 return (addr + SPARSE_BLOCK_SIZE - 1) & ~SPARSE_SIZE_MASK;
827 }
828
/*
 * An array of the start of the spaces which should have holes placed
 * after them.  Must not include the dynamic spaces because the size
 * of the dynamic space can be controlled from the command line.
 * Kept parallel with space_size[] below.
 */
static os_vm_address_t spaces[] = {
    READ_ONLY_SPACE_START, STATIC_SPACE_START,
    BINDING_STACK_START, CONTROL_STACK_START
};
838
/*
 * The corresponding array for the size of each space.  Be sure that
 * the spaces and holes don't overlap!  The sizes MUST be on
 * SPARSE_BLOCK_SIZE boundaries.
 */
static unsigned long space_size[] = {
    READ_ONLY_SPACE_SIZE, STATIC_SPACE_SIZE,
    BINDING_STACK_SIZE, CONTROL_STACK_SIZE
};
850
851 /*
852 * The size of the hole to make. It should be strictly smaller than
853 * SPARSE_BLOCK_SIZE.
854 */
855
856 #define HOLE_SIZE 0x2000
857
858 void
859 make_holes(void)
860 {
861 int k;
862 os_vm_address_t hole;
863
864 /* Make holes of the appropriate size for desired spaces */
865
866 for (k = 0; k < sizeof(spaces) / sizeof(spaces[0]); ++k) {
867
868 hole = spaces[k] + space_size[k];
869
870 if (os_validate(hole, HOLE_SIZE) == NULL) {
871 fprintf(stderr,
872 "ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
873 HOLE_SIZE, (unsigned long) hole);
874 exit(1);
875 }
876 /* Make it inaccessible */
877 os_protect(hole, HOLE_SIZE, 0);
878 }
879
880 /* Round up the dynamic_space_size to the nearest SPARSE_BLOCK_SIZE */
881 dynamic_space_size = round_up_sparse_size(dynamic_space_size);
882
883 /* Now make a hole for the dynamic spaces */
884 hole = dynamic_space_size + (os_vm_address_t) dynamic_0_space;
885
886 if (os_validate(hole, HOLE_SIZE) == NULL) {
887 fprintf(stderr,
888 "ensure_space: Failed to validate hold of %ld bytes at 0x%08X\n",
889 HOLE_SIZE, (unsigned long) hole);
890 exit(1);
891 }
892 os_protect(hole, HOLE_SIZE, 0);
893
894 hole = dynamic_space_size + (os_vm_address_t) dynamic_1_space;
895 if (os_validate(hole, HOLE_SIZE) == NULL) {
896 fprintf(stderr,
897 "ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
898 HOLE_SIZE, (unsigned long) hole);
899 exit(1);
900 }
901 os_protect(hole, HOLE_SIZE, 0);
902 }

  ViewVC Help
Powered by ViewVC 1.1.5