/[cmucl]/src/lisp/sunos-os.c
ViewVC logotype

Contents of /src/lisp/sunos-os.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.12 - (show annotations)
Mon Feb 1 16:04:43 2010 UTC (4 years, 2 months ago) by rtoy
Branch: MAIN
CVS Tags: sparc-tramp-assem-base, post-merge-intl-branch, intl-branch-working-2010-02-19-1000, release-20b-pre1, release-20b-pre2, sparc-tramp-assem-2010-07-19, intl-2-branch-base, cross-sol-x86-merged, intl-branch-working-2010-02-11-1000, RELEASE_20b, cross-sol-x86-base, snapshot-2010-12, snapshot-2010-11, snapshot-2011-06, snapshot-2011-07, snapshot-2011-04, snapshot-2011-02, snapshot-2011-03, snapshot-2011-01, pre-merge-intl-branch, snapshot-2010-05, snapshot-2010-04, snapshot-2010-07, snapshot-2010-06, snapshot-2010-03, snapshot-2010-08, cross-sol-x86-2010-12-20, intl-branch-2010-03-18-1300, cross-sparc-branch-base, intl-branch-base
Branch point for: cross-sparc-branch, RELEASE-20B-BRANCH, sparc-tramp-assem-branch, intl-branch, cross-sol-x86-branch, intl-2-branch
Changes since 1.11: +2 -2 lines
File MIME type: text/plain
Linux-os.c:
o Follow CMUCL coding style.
o Argv and envp are const char*.

os.h:
lisp.c:
Darwin-os.c:
FreeBSD-os.c:
NetBSD-os.c:
OpenBSD-os.c:
hpux-os.c:
irix-os.c:
mach-os.c:
osf1-os.c:
solaris-os.c:
sunos-os.c:
o argv and envp are const char *.
1 /*
2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/sunos-os.c,v 1.12 2010/02/01 16:04:43 rtoy Rel $
3 *
4 * OS-dependent routines. This file (along with os.h) exports an
5 * OS-independent interface to the operating system VM facilities.
6 * Suprisingly, this interface looks a lot like the Mach interface
7 * (but simpler in some places). For some operating systems, a subset
8 * of these functions will have to be emulated.
9 *
10 * This is the SunOS version.
11 * March 1991, Miles Bader <miles@cogsci.ed.ack.uk> & ted <ted@edu.NMSU>
12 *
13 */
14
15 /* #define DEBUG */
16
17 #include <stdio.h>
18
19 #include <signal.h>
20 #include <sys/file.h>
21
22 #define OS_PROTERR SEGV_PROT
23 #define OS_MAPERR SEGV_NOMAP
24 #define OS_HASERRNO(code) (SEGV_CODE(code)==SEGV_OBJERR)
25 #define OS_ERRNO(code) SEGV_ERRNO(code)
26 extern int errno;
27
28 #include "os.h"
29 /* To get dynamic_0_space and friends */
30 #include "globals.h"
31 /* To get memory map */
32 #include "sparc-validate.h"
33
34 /* block size must be larger than the system page size */
35 #define SPARSE_BLOCK_SIZE (1<<15)
36 #define SPARSE_SIZE_MASK (SPARSE_BLOCK_SIZE-1)
37
38 #define PROT_DEFAULT OS_VM_PROT_ALL
39
40 #define OFFSET_NONE ((os_vm_offset_t)(~0))
41
42 #define EMPTYFILE "/tmp/empty"
43 #define ZEROFILE "/dev/zero"
44
45 #define INITIAL_MAX_SEGS 32
46 #define GROW_MAX_SEGS 16
47
48 extern char *getenv();
49
/* ---------------------------------------------------------------- */

/* Advance a file offset by ADJ, leaving the "no backing file" sentinel
   OFFSET_NONE unchanged. */
#define ADJ_OFFSET(off,adj) (((off)==OFFSET_NONE) ? OFFSET_NONE : ((off)+(adj)))

/* Page size exported to the rest of the system (forced up to
   OS_VM_DEFAULT_PAGESIZE in os_init), and the real hardware page size. */
long os_vm_page_size = (-1);
static long os_real_page_size = (-1);

/* Descriptor for one mapped region.  The table below is kept sorted by
   start address (see seg_create_nomerge). */
static struct segment {
    os_vm_address_t start;	/* note: start & length are expected to be on page */
    os_vm_size_t length;	/* boundaries */
    long file_offset;		/* backing-file offset, or OFFSET_NONE */
    short mapped_fd;		/* fd the region was mapped from */
    short protection;		/* OS_VM_PROT_* bits currently in force */
} *segments;

/* Entries in use / capacity of the segments table. */
static int n_segments = 0, max_segments = 0;

/* Backing fds opened in os_init: /dev/zero and an unlinked empty file
   used for sparse mappings. */
static int zero_fd = (-1), empty_fd = (-1);

/* Address of the most recent SIGSEGV, used to detect repeated faults
   at the same place (see segv_handler). */
static os_vm_address_t last_fault = 0;
/* How much os_vm_page_size exceeds the real page size (see os_init). */
static os_vm_size_t real_page_size_difference = 0;
71
/*
 * Print a perror-style diagnostic prefixed with "os_init: " and ARG,
 * then terminate.  Used for unrecoverable failures during os_init().
 */
static void
os_init_bailout(const char *arg)
{
    char buf[500];

    /* snprintf rather than sprintf so an oversized ARG (e.g. a long
       CMUCL_EMPTYFILE path) cannot overflow buf. */
    snprintf(buf, sizeof(buf), "os_init: %s", arg);
    perror(buf);
    exit(1);
}
82
83 void
84 os_init(const char *argv[], const char *envp[])
85 {
86 char *empty_file = getenv("CMUCL_EMPTYFILE");
87
88 if (empty_file == NULL)
89 empty_file = EMPTYFILE;
90
91 empty_fd = open(empty_file, O_RDONLY | O_CREAT);
92 if (empty_fd < 0)
93 os_init_bailout(empty_file);
94 unlink(empty_file);
95
96 zero_fd = open(ZEROFILE, O_RDONLY);
97 if (zero_fd < 0)
98 os_init_bailout(ZEROFILE);
99
100 os_vm_page_size = os_real_page_size = getpagesize();
101
102 max_segments = INITIAL_MAX_SEGS;
103 segments = (struct segment *) malloc(sizeof(struct segment) * max_segments);
104
105 if (segments == NULL) {
106 fprintf(stderr, "os_init: Couldn't allocate %d segment descriptors\n",
107 max_segments);
108 exit(1);
109 }
110
111 if (os_vm_page_size > OS_VM_DEFAULT_PAGESIZE) {
112 fprintf(stderr, "os_init: Pagesize too large (%d > %d)\n",
113 os_vm_page_size, OS_VM_DEFAULT_PAGESIZE);
114 exit(1);
115 } else {
116 /*
117 * we do this because there are apparently dependencies on
118 * the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
119 * but since the OS doesn't know we're using this restriction,
120 * we have to grovel around a bit to enforce it, thus anything
121 * that uses real_page_size_difference.
122 */
123 real_page_size_difference = OS_VM_DEFAULT_PAGESIZE - os_vm_page_size;
124 os_vm_page_size = OS_VM_DEFAULT_PAGESIZE;
125 }
126 }
127
128 /* ---------------------------------------------------------------- */
129
/*
 * Touch one byte in every SPARSE_BLOCK_SIZE-aligned block of
 * [addr, addr+len) within SEG so those pages are faulted in and in a
 * known state before the segment's attributes change.  Writable
 * segments get the byte written back as well, forcing a private dirty
 * copy.  No-op when the segment has no protection bits set.
 */
void
seg_force_resident(struct segment *seg, os_vm_address_t addr, os_vm_size_t len)
{
    int prot = seg->protection;

    if (prot != 0) {
	os_vm_address_t end = addr + len, touch = addr;

	while (touch < end) {
	    /* The read faults the block in... */
	    int contents = (*(char *) touch);

	    /* ...and the write (writable segments only) dirties it. */
	    if (prot & OS_VM_PROT_WRITE)
		(*(char *) touch) = contents;
	    /* Step to the start of the next sparse block. */
	    touch =
		(os_vm_address_t) (((long) touch + SPARSE_BLOCK_SIZE) &
				   ~SPARSE_SIZE_MASK);
	}
    }
}
149
150 static struct segment *
151 seg_create_nomerge(addr, len, protection, mapped_fd, file_offset)
152 os_vm_address_t addr;
153 os_vm_size_t len;
154 int protection;
155 int mapped_fd;
156 {
157 int n;
158 struct segment *seg;
159
160 if (len == 0)
161 return NULL;
162
163 if (n_segments == max_segments) {
164 struct segment *new_segs;
165
166 max_segments += GROW_MAX_SEGS;
167
168 new_segs = (struct segment *)
169 realloc(segments, max_segments * sizeof(struct segment));
170
171 if (new_segs == NULL) {
172 fprintf(stderr,
173 "seg_create_nomerge: Couldn't grow segment descriptor table to %s segments\n",
174 max_segments);
175 max_segments -= GROW_MAX_SEGS;
176 return NULL;
177 }
178
179 segments = new_segs;
180 }
181
182 for (n = n_segments, seg = segments; n > 0; n--, seg++)
183 if (addr < seg->start) {
184 seg = (&segments[n_segments]);
185 while (n-- > 0) {
186 seg[0] = seg[-1];
187 seg--;
188 }
189 break;
190 }
191
192 n_segments++;
193
194 seg->start = addr;
195 seg->length = len;
196 seg->protection = protection;
197 seg->mapped_fd = mapped_fd;
198 seg->file_offset = file_offset;
199
200 return seg;
201 }
202
#if 1
/* returns the first segment containing addr */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
{
    int n;
    struct segment *seg;

    /* Linear scan over the address-sorted table. */
    for (n = n_segments, seg = segments; n > 0; n--, seg++)
	if (seg->start <= addr && seg->start + seg->length > addr)
	    return seg;

    return NULL;
}
#else
/* returns the first segment containing addr */
/* Alternative implementation, currently compiled out. */
static struct segment *
seg_find(addr)
os_vm_address_t addr;
{
    /* does a binary search */
    struct segment *lo = segments, *hi = segments + n_segments;

    while (hi > lo) {
	struct segment *mid = lo + ((hi - lo) >> 1);
	os_vm_address_t start = mid->start;

	if (addr >= start && addr < start + mid->length)
	    return mid;
	else if (addr < start)
	    hi = mid;
	else
	    lo = mid + 1;
    }

    return NULL;
}
#endif
242
/* returns TRUE if the range from addr to addr+len intersects with any segment */
static boolean
collides_with_seg_p(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    int n;
    struct segment *seg;
    os_vm_address_t end = addr + len;

    /* The table is sorted by start address, so scanning can stop at
       the first segment that begins at or beyond END. */
    for (n = n_segments, seg = segments; n > 0; n--, seg++)
	if (seg->start >= end)
	    return FALSE;
	else if (seg->start + seg->length > addr)
	    return TRUE;

    return FALSE;
}
261
#if 0	/* WAY to SLOW */
/* returns TRUE if the range from addr to addr+len is a valid mapping
 * (that we don't know about) */
static boolean
mem_in_use(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    os_vm_address_t p;

    /* Fixed loop condition: the original tested "addr < addr + len",
       which never involves the cursor P, so the loop either never ran
       or never terminated.  (Code is compiled out, but kept correct.) */
    for (p = addr; p < addr + len; p += os_real_page_size) {
	char c;

	/* mincore() succeeding -- or failing with anything other than
	   ENOMEM -- means something has the page mapped. */
	if (mincore((caddr_t) p, os_real_page_size, &c) == 0 || errno != ENOMEM)
	    return TRUE;
    }
    return FALSE;
}
#endif
281
/* True when SEG is the last (highest-address) entry in the table. */
#define seg_last_p(seg) (((seg)-segments)>=n_segments-1)

/*
 * Remove SEG from the segment table by copying every later entry down
 * one slot.  A NULL argument is tolerated as a no-op.  Only the table
 * is updated; the underlying mapping is untouched.
 */
static void
seg_destroy(seg)
struct segment *seg;
{
    if (seg != NULL) {
	int n;

	for (n = seg - segments + 1; n < n_segments; n++) {
	    seg[0] = seg[1];
	    seg++;
	}

	n_segments--;
    }
}
299
/*
 * Coalesce SEG with the segment that follows it in the table, provided
 * the two are physically contiguous and agree on protection, backing
 * fd, and (contiguous) file offset.  No-op otherwise, or when SEG is
 * the last entry.
 */
static void
seg_try_merge_next(seg)
struct segment *seg;
{
    struct segment *nseg = seg + 1;

    if (!seg_last_p(seg)
	&& seg->start + seg->length == nseg->start
	&& seg->protection == nseg->protection
	&& seg->mapped_fd == nseg->mapped_fd
	&& ADJ_OFFSET(seg->file_offset, seg->length) == nseg->file_offset) {
	/* can merge with the next segment */
#ifdef DEBUG
	fprintf(stderr,
		";;; seg_try_merge: Merged 0x%08x[0x%08x] with 0x%08x[0x%08x]\n",
		seg->start, seg->length, nseg->start, nseg->length);
#endif

	if (((long) nseg->start & SPARSE_SIZE_MASK) != 0) {
	    /*
	     * if not on a block boundary, we have to ensure both parts
	     * of a common block are in a known state
	     */
	    seg_force_resident(seg, nseg->start - 1, 1);
	    seg_force_resident(nseg, nseg->start, 1);
	}

	seg->length += nseg->length;
	seg_destroy(nseg);
    }
}
331
332
333 /*
334 * Try to merge seg with adjacent segments.
335 */
336 static void
337 seg_try_merge_adjacent(seg)
338 struct segment *seg;
339 {
340 if (!seg_last_p(seg))
341 seg_try_merge_next(seg);
342 if (seg > segments)
343 seg_try_merge_next(seg - 1);
344 }
345
346 static struct segment *
347 seg_create(addr, len, protection, mapped_fd, file_offset)
348 os_vm_address_t addr;
349 os_vm_size_t len;
350 int protection;
351 int mapped_fd;
352 {
353 struct segment *seg =
354
355 seg_create_nomerge(addr, len, protection, mapped_fd, file_offset);
356 if (seg != NULL)
357 seg_try_merge_adjacent(seg);
358 return seg;
359 }
360
361 /*
362 * Change the attributes of the given range of an existing segment, and return
363 * a segment corresponding to the new bit.
364 */
365 static struct segment *
366 seg_change_range(seg, addr, len, protection, mapped_fd, file_offset)
367 struct segment *seg;
368 os_vm_address_t addr;
369 os_vm_size_t len;
370 int protection;
371 int mapped_fd;
372 {
373 os_vm_address_t end = addr + len;
374
375 if (len == 0)
376 return NULL;
377
378 if (protection != seg->protection
379 || mapped_fd != seg->mapped_fd
380 || file_offset != ADJ_OFFSET(seg->file_offset, addr - seg->start)) {
381 os_vm_size_t old_len = seg->length, seg_offset = (addr - seg->start);
382
383 if (old_len < len + seg_offset) {
384 struct segment *next = seg + 1;
385
386 #ifdef DEBUG
387 fprintf(stderr,
388 ";;; seg_change_range: region 0x%08x[0x%08x] overflows 0x%08x[0x%08x]\n",
389 addr, len, seg->start, old_len);
390 #endif
391
392 while (!seg_last_p(seg) && next->start + next->length <= end) {
393 #ifdef DEBUG
394 fprintf(stderr,
395 ";;; seg_change_range: merging extra segment 0x%08x[0x%08x]\n",
396 next->start, next->length);
397 #endif
398 seg_destroy(next);
399 }
400
401 if (!seg_last_p(seg) && next->start < end) {
402 next->length -= end - next->start;
403 next->start = end;
404 old_len = next->start - seg->start;
405 } else
406 old_len = len + seg_offset;
407
408 #ifdef DEBUG
409 fprintf(stderr,
410 ";;; seg_change_range: extended first seg to 0x%08x[0x%08x]\n",
411 seg->start, old_len);
412 #endif
413 }
414
415 if (seg_offset + len < old_len) {
416 /* add second part of old segment */
417 seg_create_nomerge(end,
418 old_len - (seg_offset + len),
419 seg->protection,
420 seg->mapped_fd,
421 ADJ_OFFSET(seg->file_offset, seg_offset + len));
422
423 #ifdef DEBUG
424 fprintf(stderr,
425 ";;; seg_change_range: Split off end of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
426 seg->start, old_len, end, old_len - (seg_offset + len));
427 #endif
428 }
429
430 if (seg_offset == 0) {
431 seg->length = len;
432 seg->protection = protection;
433 seg->mapped_fd = mapped_fd;
434 seg->file_offset = file_offset;
435 } else {
436 /* adjust first part of remaining old segment */
437 seg->length = seg_offset;
438
439 #ifdef DEBUG
440 fprintf(stderr,
441 ";;; seg_change_range: Split off beginning of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
442 seg->start, old_len, seg->start, seg_offset);
443 #endif
444
445 /* add new middle segment for new protected region */
446 seg =
447 seg_create_nomerge(addr, len, protection, mapped_fd,
448 file_offset);
449 }
450
451 seg_try_merge_adjacent(seg);
452
453 last_fault = 0;
454 }
455
456 return seg;
457 }
458
459 /* ---------------------------------------------------------------- */
460
461 static os_vm_address_t
462 mapin(addr, len, protection, map_fd, offset, is_readable)
463 os_vm_address_t addr;
464 os_vm_size_t len;
465 int protection;
466 int map_fd;
467 long offset;
468 int is_readable;
469 {
470 os_vm_address_t real;
471 boolean sparse = (len >= SPARSE_BLOCK_SIZE);
472
473 if (offset != OFFSET_NONE
474 && (offset < os_vm_page_size || (offset & (os_vm_page_size - 1)) != 0)) {
475 fprintf(stderr,
476 "mapin: file offset (%d) not multiple of pagesize (%d)\n",
477 offset, os_vm_page_size);
478 }
479
480 if (addr == NULL)
481 len += real_page_size_difference; /* futz around to get an aligned region */
482
483 last_fault = 0;
484 real = (os_vm_address_t)
485 mmap((caddr_t) addr,
486 (long) len,
487 sparse ? (is_readable ? PROT_READ | PROT_EXEC : 0) : protection,
488 (addr == NULL ? 0 : MAP_FIXED) | MAP_PRIVATE,
489 (is_readable || !sparse) ? map_fd : empty_fd,
490 (off_t) (offset == OFFSET_NONE ? 0 : offset));
491
492 if ((long) real == -1) {
493 perror("mapin: mmap");
494 return NULL;
495 }
496
497 if (addr == NULL) {
498 /*
499 * now play around with what the os gave us to make it align by
500 * our standards (which is why we overallocated)
501 */
502 os_vm_size_t overflow;
503
504 addr = os_round_up_to_page(real);
505 if (addr != real)
506 munmap((caddr_t) real, addr - real);
507
508 overflow = real_page_size_difference - (addr - real);
509 if (overflow != 0)
510 munmap((caddr_t) (addr + len - real_page_size_difference),
511 overflow);
512
513 real = addr;
514 }
515
516
517 return real;
518 }
519
/*
 * mapin() the range and record the result in the segment table --
 * updating the existing segment containing the mapped address if there
 * is one, creating a new entry otherwise.  If the bookkeeping fails,
 * the fresh mapping is unmapped again.  Returns the mapped address, or
 * NULL on any failure.
 */
static os_vm_address_t
map_and_remember(addr, len, protection, map_fd, offset, is_readable)
os_vm_address_t addr;
os_vm_size_t len;
int protection;
int map_fd;
long offset;
int is_readable;
{
    os_vm_address_t real =

	mapin(addr, len, protection, map_fd, offset, is_readable);

    if (real != NULL) {
	struct segment *seg = seg_find(real);

	if (seg != NULL)
	    seg = seg_change_range(seg, real, len, protection, map_fd, offset);
	else
	    seg = seg_create(real, len, protection, map_fd, offset);

	if (seg == NULL) {
	    /* Couldn't record the mapping: undo it rather than leave an
	       untracked region. */
	    munmap((caddr_t) real, len);
	    return NULL;
	}
    }
#ifdef DEBUG
    fprintf(stderr,
	    ";;; map_and_remember: 0x%08x[0x%08x] offset: %d, mapped to: %d\n",
	    real, len, offset, map_fd);
#endif

    return real;
}
554
555 /* ---------------------------------------------------------------- */
556
/*
 * Reserve LEN bytes at ADDR (or wherever the system chooses, when ADDR
 * is NULL), zero-backed, with the default protection.  An explicitly
 * requested range that collides with a known segment fails with NULL.
 */
os_vm_address_t
os_validate(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    addr = os_trunc_to_page(addr);
    len = os_round_up_size_to_page(len);

#ifdef DEBUG
    fprintf(stderr, ";;; os_validate: 0x%08x[0x%08x]\n", addr, len);
#endif

    if (addr != NULL && collides_with_seg_p(addr, len))
	return NULL;

    return map_and_remember(addr, len, PROT_DEFAULT, zero_fd, OFFSET_NONE,
			    FALSE);
}
575
/*
 * Unmap [addr, addr+len) and drop it from the segment table.
 * Complains to stderr (and does nothing else) when ADDR is not inside
 * any known segment.
 *
 * NOTE(review): seg_find() runs on the un-truncated ADDR, before the
 * page rounding below -- presumably equivalent because the page
 * containing ADDR belongs to the same segment; confirm.
 */
void
os_invalidate(addr, len)
os_vm_address_t addr;
os_vm_size_t len;
{
    struct segment *seg = seg_find(addr);

    addr = os_trunc_to_page(addr);
    len = os_round_up_size_to_page(len);

#ifdef DEBUG
    fprintf(stderr, ";;; os_invalidate: 0x%08x[0x%08x]\n", addr, len);
#endif

    if (seg == NULL)
	fprintf(stderr, "os_invalidate: Unknown segment: 0x%08x[0x%08x]\n",
		addr, len);
    else {
	/* Carve the range out of the table, then drop the descriptor
	   that now covers exactly the carved-out piece. */
	seg = seg_change_range(seg, addr, len, 0, 0, OFFSET_NONE);
	if (seg != NULL)
	    seg_destroy(seg);

	last_fault = 0;
	if (munmap((caddr_t) addr, len) != 0)
	    perror("os_invalidate: munmap");
    }
}
603
604 os_vm_address_t
605 os_map(fd, offset, addr, len)
606 int fd;
607 int offset;
608 os_vm_address_t addr;
609 long len;
610 {
611 addr = os_trunc_to_page(addr);
612 len = os_round_up_size_to_page(len);
613
614 #ifdef DEBUG
615 fprintf(stderr, ";;; os_map: 0x%08x[0x%08x]\n", addr, len);
616 #endif
617
618 return map_and_remember(addr, len, PROT_DEFAULT, fd, offset, TRUE);
619 }
620
/*
 * Flush the instruction cache over [address, address+length).  Only
 * the MACH/mips combination has a real implementation here; on SunOS
 * this compiles to a no-op.
 */
void
os_flush_icache(address, length)
os_vm_address_t address;
os_vm_size_t length;
{
#if defined(MACH) && defined(mips)
    vm_machine_attribute_val_t flush;
    kern_return_t kr;

    flush = MATTR_VAL_ICACHE_FLUSH;

    kr = vm_machine_attribute(task_self(), address, length,
			      MATTR_CACHE, &flush);
    if (kr != KERN_SUCCESS)
	mach_error("Could not flush the instruction cache", kr);
#endif
}
638
639 void
640 os_protect(addr, len, prot)
641 os_vm_address_t addr;
642 os_vm_size_t len;
643 int prot;
644 {
645 struct segment *seg = seg_find(addr);
646
647 addr = os_trunc_to_page(addr);
648 len = os_round_up_size_to_page(len);
649
650 #ifdef DEBUG
651 fprintf(stderr, ";;; os_protect: 0x%08x[0x%08x]\n", addr, len);
652 #endif
653
654 if (seg != NULL) {
655 int old_prot = seg->protection;
656
657 if (prot != old_prot) {
658 /*
659 * oooooh, sick: we have to make sure all the pages being protected have
660 * faulted in, so they're in a known state...
661 */
662 seg_force_resident(seg, addr, len);
663
664 seg_change_range(seg, addr, len, prot, seg->mapped_fd,
665 seg->file_offset);
666
667 if (mprotect((caddr_t) addr, (long) len, prot) != 0)
668 perror("os_unprotect: mprotect");
669 }
670 } else
671 fprintf(stderr, "os_protect: Unknown segment: 0x%08x[0x%08x]\n", addr,
672 len);
673 }
674
675 boolean
676 valid_addr(test)
677 os_vm_address_t test;
678 {
679 return seg_find(test) != NULL;
680 }
681
682 /* ---------------------------------------------------------------- */
683
684 static boolean
685 maybe_gc(HANDLER_ARGS)
686 {
687 /*
688 * It's necessary to enable recursive SEGVs, since the handle is
689 * used for multiple things (e.g., both gc-trigger & faulting in pages).
690 * We check against recursive gc's though...
691 */
692
693 boolean did_gc;
694 static already_trying = 0;
695
696 if (already_trying)
697 return FALSE;
698
699 SAVE_CONTEXT();
700
701 sigprocmask(SIG_SETMASK, &context->uc_sigmask, 0);
702
703 already_trying = TRUE;
704 did_gc = interrupt_maybe_gc(signal, code, context);
705 already_trying = FALSE;
706
707 return did_gc;
708 }
709
710 /*
711 * The primary point of catching segmentation violations is to allow
712 * read only memory to be re-mapped with more permissions when a write
713 * is attempted. this greatly decreases the residency of the program
714 * in swap space since read only areas don't take up room
715 *
716 * Running into the gc trigger page will also end up here...
717 */
/*
 * Low-level SIGSEGV handler: upgrade sparse-mapped pages to their full
 * permissions on first touch, hand gc-trigger hits to maybe_gc(), and
 * report genuine violations via interrupt_handle_now().
 */
void
segv_handler(HANDLER_ARGS, caddr_t addr)
{
    SAVE_CONTEXT();

    if (CODE(code) == OS_PROTERR) {	/* allow writes to this chunk */
	struct segment *seg = seg_find(addr);

	/* A repeated fault at the same address means the remap below
	   didn't help: either the gc trigger was hit or this is a real
	   violation. */
	if ((caddr_t) last_fault == addr) {
	    if (seg != NULL && maybe_gc(signal, code, context))
		/* we just garbage collected */
		return;
	    else {
		/* a *real* protection fault */
		fprintf(stderr,
			"segv_handler: Real protection violation: 0x%08x\n",
			addr);
		interrupt_handle_now(signal, code, context);
	    }
	} else
	    last_fault = (os_vm_address_t) addr;

	if (seg != NULL) {
	    int err;

	    /* round down to a page */
	    os_vm_address_t block =

		(os_vm_address_t) ((long) addr & ~SPARSE_SIZE_MASK);
	    os_vm_size_t length = SPARSE_BLOCK_SIZE;

	    /* Clip the sparse block to the segment's boundaries. */
	    if (block < seg->start) {
		length -= (seg->start - block);
		block = seg->start;
	    }
	    if (block + length > seg->start + seg->length)
		length = seg->start + seg->length - block;

#if 0
	    /* unmap it. probably redundant. */
	    if (munmap((caddr_t) block, length) == -1)
		perror("segv_handler: munmap");
#endif

	    /* and remap it with more permissions */
	    err = (int)
		mmap((caddr_t) block,
		     length,
		     seg->protection,
		     MAP_PRIVATE | MAP_FIXED,
		     seg->mapped_fd,
		     seg->file_offset == OFFSET_NONE
		     ? 0 : seg->file_offset + (block - seg->start));

	    if (err == -1) {
		perror("segv_handler: mmap");
		interrupt_handle_now(signal, code, context);
	    }
	} else {
	    fprintf(stderr, "segv_handler: 0x%08x not in any segment\n", addr);
	    interrupt_handle_now(signal, code, context);
	}
    }
    /*
     * note that we check for a gc-trigger hit even if it's not a PROT error
     */
    else if (!maybe_gc(signal, code, context)) {
	static int nomap_count = 0;

	if (CODE(code) == OS_MAPERR) {
	    if (nomap_count == 0) {
		fprintf(stderr,
			"segv_handler: No mapping fault: 0x%08x\n", addr);
		nomap_count++;
	    } else {
		/*
		 * There should be higher-level protection against stack
		 * overflow somewhere, but at least this prevents infinite
		 * puking of error messages...
		 */
		fprintf(stderr,
			"segv_handler: Recursive no mapping fault (stack overflow?)\n");
		exit(-1);
	    }
	} else if (OS_HASERRNO(code)) {
	    errno = OS_ERRNO(code);
	    perror("segv_handler: Object error");
	}

	interrupt_handle_now(signal, code, context);

	if (CODE(code) == OS_MAPERR)
	    nomap_count--;
    }
}
813
/* Register segv_handler as the low-level SIGSEGV handler. */
void
os_install_interrupt_handlers(void)
{
    interrupt_install_low_level_handler(SIGSEGV, segv_handler);
}
819
820 os_vm_address_t round_up_sparse_size(os_vm_address_t addr)
821 {
822 return (addr + SPARSE_BLOCK_SIZE - 1) & ~SPARSE_SIZE_MASK;
823 }
824
/*
 * An array of the start of the spaces which should have holes placed
 * after them.  Must not include the dynamic spaces because the size
 * of the dynamic space can be controlled from the command line.
 */
static os_vm_address_t spaces[] = {
    READ_ONLY_SPACE_START, STATIC_SPACE_START,
    BINDING_STACK_START, CONTROL_STACK_START
};

/*
 * The corresponding array for the size of each space.  Be sure that
 * the spaces and holes don't overlap!  The sizes MUST be on
 * SPARSE_BLOCK_SIZE boundaries.
 */
static unsigned long space_size[] = {
    READ_ONLY_SPACE_SIZE, STATIC_SPACE_SIZE,
    BINDING_STACK_SIZE, CONTROL_STACK_SIZE
};

/*
 * The size of the hole to make.  It should be strictly smaller than
 * SPARSE_BLOCK_SIZE.
 */

#define HOLE_SIZE 0x2000
853
854 void
855 make_holes(void)
856 {
857 int k;
858 os_vm_address_t hole;
859
860 /* Make holes of the appropriate size for desired spaces */
861
862 for (k = 0; k < sizeof(spaces) / sizeof(spaces[0]); ++k) {
863
864 hole = spaces[k] + space_size[k];
865
866 if (os_validate(hole, HOLE_SIZE) == NULL) {
867 fprintf(stderr,
868 "ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
869 HOLE_SIZE, (unsigned long) hole);
870 exit(1);
871 }
872 /* Make it inaccessible */
873 os_protect(hole, HOLE_SIZE, 0);
874 }
875
876 /* Round up the dynamic_space_size to the nearest SPARSE_BLOCK_SIZE */
877 dynamic_space_size = round_up_sparse_size(dynamic_space_size);
878
879 /* Now make a hole for the dynamic spaces */
880 hole = dynamic_space_size + (os_vm_address_t) dynamic_0_space;
881
882 if (os_validate(hole, HOLE_SIZE) == NULL) {
883 fprintf(stderr,
884 "ensure_space: Failed to validate hold of %ld bytes at 0x%08X\n",
885 HOLE_SIZE, (unsigned long) hole);
886 exit(1);
887 }
888 os_protect(hole, HOLE_SIZE, 0);
889
890 hole = dynamic_space_size + (os_vm_address_t) dynamic_1_space;
891 if (os_validate(hole, HOLE_SIZE) == NULL) {
892 fprintf(stderr,
893 "ensure_space: Failed to validate hole of %ld bytes at 0x%08X\n",
894 HOLE_SIZE, (unsigned long) hole);
895 exit(1);
896 }
897 os_protect(hole, HOLE_SIZE, 0);
898 }

  ViewVC Help
Powered by ViewVC 1.1.5