
Contents of /src/lisp/sunos-os.c



Revision 1.2
Mon Oct 24 19:17:10 1994 UTC by ram
Branch: MAIN
Changes since 1.1: +74 -14 lines
File MIME type: text/plain
Dynamic segment limit patch.
1 /*
2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/sunos-os.c,v 1.2 1994/10/24 19:17:10 ram Exp $
3 *
4 * OS-dependent routines. This file (along with os.h) exports an
5 * OS-independent interface to the operating system VM facilities.
6 * Surprisingly, this interface looks a lot like the Mach interface
7 * (but simpler in some places). For some operating systems, a subset
8 * of these functions will have to be emulated.
9 *
10 * This is the SunOS version.
11 * March 1991, Miles Bader <miles@cogsci.ed.ack.uk> & ted <ted@edu.NMSU>
12 *
13 */
14
15 /* #define DEBUG */
16
17 #include <stdio.h>
18
19 #include <signal.h>
20 #include <sys/file.h>
21
22 #include "os.h"
23
24 /* block size must be larger than the system page size */
25 #define SPARSE_BLOCK_SIZE (1<<15)
26 #define SPARSE_SIZE_MASK (SPARSE_BLOCK_SIZE-1)
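/*
 * Large mappings are managed in SPARSE_BLOCK_SIZE (1<<15 = 32KB) chunks:
 * they are initially mapped with reduced permissions (read/exec at most,
 * or none at all), and segv_handler later remaps individual blocks with
 * the segment's full protection on first touch.  SPARSE_SIZE_MASK rounds
 * addresses to block boundaries, e.g. (addr & ~SPARSE_SIZE_MASK) rounds
 * 0x00012345 down to 0x00010000.
 */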
27
28 #define PROT_DEFAULT OS_VM_PROT_ALL
29
30 #define OFFSET_NONE ((os_vm_offset_t)(~0))
31
32 #define EMPTYFILE "/tmp/empty"
33 #define ZEROFILE "/dev/zero"
34
35 #define INITIAL_MAX_SEGS 32
36 #define GROW_MAX_SEGS 16
37
38 extern char *getenv();
39
40 /* ---------------------------------------------------------------- */
41
42 #define ADJ_OFFSET(off,adj) (((off)==OFFSET_NONE) ? OFFSET_NONE : ((off)+(adj)))
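/*
 * ADJ_OFFSET slides a file offset by `adj' bytes while propagating the
 * distinguished OFFSET_NONE value, so anonymous (zero-fill) segments keep
 * "no offset" when they are split or merged.
 */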
43
44 long os_vm_page_size=(-1);
45
46 static struct segment {
47 os_vm_address_t start; /* note: start & length are expected to be on page */
48 os_vm_size_t length; /* boundaries */
49 long file_offset;
50 short mapped_fd;
51 short protection;
52 } *segments;
53
54 static int n_segments=0, max_segments=0;
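/*
 * The segment table is kept sorted by start address (seg_create_nomerge
 * inserts in place) and is grown by GROW_MAX_SEGS entries whenever it
 * fills up; n_segments counts live entries, max_segments the capacity.
 */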
55
56 static int zero_fd=(-1), empty_fd=(-1);
57
58 static os_vm_address_t last_fault=0;
59 static os_vm_size_t real_page_size_difference=0;
60
61 static void os_init_bailout(arg)
62 char *arg;
63 {
64 char buf[500];
65 sprintf(buf,"os_init: %s",arg);
66 perror(buf);
67 exit(1);
68 }
69
70 void os_init()
71 {
72 char *empty_file=getenv("CMUCL_EMPTYFILE");
73
74 if(empty_file==NULL)
75 empty_file=EMPTYFILE;
76
77 empty_fd=open(empty_file,O_RDONLY|O_CREAT,0644);
78 if(empty_fd<0)
79 os_init_bailout(empty_file);
80 unlink(empty_file);
81
82 zero_fd=open(ZEROFILE,O_RDONLY);
83 if(zero_fd<0)
84 os_init_bailout(ZEROFILE);
85
86 os_vm_page_size=getpagesize();
87
88 max_segments=INITIAL_MAX_SEGS;
89 segments=(struct segment *)malloc(sizeof(struct segment)*max_segments);
90 if(segments==NULL){
91 fprintf(stderr,"os_init: Couldn't allocate %d segment descriptors\n",
92 max_segments);
93 exit(1);
94 }
95
96 if(os_vm_page_size>OS_VM_DEFAULT_PAGESIZE){
97 fprintf(stderr,"os_init: Pagesize too large (%d > %d)\n",
98 os_vm_page_size,OS_VM_DEFAULT_PAGESIZE);
99 exit(1);
100 }else{
101 /*
102 * we do this because there are apparently dependencies on
103 * the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
104 * but since the OS doesn't know we're using this restriction,
105 * we have to grovel around a bit to enforce it, thus anything
106 * that uses real_page_size_difference.
107 */
108 real_page_size_difference=OS_VM_DEFAULT_PAGESIZE-os_vm_page_size;
109 os_vm_page_size=OS_VM_DEFAULT_PAGESIZE;
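/*
 * Example (assuming a 4KB hardware page and an 8KB OS_VM_DEFAULT_PAGESIZE):
 * real_page_size_difference becomes 4096, and mapin() over-allocates
 * anonymous regions by that much so the result can be trimmed back to an
 * 8KB-aligned start address.
 */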
110 }
111 }
112
113 /* ---------------------------------------------------------------- */
114
115 void seg_force_resident(struct segment *seg,
116 os_vm_address_t addr,
117 os_vm_size_t len)
118 {
119 int prot=seg->protection;
120
121 if(prot!=0){
122 os_vm_address_t end=addr+len, touch=addr;
123
124 while(touch<end){
125 int contents=(*(char *)touch);
126 if(prot&OS_VM_PROT_WRITE)
127 (*(char *)touch)=contents;
128 touch=(os_vm_address_t)(((long)touch+SPARSE_BLOCK_SIZE)&~SPARSE_SIZE_MASK);
129 }
130 }
131 }
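/*
 * The read (and, for writable segments, write-back) of one byte per sparse
 * block above forces each block to be faulted in via segv_handler, leaving
 * the whole range in a known, resident state before its mapping is changed.
 */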
132
133 static struct segment *seg_create_nomerge(addr,len,protection,mapped_fd,file_offset)
134 os_vm_address_t addr;
135 os_vm_size_t len;
136 int protection;
137 int mapped_fd;
long file_offset;
138 {
139 int n;
140 struct segment *seg;
141
142 if(len==0)
143 return NULL;
144
145 if(n_segments==max_segments){
146 struct segment *new_segs;
147
148 max_segments+=GROW_MAX_SEGS;
149
150 new_segs=(struct segment *)
151 realloc(segments,max_segments*sizeof(struct segment));
152
153 if(new_segs==NULL){
154 fprintf(stderr,
155 "seg_create_nomerge: Couldn't grow segment descriptor table to %s segments\n",
156 max_segments);
157 max_segments-=GROW_MAX_SEGS;
158 return NULL;
159 }
160
161 segments=new_segs;
162 }
163
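/* keep the table sorted: find the first segment starting above addr and
   shift it and everything after it up one slot to make room */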
164 for(n=n_segments, seg=segments; n>0; n--, seg++)
165 if(addr<seg->start){
166 seg=(&segments[n_segments]);
167 while(n-->0){
168 seg[0]=seg[-1];
169 seg--;
170 }
171 break;
172 }
173
174 n_segments++;
175
176 seg->start=addr;
177 seg->length=len;
178 seg->protection=protection;
179 seg->mapped_fd=mapped_fd;
180 seg->file_offset=file_offset;
181
182 return seg;
183 }
184
185 #if 1
186 /* returns the first segment containing addr */
187 static struct segment *seg_find(addr)
188 os_vm_address_t addr;
189 {
190 int n;
191 struct segment *seg;
192
193 for(n=n_segments, seg=segments; n>0; n--, seg++)
194 if(seg->start<=addr && seg->start+seg->length>addr)
195 return seg;
196
197 return NULL;
198 }
199 #else
200 /* returns the first segment containing addr */
201 static struct segment *seg_find(addr)
202 os_vm_address_t addr;
203 {
204 /* does a binary search */
205 struct segment *lo=segments, *hi=segments+n_segments;
206
207 while(hi>lo){
208 struct segment *mid=lo+((hi-lo)>>1);
209 os_vm_address_t start=mid->start;
210
211 if(addr>=start && addr<start+mid->length)
212 return mid;
213 else if(addr<start)
214 hi=mid;
215 else
216 lo=mid+1;
217 }
218
219 return NULL;
220 }
221 #endif
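/*
 * Of the two seg_find versions above, the linear scan is the one compiled
 * in (#if 1); the binary-search variant under #else is kept for reference
 * and relies on the segment table being sorted by start address, an
 * invariant seg_create_nomerge maintains.
 */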
222
223 /* returns TRUE if the range from addr to addr+len intersects with any segment */
224 static boolean collides_with_seg_p(addr,len)
225 os_vm_address_t addr;
226 os_vm_size_t len;
227 {
228 int n;
229 struct segment *seg;
230 os_vm_address_t end=addr+len;
231
232 for(n=n_segments, seg=segments; n>0; n--, seg++)
233 if(seg->start>=end)
234 return FALSE;
235 else if(seg->start+seg->length>addr)
236 return TRUE;
237
238 return FALSE;
239 }
240
241 #define seg_last_p(seg) (((seg)-segments)>=n_segments-1)
242
243 static void seg_destroy(seg)
244 struct segment *seg;
245 {
246 if(seg!=NULL){
247 int n;
248
249 for(n=seg-segments+1; n<n_segments; n++){
250 seg[0]=seg[1];
251 seg++;
252 }
253
254 n_segments--;
255 }
256 }
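/*
 * Note that seg_destroy only removes the bookkeeping entry (by shifting the
 * rest of the table down); it does not munmap anything.  Callers that want
 * the mapping gone, e.g. os_invalidate, do the munmap themselves.
 */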
257
258 static void seg_try_merge_next(seg)
259 struct segment *seg;
260 {
261 struct segment *nseg=seg+1;
262
263 if(!seg_last_p(seg)
264 && seg->start+seg->length==nseg->start
265 && seg->protection==nseg->protection
266 && seg->mapped_fd==nseg->mapped_fd
267 && ADJ_OFFSET(seg->file_offset,seg->length)==nseg->file_offset)
268 {
269 /* can merge with the next segment */
270 #ifdef DEBUG
271 fprintf(stderr,
272 ";;; seg_try_merge: Merged 0x%08x[0x%08x] with 0x%08x[0x%08x]\n",
273 seg->start,seg->length,nseg->start,nseg->length);
274 #endif
275
276 if(((long)nseg->start&SPARSE_SIZE_MASK)!=0){
277 /*
278 * if not on a block boundary, we have to ensure both parts
279 * of a common block are in a known state
280 */
281 seg_force_resident(seg,nseg->start-1,1);
282 seg_force_resident(nseg,nseg->start,1);
283 }
284
285 seg->length+=nseg->length;
286 seg_destroy(nseg);
287 }
288 }
289
290
291 /*
292 * Try to merge seg with adjacent segments.
293 */
294 static void seg_try_merge_adjacent(seg)
295 struct segment *seg;
296 {
297 if(!seg_last_p(seg))
298 seg_try_merge_next(seg);
299 if(seg>segments)
300 seg_try_merge_next(seg-1);
301 }
302
303 static struct segment *seg_create(addr,len,protection,mapped_fd,file_offset)
304 os_vm_address_t addr;
305 os_vm_size_t len;
306 int protection;
307 int mapped_fd;
long file_offset;
308 {
309 struct segment *seg=seg_create_nomerge(addr,len,protection,mapped_fd,file_offset);
310 if(seg!=NULL)
311 seg_try_merge_adjacent(seg);
312 return seg;
313 }
314
315 /*
316 * Change the attributes of the given range of an existing segment, and return
317 * a segment corresponding to the new bit.
318 */
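/*
 * Roughly: when the new attributes differ from the old ones, any following
 * segments wholly covered by [addr, addr+len) are destroyed, a partially
 * covered follower is trimmed, the tail of the old segment beyond the range
 * is re-created as its own segment, and the range itself either reuses the
 * old descriptor (when it starts at seg->start) or becomes a new middle
 * segment.
 */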
319 static struct segment *seg_change_range(seg,addr,len,protection,mapped_fd,file_offset)
320 struct segment *seg;
321 os_vm_address_t addr;
322 os_vm_size_t len;
323 int protection;
324 int mapped_fd;
long file_offset;
325 {
326 os_vm_address_t end=addr+len;
327
328 if(len==0)
329 return NULL;
330
331 if(protection!=seg->protection
332 || mapped_fd!=seg->mapped_fd
333 || file_offset!=ADJ_OFFSET(seg->file_offset,addr-seg->start))
334 {
335 os_vm_size_t old_len=seg->length, seg_offset=(addr-seg->start);
336
337 if(old_len<len+seg_offset){
338 struct segment *next=seg+1;
339
340 #ifdef DEBUG
341 fprintf(stderr,
342 ";;; seg_change_range: region 0x%08x[0x%08x] overflows 0x%08x[0x%08x]\n",
343 addr,len,
344 seg->start,old_len);
345 #endif
346
347 while(!seg_last_p(seg) && next->start+next->length<=end){
348 #ifdef DEBUG
349 fprintf(stderr,
350 ";;; seg_change_range: merging extra segment 0x%08x[0x%08x]\n",
351 next->start,
352 next->length);
353 #endif
354 seg_destroy(next);
355 }
356
357 if(!seg_last_p(seg) && next->start<end){
358 next->length-=end-next->start;
359 next->start=end;
360 old_len=next->start-seg->start;
361 }else
362 old_len=len+seg_offset;
363
364 #ifdef DEBUG
365 fprintf(stderr,
366 ";;; seg_change_range: extended first seg to 0x%08x[0x%08x]\n",
367 seg->start,
368 old_len);
369 #endif
370 }
371
372 if(seg_offset+len<old_len){
373 /* add second part of old segment */
374 seg_create_nomerge(end,
375 old_len-(seg_offset+len),
376 seg->protection,
377 seg->mapped_fd,
378 ADJ_OFFSET(seg->file_offset,seg_offset+len));
379
380 #ifdef DEBUG
381 fprintf(stderr,
382 ";;; seg_change_range: Split off end of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
383 seg->start,old_len,
384 end,old_len-(seg_offset+len));
385 #endif
386 }
387
388 if(seg_offset==0){
389 seg->length=len;
390 seg->protection=protection;
391 seg->mapped_fd=mapped_fd;
392 seg->file_offset=file_offset;
393 }else{
394 /* adjust first part of remaining old segment */
395 seg->length=seg_offset;
396
397 #ifdef DEBUG
398 fprintf(stderr,
399 ";;; seg_change_range: Split off beginning of 0x%08x[0x%08x]: 0x%08x[0x%08x]\n",
400 seg->start,old_len,
401 seg->start,seg_offset);
402 #endif
403
404 /* add new middle segment for new protected region */
405 seg=seg_create_nomerge(addr,len,protection,mapped_fd,file_offset);
406 }
407
408 seg_try_merge_adjacent(seg);
409
410 last_fault=0;
411 }
412
413 return seg;
414 }
415
416 /* ---------------------------------------------------------------- */
417
418 static os_vm_address_t mapin(addr,len,protection,map_fd,offset,is_readable)
419 os_vm_address_t addr;
420 os_vm_size_t len;
421 int protection;
422 int map_fd;
423 long offset;
424 int is_readable;
425 {
426 os_vm_address_t real;
427 boolean sparse=(len>=SPARSE_BLOCK_SIZE);
428
429 if(offset!=OFFSET_NONE
430 && (offset<os_vm_page_size || (offset&(os_vm_page_size-1))!=0))
431 {
432 fprintf(stderr,
433 "mapin: file offset (%d) not multiple of pagesize (%d)\n",
434 offset,
435 os_vm_page_size);
436 }
437
438 if(addr==NULL)
439 len+=real_page_size_difference; /* futz around to get an aligned region */
440
441 last_fault=0;
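/*
 * For sparse regions the initial mmap deliberately under-privileges the
 * mapping (read/exec at most, and the empty backing file rather than the
 * real fd when the data isn't readable yet); segv_handler upgrades
 * individual blocks on first access.  Non-sparse regions are mapped with
 * their final protection right away.
 */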
442 real=(os_vm_address_t)
443 mmap((caddr_t)addr,
444 (long)len,
445 sparse ? (is_readable ? PROT_READ|PROT_EXEC : 0) : protection,
446 (addr==NULL? 0 : MAP_FIXED)|MAP_PRIVATE,
447 (is_readable || !sparse) ? map_fd : empty_fd,
448 (off_t)(offset==OFFSET_NONE ? 0 : offset));
449
450 if((long)real==-1){
451 perror("mapin: mmap");
452 return NULL;
453 }
454
455 if(addr==NULL){
456 /*
457 * now play around with what the os gave us to make it align by
458 * our standards (which is why we overallocated)
459 */
460 os_vm_size_t overflow;
461
462 addr=os_round_up_to_page(real);
463 if(addr!=real)
464 munmap(real,addr-real);
465
466 overflow=real_page_size_difference-(addr-real);
467 if(overflow!=0)
468 munmap(addr+len-real_page_size_difference,overflow);
469
470 real=addr;
471 }
472
473
474 return real;
475 }
476
477 static os_vm_address_t map_and_remember(addr,len,protection,map_fd,offset,is_readable)
478 os_vm_address_t addr;
479 os_vm_size_t len;
480 int protection;
481 int map_fd;
482 long offset;
483 int is_readable;
484 {
485 os_vm_address_t real=mapin(addr,len,protection,map_fd,offset,is_readable);
486
487 if(real!=NULL){
488 struct segment *seg=seg_find(real);
489
490 if(seg!=NULL)
491 seg=seg_change_range(seg,real,len,protection,map_fd,offset);
492 else
493 seg=seg_create(real,len,protection,map_fd,offset);
494
495 if(seg==NULL){
496 munmap(real,len);
497 return NULL;
498 }
499 }
500
501 #ifdef DEBUG
502 fprintf(stderr,";;; map_and_remember: 0x%08x[0x%08x] offset: %d, mapped to: %d\n",
503 real,len,offset,map_fd);
504 #endif
505
506 return real;
507 }
508
509 /* ---------------------------------------------------------------- */
510
511 os_vm_address_t os_validate(addr, len)
512 os_vm_address_t addr;
513 os_vm_size_t len;
514 {
515 addr=os_trunc_to_page(addr);
516 len=os_round_up_size_to_page(len);
517
518 #ifdef DEBUG
519 fprintf(stderr, ";;; os_validate: 0x%08x[0x%08x]\n",addr,len);
520 #endif
521
522 if(addr!=NULL && collides_with_seg_p(addr,len))
523 return NULL;
524
525 return map_and_remember(addr,len,PROT_DEFAULT,zero_fd,OFFSET_NONE,FALSE);
526 }
527
528 void os_invalidate(addr, len)
529 os_vm_address_t addr;
530 os_vm_size_t len;
531 {
532 struct segment *seg=seg_find(addr);
533
534 addr=os_trunc_to_page(addr);
535 len=os_round_up_size_to_page(len);
536
537 #ifdef DEBUG
538 fprintf(stderr, ";;; os_invalidate: 0x%08x[0x%08x]\n",addr,len);
539 #endif
540
541 if(seg==NULL)
542 fprintf(stderr, "os_invalidate: Unknown segment: 0x%08x[0x%08x]\n",addr,len);
543 else{
544 seg=seg_change_range(seg,addr,len,0,0,OFFSET_NONE);
545 if(seg!=NULL)
546 seg_destroy(seg);
547
548 last_fault=0;
549 if(munmap(addr,len)!=0)
550 perror("os_invalidate: munmap");
551 }
552 }
553
554 os_vm_address_t os_map(fd, offset, addr, len)
555 int fd;
556 int offset;
557 os_vm_address_t addr;
558 long len;
559 {
560 addr=os_trunc_to_page(addr);
561 len=os_round_up_size_to_page(len);
562
563 #ifdef DEBUG
564 fprintf(stderr, ";;; os_map: 0x%08x[0x%08x]\n",addr,len);
565 #endif
566
567 return map_and_remember(addr,len,PROT_DEFAULT,fd,offset,TRUE);
568 }
569
570 void os_flush_icache(address, length)
571 os_vm_address_t address;
572 os_vm_size_t length;
573 {
574 #if defined(MACH) && defined(mips)
575 vm_machine_attribute_val_t flush;
576 kern_return_t kr;
577
578 flush = MATTR_VAL_ICACHE_FLUSH;
579
580 kr = vm_machine_attribute(task_self(), address, length,
581 MATTR_CACHE, &flush);
582 if (kr != KERN_SUCCESS)
583 mach_error("Could not flush the instruction cache", kr);
584 #endif
585 }
586
587 void os_protect(addr, len, prot)
588 os_vm_address_t addr;
589 os_vm_size_t len;
590 int prot;
591 {
592 struct segment *seg=seg_find(addr);
593
594 addr=os_trunc_to_page(addr);
595 len=os_round_up_size_to_page(len);
596
597 #ifdef DEBUG
598 fprintf(stderr,";;; os_protect: 0x%08x[0x%08x]\n",addr,len);
599 #endif
600
601 if(seg!=NULL){
602 int old_prot=seg->protection;
603
604 if(prot!=old_prot){
605 /*
606 * oooooh, sick: we have to make sure all the pages being protected have
607 * faulted in, so they're in a known state...
608 */
609 seg_force_resident(seg,addr,len);
610
611 seg_change_range(seg,addr,len,prot,seg->mapped_fd,seg->file_offset);
612
613 if(mprotect((caddr_t)addr,(long)len,prot)!=0)
614 perror("os_unprotect: mprotect");
615 }
616 }else
617 fprintf(stderr,"os_protect: Unknown segment: 0x%08x[0x%08x]\n",addr,len);
618 }
619
620 boolean valid_addr(test)
621 os_vm_address_t test;
622 {
623 return seg_find(test)!=NULL;
624 }
625
626 /* ---------------------------------------------------------------- */
627
628 static boolean maybe_gc(sig, code, context)
629 int sig, code;
630 struct sigcontext *context;
631 {
632 /*
633 * It's necessary to enable recursive SEGVs, since the handler is
634 * used for multiple things (e.g., both gc-trigger & faulting in pages).
635 * We check against recursive gc's though...
636 */
637
638 boolean did_gc;
639 static int already_trying=0;
640
641 if(already_trying)
642 return FALSE;
643
644 sigsetmask(context->sc_mask);
645
646 already_trying=TRUE;
647 did_gc=interrupt_maybe_gc(sig, code, context);
648 already_trying=FALSE;
649
650 return did_gc;
651 }
652
653 /*
654 * The primary point of catching segmentation violations is to allow
655 * read only memory to be re-mapped with more permissions when a write
656 * is attempted. This greatly decreases the residency of the program
657 * in swap space, since read-only areas don't take up room.
658 *
659 * Running into the gc trigger page will also end up here...
660 */
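/*
 * On a protection fault the handler remaps just the SPARSE_BLOCK_SIZE block
 * around the faulting address with the segment's full protection.  A second
 * fault on the very same address (tracked in last_fault) means the remap
 * didn't help, so it is treated as a gc trigger or a genuine violation.
 */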
661 void segv_handler(sig, code, context, addr)
662 int sig, code;
663 struct sigcontext *context;
664 caddr_t addr;
665 {
666 if (code == SEGV_PROT) { /* allow writes to this chunk */
667 struct segment *seg=seg_find(addr);
668
669 if(last_fault==addr){
670 if(seg!=NULL && maybe_gc(sig, code, context))
671 /* we just garbage collected */
672 return;
673 else{
674 /* a *real* protection fault */
675 fprintf(stderr,
676 "segv_handler: Real protection violation: 0x%08x\n",
677 addr);
678 interrupt_handle_now(sig,code,context);
679 }
680 }else
681 last_fault=addr;
682
683 if(seg!=NULL){
684 int err;
685 /* round down to a page */
686 os_vm_address_t block=(os_vm_address_t)((long)addr&~SPARSE_SIZE_MASK);
687 os_vm_size_t length=SPARSE_BLOCK_SIZE;
688
689 if(block < seg->start){
690 length-=(seg->start - block);
691 block=seg->start;
692 }
693 if(block+length > seg->start+seg->length)
694 length=seg->start+seg->length-block;
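/*
 * Example: with a 32KB sparse block, a fault at 0x00012345 in a segment
 * starting at 0x00011000 remaps [0x00011000, 0x00018000) -- the block is
 * rounded down to 0x00010000 and then clipped to the segment start
 * (assuming the segment extends at least to 0x00018000).
 */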
695
696 #if 0
697 /* unmap it. probably redundant. */
698 if(munmap(block,length) == -1)
699 perror("segv_handler: munmap");
700 #endif
701
702 /* and remap it with more permissions */
703 err=(int)
704 mmap(block,
705 length,
706 seg->protection,
707 MAP_PRIVATE|MAP_FIXED,
708 seg->mapped_fd,
709 seg->file_offset==OFFSET_NONE
710 ? 0
711 : seg->file_offset+(block-seg->start));
712
713 if (err == -1) {
714 perror("segv_handler: mmap");
715 interrupt_handle_now(sig,code,context);
716 }
717 }
718 else{
719 fprintf(stderr, "segv_handler: 0x%08x not in any segment\n",addr);
720 interrupt_handle_now(sig,code,context);
721 }
722 }
723 /*
724 * note that we check for a gc-trigger hit even if it's not a PROT error
725 */
726 else if(!maybe_gc(sig, code, context)){
727 static int nomap_count=0;
728
729 if(code==SEGV_NOMAP){
730 if(nomap_count==0){
731 fprintf(stderr,
732 "segv_handler: No mapping fault: 0x%08x\n",addr);
733 nomap_count++;
734 }else{
735 /*
736 * There should be higher-level protection against stack
737 * overflow somewhere, but at least this prevents infinite
738 * puking of error messages...
739 */
740 fprintf(stderr,
741 "segv_handler: Recursive no mapping fault (stack overflow?)\n");
742 exit(-1);
743 }
744 }else if(SEGV_CODE(code)==SEGV_OBJERR){
745 extern int errno;
746 errno=SEGV_ERRNO(code);
747 perror("segv_handler: Object error");
748 }
749
750 interrupt_handle_now(sig,code,context);
751
752 if(code==SEGV_NOMAP)
753 nomap_count--;
754 }
755 }
756
757 void os_install_interrupt_handlers()
758 {
759 interrupt_install_low_level_handler(SIGSEGV,segv_handler);
760 }
