1: /* 2: * SCCS id @(#)slp.c 2.1 (Berkeley) 8/29/83 3: */ 4: 5: #include "param.h" 6: #include <sys/systm.h> 7: #include <sys/dir.h> 8: #include <sys/user.h> 9: #include <sys/proc.h> 10: #include <sys/text.h> 11: #include <sys/map.h> 12: #include <sys/file.h> 13: #include <sys/inode.h> 14: #include <sys/buf.h> 15: #include <sys/seg.h> 16: #ifdef UCB_METER 17: #include <sys/vm.h> 18: #endif 19: #include <sys/inline.h> 20: 21: #ifdef UCB_FRCSWAP 22: extern int idleflg ; /* If set, allow incore forks and expands */ 23: /* Set before idle(), cleared in clock.c */ 24: #endif 25: 26: #ifdef CGL_RTP 27: int wantrtp; /* Set when the real-time process is runnable */ 28: #endif 29: 30: #ifdef SMALL 31: #define SQSIZE 010 /* Must be power of 2 */ 32: #else 33: #define SQSIZE 0100 /* Must be power of 2 */ 34: #endif 35: 36: #define HASH(x) (( (int) x >> 5) & (SQSIZE-1)) 37: struct proc *slpque[SQSIZE]; 38: 39: /* 40: * Give up the processor till a wakeup occurs 41: * on chan, at which time the process 42: * enters the scheduling queue at priority pri. 43: * The most important effect of pri is that when 44: * pri<=PZERO a signal cannot disturb the sleep; 45: * if pri>PZERO signals will be processed. 46: * Callers of this routine must be prepared for 47: * premature return, and check that the reason for 48: * sleeping has gone away. 49: */ 50: sleep(chan, pri) 51: caddr_t chan; 52: { 53: register struct proc *rp; 54: #ifdef MENLO_JCL 55: register struct proc **hp; 56: #else 57: register h; 58: struct proc *q; 59: #endif 60: register s; 61: 62: rp = u.u_procp; 63: s = spl6(); 64: #ifdef MENLO_JCL 65: if (chan==0 || rp->p_stat != SRUN) 66: panic("sleep"); 67: #else 68: if (chan==0) 69: panic("sleep"); 70: rp->p_stat = SSLEEP; 71: #endif 72: rp->p_wchan = chan; 73: #ifdef UCB_METER 74: rp->p_slptime = 0; 75: #endif 76: rp->p_pri = pri; 77: #ifdef MENLO_JCL 78: hp = &slpque[HASH(chan)]; 79: rp->p_link = *hp; 80: *hp = rp; 81: #else 82: h = HASH(chan); 83: /* 84: * remove this diagnostic loop 85: * when you're sure it can't happen 86: */ 87: for(q=slpque[h]; q!=NULL; q=q->p_link) 88: if(q == rp) { 89: printf("proc asleep %d\n", rp->p_pid); 90: goto cont; 91: } 92: rp->p_link = slpque[h]; 93: slpque[h] = rp; 94: cont: 95: #endif 96: if(pri > PZERO) { 97: #ifdef MENLO_JCL 98: if(ISSIG(rp)) { 99: if (rp->p_wchan) 100: unsleep(rp); 101: rp->p_stat = SRUN; 102: (void) _spl0(); 103: goto psig; 104: } 105: if (rp->p_wchan == 0) 106: goto out; 107: rp->p_stat = SSLEEP; 108: #else 109: if(issig()) { 110: rp->p_wchan = 0; 111: rp->p_stat = SRUN; 112: slpque[h] = rp->p_link; 113: (void) _spl0(); 114: goto psig; 115: } 116: #endif 117: (void) _spl0(); 118: if(runin != 0) { 119: runin = 0; 120: wakeup((caddr_t)&runin); 121: } 122: swtch(); 123: #ifdef MENLO_JCL 124: if(ISSIG(rp)) 125: #else 126: if(issig()) 127: #endif 128: goto psig; 129: } else { 130: #ifdef MENLO_JCL 131: rp->p_stat = SSLEEP; 132: #endif 133: (void) _spl0(); 134: swtch(); 135: } 136: #ifdef MENLO_JCL 137: out: 138: #endif 139: splx(s); 140: return; 141: 142: /* 143: * If priority was low (>PZERO) and 144: * there has been a signal, 145: * execute non-local goto to 146: * the qsav location. 147: * (see trap.c) 148: */ 149: psig: 150: resume(u.u_procp->p_addr, u.u_qsav); 151: /*NOTREACHED*/ 152: } 153: 154: #ifdef MENLO_JCL 155: 156: /* 157: * Remove a process from its wait queue 158: */ 159: unsleep(p) 160: register struct proc *p; 161: { 162: register struct proc **hp; 163: register s; 164: 165: s = spl6(); 166: if (p->p_wchan) { 167: hp = &slpque[HASH(p->p_wchan)]; 168: while (*hp != p) 169: hp = &(*hp)->p_link; 170: *hp = p->p_link; 171: p->p_wchan = 0; 172: } 173: splx(s); 174: } 175: 176: #endif 177: 178: /* 179: * Wake up all processes sleeping on chan. 180: */ 181: 182: wakeup(chan) 183: register caddr_t chan; 184: { 185: register struct proc *p, **q; 186: struct proc **h; 187: int i, s; 188: #ifndef NOKA5 189: mapinfo map; 190: 191: /* 192: * Since we are called at interrupt time, must insure normal 193: * kernel mapping to access proc. 194: */ 195: savemap(map); 196: #endif 197: s = spl6(); 198: h = &slpque[HASH(chan)]; 199: restart: 200: for (q = h; p = *q; ) { 201: if (p->p_stat != SSLEEP && p->p_stat != SSTOP) 202: panic("wakeup"); 203: if (p->p_wchan==chan) { 204: p->p_wchan = 0; 205: *q = p->p_link; 206: #ifdef UCB_METER 207: p->p_slptime = 0; 208: #endif 209: if (p->p_stat == SSLEEP) { 210: setrun(p); 211: goto restart; 212: } 213: } else 214: q = &p->p_link; 215: } 216: splx(s); 217: #ifndef NOKA5 218: restormap(map); 219: #endif 220: } 221: 222: /* 223: * When you are sure that it 224: * is impossible to get the 225: * 'proc on q' diagnostic, the 226: * diagnostic loop can be removed. 227: */ 228: setrq(p) 229: struct proc *p; 230: { 231: register struct proc *q; 232: register s; 233: 234: s = spl6(); 235: #ifdef DIAGNOSTIC 236: for(q=runq; q!=NULL; q=q->p_link) 237: if(q == p) { 238: printf("proc on q\n"); 239: goto out; 240: } 241: #endif 242: p->p_link = runq; 243: runq = p; 244: out: 245: splx(s); 246: } 247: 248: 249: #ifdef MENLO_JCL 250: /* 251: * Remove runnable job from run queue. 252: * This is done when a runnable job is swapped 253: * out so that it won't be selected in swtch(). 254: * It will be reinserted in the runq with setrq 255: * when it is swapped back in. 256: */ 257: remrq(p) 258: register struct proc *p; 259: { 260: register struct proc *q; 261: int s; 262: 263: s = spl6(); 264: if (p == runq) 265: runq = p->p_link; 266: else { 267: for (q = runq; q; q = q->p_link) 268: if (q->p_link == p) { 269: q->p_link = p->p_link; 270: goto done; 271: } 272: panic("remque"); 273: done: 274: ; 275: } 276: splx(s); 277: } 278: #endif 279: 280: /* 281: * Set the process running; 282: * arrange for it to be swapped in if necessary. 283: */ 284: setrun(p) 285: register struct proc *p; 286: { 287: #ifdef MENLO_JCL 288: register s; 289: 290: s = spl6(); 291: switch (p->p_stat) { 292: 293: case SSTOP: 294: case SSLEEP: 295: unsleep(p); /* e.g. when sending signals */ 296: break; 297: 298: case SIDL: 299: break; 300: 301: default: 302: panic("setrun"); 303: } 304: p->p_stat = SRUN; 305: if (p->p_flag & SLOAD) 306: setrq(p); 307: splx(s); 308: #else 309: register caddr_t w; 310: 311: if (p->p_stat==0 || p->p_stat==SZOMB) 312: panic("setrun"); 313: /* 314: * The assignment to w is necessary because of 315: * race conditions. (Interrupt between test and use) 316: */ 317: if (w = p->p_wchan) { 318: wakeup(w); 319: return; 320: } 321: p->p_stat = SRUN; 322: setrq(p); 323: #endif 324: 325: #ifdef CGL_RTP 326: if (p == rtpp) { 327: wantrtp++; 328: runrun++; 329: } 330: else 331: #endif 332: if (p->p_pri < curpri) 333: runrun++; 334: if(runout != 0 && (p->p_flag&SLOAD) == 0) { 335: runout = 0; 336: wakeup((caddr_t)&runout); 337: } 338: } 339: 340: /* 341: * Set user priority. 342: * The rescheduling flag (runrun) 343: * is set if the priority is better 344: * than the currently running process. 345: */ 346: setpri(pp) 347: register struct proc *pp; 348: { 349: register p; 350: 351: p = ((pp->p_cpu & 0377) / 16) + PUSER + pp->p_nice - NZERO; 352: if(p > 127) 353: p = 127; 354: if(p < curpri) 355: runrun++; 356: pp->p_pri = p; 357: return(p); 358: } 359: 360: /* 361: * The main loop of the scheduling (swapping) 362: * process. 363: * The basic idea is: 364: * see if anyone wants to be swapped in; 365: * swap out processes until there is room; 366: * swap him in; 367: * repeat. 368: * The runout flag is set whenever someone is swapped out. 369: * Sched sleeps on it awaiting work. 370: * 371: * Sched sleeps on runin whenever it cannot find enough 372: * core (by swapping out or otherwise) to fit the 373: * selected swapped process. It is awakened when the 374: * core situation changes and in any case once per second. 375: */ 376: sched() 377: { 378: register struct proc *rp, *p; 379: register outage, inage; 380: size_t maxsize; 381: 382: /* 383: * find user to swap in; 384: * of users ready, select one out longest 385: */ 386: 387: loop: 388: (void) _spl6(); 389: outage = -20000; 390: for (rp = &proc[0]; rp <= maxproc; rp++) 391: #ifdef VIRUS_VFORK 392: /* always bring in parents ending a vfork, to avoid deadlock */ 393: if (rp->p_stat==SRUN && (rp->p_flag&SLOAD)==0 && 394: ((rp->p_time - (rp->p_nice-NZERO)*8 > outage) 395: || (rp->p_flag & SVFPARENT))) 396: #else 397: if (rp->p_stat==SRUN && (rp->p_flag&SLOAD)==0 && 398: rp->p_time - (rp->p_nice-NZERO)*8 > outage) 399: #endif 400: { 401: p = rp; 402: outage = rp->p_time - (rp->p_nice-NZERO)*8; 403: #ifdef VIRUS_VFORK 404: if (rp->p_flag & SVFPARENT) 405: break; 406: #endif 407: } 408: /* 409: * If there is no one there, wait. 410: */ 411: if (outage == -20000) { 412: runout++; 413: sleep((caddr_t)&runout, PSWP); 414: goto loop; 415: } 416: (void) _spl0(); 417: 418: /* 419: * See if there is core for that process; 420: * if so, swap it in. 421: */ 422: 423: if (swapin(p)) 424: goto loop; 425: 426: /* 427: * none found. 428: * look around for core. 429: * Select the largest of those sleeping 430: * at bad priority; if none, select the oldest. 431: */ 432: 433: (void) _spl6(); 434: p = NULL; 435: maxsize = 0; 436: inage = -1; 437: for (rp = &proc[1]; rp <= maxproc; rp++) { 438: if (rp->p_stat==SZOMB 439: || (rp->p_flag&(SSYS|SLOCK|SULOCK|SLOAD))!=SLOAD) 440: continue; 441: if (rp->p_textp && rp->p_textp->x_flag&XLOCK) 442: continue; 443: if (rp->p_stat==SSLEEP&&rp->p_pri>=PZERO || rp->p_stat==SSTOP) { 444: #ifdef VIRUS_VFORK 445: if (maxsize < rp->p_dsize + rp->p_ssize) { 446: p = rp; 447: maxsize = rp->p_dsize + rp->p_ssize; 448: } 449: #else 450: if (maxsize < rp->p_size) { 451: p = rp; 452: maxsize = rp->p_size; 453: } 454: #endif 455: } else if (maxsize==0 && (rp->p_stat==SRUN||rp->p_stat==SSLEEP) 456: #ifdef CGL_RTP 457: /* 458: * Can't swap processes preempted in copy/clear. 459: */ 460: && (rp->p_pri > PRTP + 1) 461: #endif 462: ) { 463: if (rp->p_time+rp->p_nice-NZERO > inage) { 464: p = rp; 465: inage = rp->p_time+rp->p_nice-NZERO; 466: } 467: } 468: } 469: (void) _spl0(); 470: /* 471: * Swap found user out if sleeping at bad pri, 472: * or if he has spent at least 2 seconds in core and 473: * the swapped-out process has spent at least 3 seconds out. 474: * Otherwise wait a bit and try again. 475: */ 476: if (maxsize>0 || (outage>=3 && inage>=2)) { 477: #ifdef MENLO_JCL 478: (void) _spl6(); 479: p->p_flag &= ~SLOAD; 480: if(p->p_stat == SRUN) 481: remrq(p); 482: (void) _spl0(); 483: #else 484: p->p_flag &= ~SLOAD; 485: #endif 486: #ifdef VIRUS_VFORK 487: (void) xswap(p, X_FREECORE, X_OLDSIZE, X_OLDSIZE); 488: #else 489: (void) xswap(p, X_FREECORE, X_OLDSIZE); 490: #endif 491: goto loop; 492: } 493: (void) _spl6(); 494: runin++; 495: sleep((caddr_t)&runin, PSWP); 496: goto loop; 497: } 498: 499: /* 500: * Swap a process in. 501: * Allocate data and possible text separately. 502: * It would be better to do largest first. 503: */ 504: #ifdef VIRUS_VFORK 505: /* 506: * Text, data, stack and u. are allocated in that order, 507: * as that is likely to be in order of size. 508: */ 509: #endif 510: swapin(p) 511: register struct proc *p; 512: { 513: #ifdef VIRUS_VFORK 514: register struct text *xp; 515: memaddr a[3]; 516: register memaddr x = NULL; 517: 518: /* 519: * Malloc the text segment first, as it tends to be largest. 520: */ 521: if (xp = p->p_textp) { 522: xlock(xp); 523: if (xp->x_ccount == 0) { 524: if ((x = malloc(coremap, xp->x_size)) == NULL) { 525: xunlock(xp); 526: return(0); 527: } 528: } 529: } 530: if (malloc3(coremap, p->p_dsize, p->p_ssize, USIZE, a) == NULL) { 531: if (x) 532: mfree(coremap, xp->x_size, x); 533: if (xp) 534: xunlock(xp); 535: return(0); 536: } 537: if (x) { 538: xp->x_caddr = x; 539: if ((xp->x_flag & XLOAD) == 0) 540: swap(xp->x_daddr, x, xp->x_size, B_READ); 541: } 542: if (xp) { 543: xp->x_ccount++; 544: xunlock(xp); 545: } 546: if (p->p_dsize) { 547: swap(p->p_daddr, a[0], p->p_dsize, B_READ); 548: mfree(swapmap, ctod(p->p_dsize), p->p_daddr); 549: } 550: if (p->p_ssize) { 551: swap(p->p_saddr, a[1], p->p_ssize, B_READ); 552: mfree(swapmap, ctod(p->p_ssize), p->p_saddr); 553: } 554: swap(p->p_addr, a[2], USIZE, B_READ); 555: mfree(swapmap, ctod(USIZE), p->p_addr); 556: p->p_daddr = a[0]; 557: p->p_saddr = a[1]; 558: p->p_addr = a[2]; 559: 560: #else VIRUS_VFORK 561: register struct text *xp; 562: register int a; 563: register unsigned x = 0; 564: 565: if ((a = malloc(coremap, p->p_size)) == NULL) 566: return(0); 567: if (xp = p->p_textp) { 568: xlock(xp); 569: if (xp->x_ccount == 0) { 570: if ((x = malloc(coremap, xp->x_size)) == NULL) 571: { 572: xunlock(xp); 573: mfree(coremap, p->p_size, a); 574: return(0); 575: } 576: xp->x_caddr = x; 577: if ((xp->x_flag & XLOAD)==0) 578: swap(xp->x_daddr, x, xp->x_size, B_READ); 579: } 580: xp->x_ccount++; 581: xunlock(xp); 582: } 583: swap(p->p_addr, a, p->p_size, B_READ); 584: mfree(swapmap, ctod(p->p_size), p->p_addr); 585: p->p_addr = a; 586: #endif VIRUS_VFORK 587: 588: #ifdef MENLO_JCL 589: if (p->p_stat == SRUN) 590: setrq(p); 591: #endif 592: p->p_flag |= SLOAD; 593: p->p_time = 0; 594: return(1); 595: } 596: 597: /* 598: * put the current process on 599: * the queue of running processes and 600: * call the scheduler. 601: */ 602: qswtch() 603: { 604: setrq(u.u_procp); 605: swtch(); 606: } 607: 608: /* 609: * This routine is called to reschedule the CPU. 610: * if the calling process is not in the RUN state, 611: * arrangements for it to restart must have 612: * been made elsewhere, usually by calling via sleep. 613: * There is a race here. A process may become 614: * ready after it has been examined. 615: * In this case, idle() will be called and 616: * will return in at most 1hz time. 617: * i.e. it's not worth putting an spl() in. 618: */ 619: swtch() 620: { 621: register n; 622: register struct proc *p, *q; 623: struct proc *pp, *pq; 624: 625: #if defined(DIAGNOSTIC) && !defined(NOKA5) 626: extern struct buf *hasmap; 627: if(hasmap != (struct buf *)0) 628: panic("swtch hasmap"); 629: #endif 630: /* 631: * If not the idle process, resume the idle process. 632: */ 633: if (u.u_procp != &proc[0]) { 634: if (save(u.u_rsav)) { 635: sureg(); 636: return; 637: } 638: #ifndef NONFP 639: if (u.u_fpsaved==0) { 640: savfp(&u.u_fps); 641: u.u_fpsaved = 1; 642: } 643: #endif 644: resume(proc[0].p_addr, u.u_qsav); 645: } 646: /* 647: * The first save returns nonzero when proc 0 is resumed 648: * by another process (above); then the second is not done 649: * and the process-search loop is entered. 650: * 651: * The first save returns 0 when swtch is called in proc 0 652: * from sched(). The second save returns 0 immediately, so 653: * in this case too the process-search loop is entered. 654: * Thus when proc 0 is awakened by being made runnable, it will 655: * find itself and resume itself at rsav, and return to sched(). 656: */ 657: if (save(u.u_qsav)==0 && save(u.u_rsav)) 658: return; 659: #ifdef UCB_METER 660: cnt.v_swtch++; 661: #endif 662: loop: 663: (void) _spl6(); 664: runrun = 0; 665: #ifdef CGL_RTP 666: /* 667: * Test for the presence of a "real time process". 668: * If there is one and it is runnable, give it top 669: * priority. 670: */ 671: if ((p=rtpp) && p->p_stat==SRUN && (p->p_flag&SLOAD)) { 672: pq = NULL; 673: for (q=runq; q!=NULL; q=q->p_link) { 674: if (q == p) 675: break; 676: pq = q; 677: } 678: if (q == NULL) 679: panic("rtp not found\n"); /* "cannot happen" */ 680: n = PRTP; 681: wantrtp = 0; 682: goto runem; 683: } 684: #endif 685: pp = NULL; 686: q = NULL; 687: n = 128; 688: /* 689: * Search for highest-priority runnable process 690: */ 691: for(p=runq; p!=NULL; p=p->p_link) { 692: if((p->p_stat==SRUN) && (p->p_flag&SLOAD)) { 693: if(p->p_pri < n) { 694: pp = p; 695: pq = q; 696: n = p->p_pri; 697: } 698: } 699: q = p; 700: } 701: /* 702: * If no process is runnable, idle. 703: */ 704: p = pp; 705: if(p == NULL) { 706: #ifdef UCB_FRCSWAP 707: idleflg++; 708: #endif 709: idle(); 710: goto loop; 711: } 712: #ifdef CGL_RTP 713: runem: 714: #endif 715: q = pq; 716: if(q == NULL) 717: runq = p->p_link; 718: else 719: q->p_link = p->p_link; 720: curpri = n; 721: (void) _spl0(); 722: /* 723: * The rsav (ssav) contents are interpreted in the new address space 724: */ 725: n = p->p_flag&SSWAP; 726: p->p_flag &= ~SSWAP; 727: resume(p->p_addr, n? u.u_ssav: u.u_rsav); 728: } 729: 730: /* 731: * Create a new process-- the internal version of 732: * sys fork. 733: * It returns 1 in the new process, 0 in the old. 734: */ 735: #ifdef VIRUS_VFORK 736: newproc(isvfork) 737: #else 738: newproc() 739: #endif 740: { 741: int a1, a2; 742: register struct proc *rpp, *rip; 743: register n; 744: #ifdef VIRUS_VFORK 745: unsigned a[3]; 746: #endif 747: 748: rpp = NULL; 749: /* 750: * First, just locate a slot for a process 751: * and copy the useful info from this process into it. 752: * The panic "cannot happen" because fork has already 753: * checked for the existence of a slot. 754: */ 755: retry: 756: mpid++; 757: if(mpid >= 30000) 758: mpid = 1; 759: for(rip = proc; rip < procNPROC; rip++) { 760: if(rip->p_stat == NULL && rpp==NULL) 761: rpp = rip; 762: if (rip->p_pid==mpid || rip->p_pgrp==mpid) 763: goto retry; 764: } 765: if (rpp == NULL) 766: panic("no procs"); 767: 768: /* 769: * make proc entry for new proc 770: */ 771: 772: rip = u.u_procp; 773: rpp->p_clktim = 0; 774: #ifndef MENLO_JCL 775: rpp->p_stat = SRUN; 776: rpp->p_flag = SLOAD; 777: #else 778: rpp->p_stat = SIDL; 779: rpp->p_flag = SLOAD | (rip->p_flag & (SDETACH|SNUSIG)); 780: rpp->p_pptr = rip; 781: rpp->p_siga0 = rip->p_siga0; 782: rpp->p_siga1 = rip->p_siga1; 783: rpp->p_cursig = 0; 784: rpp->p_wchan = 0; 785: #endif 786: #ifdef UCB_SUBM 787: rpp->p_flag |= rip->p_flag & SSUBM; 788: #endif 789: rpp->p_uid = rip->p_uid; 790: rpp->p_pgrp = rip->p_pgrp; 791: rpp->p_nice = rip->p_nice; 792: rpp->p_textp = rip->p_textp; 793: rpp->p_pid = mpid; 794: rpp->p_ppid = rip->p_pid; 795: rpp->p_time = 0; 796: rpp->p_cpu = 0; 797: #ifdef UCB_METER 798: rpp->p_slptime = 0; 799: #endif 800: if (rpp > maxproc) 801: maxproc = rpp; 802: 803: /* 804: * make duplicate entries 805: * where needed 806: */ 807: 808: for(n=0; n<NOFILE; n++) 809: if(u.u_ofile[n] != NULL) 810: u.u_ofile[n]->f_count++; 811: #ifdef VIRUS_VFORK 812: if ((rip->p_textp != NULL) && !isvfork) 813: #else 814: if(rip->p_textp != NULL) 815: #endif 816: { 817: rip->p_textp->x_count++; 818: rip->p_textp->x_ccount++; 819: } 820: u.u_cdir->i_count++; 821: if (u.u_rdir) 822: u.u_rdir->i_count++; 823: /* 824: * When the resume is executed for the new process, 825: * here's where it will resume. 826: */ 827: if (save(u.u_ssav)) { 828: sureg(); 829: return(1); 830: } 831: /* 832: * Partially simulate the environment 833: * of the new process so that when it is actually 834: * created (by copying) it will look right. 835: */ 836: u.u_procp = rpp; 837: 838: #ifdef VIRUS_VFORK 839: rpp->p_dsize = rip->p_dsize; 840: rpp->p_ssize = rip->p_ssize; 841: rpp->p_daddr = rip->p_daddr; 842: rpp->p_saddr = rip->p_saddr; 843: a1 = rip->p_addr; 844: if (isvfork) 845: a[2] = malloc(coremap,USIZE); 846: else { 847: /* 848: * malloc3() will set a[2] to NULL on failure. 849: */ 850: #ifdef UCB_FRCSWAP 851: a[2] = NULL; 852: if (idleflg) 853: #endif 854: (void) malloc3(coremap,rip->p_dsize,rip->p_ssize,USIZE,a); 855: } 856: /* 857: * If there is not enough core for the 858: * new process, swap out the current process to generate the 859: * copy. 860: */ 861: if(a[2] == NULL) { 862: rip->p_stat = SIDL; 863: rpp->p_addr = a1; 864: #ifdef MENLO_JCL 865: rpp->p_stat = SRUN; 866: #endif 867: (void) xswap(rpp, X_DONTFREE, X_OLDSIZE, X_OLDSIZE); 868: rip->p_stat = SRUN; 869: u.u_procp = rip; 870: } else { 871: /* 872: * There is core, so just copy. 873: */ 874: rpp->p_addr = a[2]; 875: #ifdef CGL_RTP 876: /* 877: * Copy is now a preemptable kernel process. 878: * The u. area is non-reentrant so copy it first 879: * in non-preemptable mode. 880: */ 881: copyu(rpp->p_addr); 882: #else 883: copy(a1, rpp->p_addr, USIZE); 884: #endif 885: u.u_procp = rip; 886: if (isvfork == 0) { 887: rpp->p_daddr = a[0]; 888: copy(rip->p_daddr, rpp->p_daddr, rpp->p_dsize); 889: rpp->p_saddr = a[1]; 890: copy(rip->p_saddr, rpp->p_saddr, rpp->p_ssize); 891: } 892: #ifdef MENLO_JCL 893: (void) _spl6(); 894: rpp->p_stat = SRUN; 895: setrq(rpp); 896: (void) _spl0(); 897: #endif 898: } 899: #ifndef MENLO_JCL 900: setrq(rpp); 901: #endif 902: rpp->p_flag |= SSWAP; 903: if (isvfork) { 904: /* 905: * Set the parent's sizes to 0, since the child now 906: * has the data and stack. 907: * (If we had to swap, just free parent resources.) 908: * Then wait for the child to finish with it. 909: */ 910: if (a[2] == NULL) { 911: mfree(coremap,rip->p_dsize,rip->p_daddr); 912: mfree(coremap,rip->p_ssize,rip->p_saddr); 913: } 914: rip->p_dsize = 0; 915: rip->p_ssize = 0; 916: rip->p_textp = NULL; 917: rpp->p_flag |= SVFORK; 918: rip->p_flag |= SVFPARENT; 919: while (rpp->p_flag & SVFORK) 920: sleep((caddr_t)rpp,PSWP+1); 921: if ((rpp->p_flag & SLOAD) == 0) 922: panic("newproc vfork"); 923: u.u_dsize = rip->p_dsize = rpp->p_dsize; 924: rip->p_daddr = rpp->p_daddr; 925: rpp->p_dsize = 0; 926: u.u_ssize = rip->p_ssize = rpp->p_ssize; 927: rip->p_saddr = rpp->p_saddr; 928: rpp->p_ssize = 0; 929: rip->p_textp = rpp->p_textp; 930: rpp->p_textp = NULL; 931: rpp->p_flag |= SVFDONE; 932: wakeup((caddr_t)rip); 933: /* must do estabur if dsize/ssize are different */ 934: estabur(u.u_tsize,u.u_dsize,u.u_ssize,u.u_sep,RO); 935: rip->p_flag &= ~SVFPARENT; 936: } 937: return(0); 938: 939: #else VIRUS_VFORK 940: rpp->p_size = n = rip->p_size; 941: a1 = rip->p_addr; 942: #ifndef UCB_FRCSWAP 943: a2 = malloc(coremap, n); 944: #else 945: if(idleflg) 946: a2 = malloc(coremap, n); 947: else 948: a2 = NULL; 949: #endif 950: /* 951: * If there is not enough core for the 952: * new process, swap out the current process to generate the 953: * copy. 954: */ 955: if(a2 == NULL) { 956: rip->p_stat = SIDL; 957: rpp->p_addr = a1; 958: #ifdef MENLO_JCL 959: rpp->p_stat = SRUN; 960: #endif 961: (void) xswap(rpp, X_DONTFREE, X_OLDSIZE); 962: rip->p_stat = SRUN; 963: #ifdef CGL_RTP 964: u.u_procp = rip; /* see comments below */ 965: #endif 966: } else { 967: /* 968: * There is core, so just copy. 969: */ 970: rpp->p_addr = a2; 971: #ifdef CGL_RTP 972: /* 973: * Copy is now a preemptable kernel process. 974: * The u. area is non-reentrant so copy it first 975: * in non-preemptable mode. 976: */ 977: copyu(a2); 978: /* 979: * If we are to be interrupted we must insure consistency; 980: * restore current process state now. 981: */ 982: u.u_procp = rip; 983: copy(a1+USIZE, a2+USIZE, n-USIZE); 984: #else 985: copy(a1, a2, n); 986: #endif 987: #ifdef MENLO_JCL 988: (void) _spl6(); 989: rpp->p_stat = SRUN; 990: setrq(rpp); 991: (void) _spl0(); 992: #endif 993: } 994: #ifndef CGL_RTP 995: u.u_procp = rip; 996: #endif 997: #ifndef MENLO_JCL 998: (void) _spl6(); 999: setrq(rpp); 1000: (void) _spl0(); 1001: #endif 1002: rpp->p_flag |= SSWAP; 1003: return(0); 1004: #endif VIRUS_VFORK 1005: } 1006: 1007: #ifdef VIRUS_VFORK 1008: /* 1009: * Notify parent that vfork child is finished with parent's data. 1010: * Called during exit/exec(getxfile); must be called before xfree(). 1011: * The child must be locked in core 1012: * so it will be in core when the parent runs. 1013: */ 1014: endvfork() 1015: { 1016: register struct proc *rip, *rpp; 1017: 1018: rpp = u.u_procp; 1019: rip = rpp->p_pptr; 1020: rpp->p_flag &= ~SVFORK; 1021: rpp->p_flag |= SLOCK; 1022: wakeup((caddr_t)rpp); 1023: while(!(rpp->p_flag&SVFDONE)) 1024: sleep((caddr_t)rip,PZERO-1); 1025: /* 1026: * The parent has taken back our data+stack, set our sizes to 0. 1027: */ 1028: u.u_dsize = rpp->p_dsize = 0; 1029: u.u_ssize = rpp->p_ssize = 0; 1030: rpp->p_flag &= ~(SVFDONE | SLOCK); 1031: } 1032: #endif 1033: /* 1034: * Change the size of the data+stack regions of the process. 1035: * If the size is shrinking, it's easy-- just release the extra core. 1036: * If it's growing, and there is core, just allocate it 1037: * and copy the image, taking care to reset registers to account 1038: * for the fact that the system's stack has moved. 1039: * If there is no core, arrange for the process to be swapped 1040: * out after adjusting the size requirement-- when it comes 1041: * in, enough core will be allocated. 1042: * 1043: * After the expansion, the caller will take care of copying 1044: * the user's stack towards or away from the data area. 1045: */ 1046: #ifdef VIRUS_VFORK 1047: /* 1048: * The data and stack segments are separated from each other. The second 1049: * argument to expand specifies which to change. The stack segment will 1050: * not have to be copied again after expansion. 1051: */ 1052: expand(newsize,segment) 1053: #else 1054: expand(newsize) 1055: #endif 1056: { 1057: register i, n; 1058: register struct proc *p; 1059: register a1, a2; 1060: 1061: #ifdef VIRUS_VFORK 1062: p = u.u_procp; 1063: if (segment == S_DATA) { 1064: n = p->p_dsize; 1065: p->p_dsize = newsize; 1066: a1 = p->p_daddr; 1067: if(n >= newsize) { 1068: n -= newsize; 1069: mfree(coremap, n, a1+newsize); 1070: return; 1071: } 1072: } else { 1073: n = p->p_ssize; 1074: p->p_ssize = newsize; 1075: a1 = p->p_saddr; 1076: if(n >= newsize) { 1077: n -= newsize; 1078: p->p_saddr += n; 1079: mfree(coremap, n, a1); 1080: /* 1081: * Since the base of stack is different, 1082: * segmentation registers must be repointed. 1083: */ 1084: sureg(); 1085: return; 1086: } 1087: } 1088: if (save(u.u_ssav)) { 1089: /* 1090: * If we had to swap, the stack needs moving up. 1091: */ 1092: if (segment == S_STACK) { 1093: a1 = p->p_saddr; 1094: i = newsize - n; 1095: a2 = a1 + i; 1096: /* 1097: * i is the amount of growth. Copy i clicks 1098: * at a time, from the top; do the remainder 1099: * (n % i) separately. 1100: */ 1101: while (n >= i) { 1102: n -= i; 1103: copy(a1+n, a2+n, i); 1104: } 1105: copy(a1, a2, n); 1106: } 1107: sureg(); 1108: return; 1109: } 1110: #ifndef NONFP 1111: if (u.u_fpsaved==0) { 1112: savfp(&u.u_fps); 1113: u.u_fpsaved = 1; 1114: } 1115: #endif 1116: #ifdef UCB_FRCSWAP 1117: /* 1118: * Stack must be copied either way, might as well not swap. 1119: */ 1120: if(idleflg || (segment==S_STACK)) 1121: a2 = malloc(coremap, newsize); 1122: else 1123: a2 = NULL; 1124: #else 1125: a2 = malloc(coremap, newsize); 1126: #endif 1127: if(a2 == NULL) { 1128: if (segment == S_DATA) 1129: (void) xswap(p, X_FREECORE, n, X_OLDSIZE); 1130: else 1131: (void) xswap(p, X_FREECORE, X_OLDSIZE, n); 1132: p->p_flag |= SSWAP; 1133: #ifdef MENLO_JCL 1134: swtch(); 1135: #else 1136: qswtch(); 1137: #endif 1138: /* NOTREACHED */ 1139: } 1140: if (segment == S_STACK) { 1141: p->p_saddr = a2; 1142: /* 1143: * Make the copy put the stack at the top of the new area. 1144: */ 1145: a2 += newsize - n; 1146: } else 1147: p->p_daddr = a2; 1148: copy(a1, a2, n); 1149: mfree(coremap, n, a1); 1150: sureg(); 1151: return; 1152: 1153: #else VIRUS_VFORK 1154: p = u.u_procp; 1155: n = p->p_size; 1156: p->p_size = newsize; 1157: a1 = p->p_addr; 1158: if(n >= newsize) { 1159: mfree(coremap, n-newsize, a1+newsize); 1160: return; 1161: } 1162: if (save(u.u_ssav)) { 1163: sureg(); 1164: return; 1165: } 1166: #ifndef NONFP 1167: if (u.u_fpsaved==0) { 1168: savfp(&u.u_fps); 1169: u.u_fpsaved = 1; 1170: } 1171: #endif 1172: #ifdef UCB_FRCSWAP 1173: if(idleflg) 1174: a2 = malloc(coremap, newsize); 1175: else 1176: a2 = NULL; 1177: #else 1178: a2 = malloc(coremap, newsize); 1179: #endif 1180: if(a2 == NULL) { 1181: (void) xswap(p, X_FREECORE, n); 1182: p->p_flag |= SSWAP; 1183: #ifdef MENLO_JCL 1184: swtch(); 1185: #else 1186: qswtch(); 1187: #endif 1188: /*NOTREACHED*/ 1189: } 1190: #ifdef CGL_RTP 1191: copyu(a2); /* see comments in newproc() */ 1192: copy(a1+USIZE, a2+USIZE, n-USIZE); 1193: p->p_addr = a2; 1194: #else 1195: p->p_addr = a2; 1196: copy(a1, a2, n); 1197: #endif 1198: mfree(coremap, n, a1); 1199: resume(a2, u.u_ssav); 1200: #endif VIRUS_VFORK 1201: } 1202: #ifdef CGL_RTP 1203: /* 1204: * Test status of the "real time process"; 1205: * preempt the current process if runnable. 1206: * Use caution when calling this routine, much 1207: * of the kernel is non-reentrant! 1208: */ 1209: runrtp() 1210: { 1211: register struct proc *p; 1212: 1213: if ((p=rtpp)==NULL || p==u.u_procp) 1214: return; 1215: if (p->p_stat==SRUN && (p->p_flag&SLOAD)!=0) { 1216: u.u_procp->p_pri = PRTP+1; 1217: qswtch(); 1218: } 1219: } 1220: #endif