/*
 * Copyright (c) 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)kern_synch.c	1.5 (2.11BSD) 1999/9/13
 */

#include "param.h"
#include "../machine/seg.h"

#include "user.h"
#include "proc.h"
#include "buf.h"
#include "signal.h"
#include "signalvar.h"
#include "vm.h"
#include "kernel.h"
#include "systm.h"

#define	SQSIZE	16		/* Must be power of 2 */

#define	HASH(x)	(((int)x >> 5) & (SQSIZE - 1))
#define	SCHMAG	8/10

struct proc *slpque[SQSIZE];

/*
 * Recompute process priorities, once a second.
 */
schedcpu()
{
	register struct proc *p;
	register int a;

	wakeup((caddr_t)&lbolt);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_time != 127)
			p->p_time++;
		/*
		 * This is where 2.11 does its real time alarms.  4.X uses
		 * timeouts, since it offers better than second resolution.
		 * Putting it here allows us to continue using an int
		 * to store the number of ticks in the callout structure,
		 * since the kernel never has a timeout of greater than
		 * around 9 minutes.
		 */
		if (p->p_realtimer.it_value && !--p->p_realtimer.it_value) {
			psignal(p, SIGALRM);
			p->p_realtimer.it_value = p->p_realtimer.it_interval;
		}
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			if (p->p_slptime != 127)
				p->p_slptime++;
		if (p->p_slptime > 1)
			continue;
		a = (p->p_cpu & 0377) * SCHMAG + p->p_nice;
		if (a < 0)
			a = 0;
		if (a > 255)
			a = 255;
		p->p_cpu = a;
		if (p->p_pri >= PUSER)
			setpri(p);
	}
	vmmeter();
	if (runin != 0) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}
	++runrun;			/* swtch at least once a second */
	timeout(schedcpu, (caddr_t)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
updatepri(p)
	register struct proc *p;
{
	register int a = p->p_cpu & 0377;

	p->p_slptime--;		/* the first time was done in schedcpu */
	while (a && --p->p_slptime)
		a = (SCHMAG * a) /* + p->p_nice */;
	if (a < 0)
		a = 0;
	if (a > 255)
		a = 255;
	p->p_cpu = a;
	(void) setpri(p);
}
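
/*
 * Illustrative note on the decay arithmetic above: SCHMAG is a plain
 * textual macro, not a parenthesized constant, so its two uses expand
 * differently.  In schedcpu() the expression
 *
 *	(p->p_cpu & 0377) * SCHMAG + p->p_nice
 *
 * expands to ((p->p_cpu & 0377) * 8) / 10 + p->p_nice, i.e. the recent-CPU
 * figure decays to 8/10 of its value each second.  In updatepri() the
 * expression (SCHMAG * a) expands to (8/10 * a), which groups as
 * ((8/10) * a) and therefore evaluates to 0 in integer arithmetic after a
 * single loop iteration.  For example, with p_cpu == 200:
 *
 *	schedcpu():	200 * 8/10	-> 160
 *	updatepri():	8/10 * 200	-> 0
 */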

/*
 * General sleep call "borrowed" from 4.4BSD - the 'wmesg' parameter was
 * removed due to data space concerns.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  NOTE: timeouts in 2.11BSD use a signed int and
 * thus can be at most 32767 'ticks' or about 540 seconds in the US with
 * 60hz power (~650 seconds if 50hz power is being used).
 *
 * If 'pri' includes the PCATCH flag, signals are checked before and after
 * sleeping; otherwise signals are not checked.  Returns 0 if a wakeup was
 * done, EWOULDBLOCK if the timeout expired, ERESTART if the current system
 * call should be restarted, and EINTR if the system call should be
 * interrupted and EINTR returned to the user process.
 */

int
tsleep(ident, priority, timo)
	caddr_t ident;
	int priority;
	u_short timo;
{
	register struct proc *p = u.u_procp;
	register struct proc **qp;
	int s;
	int sig, catch = priority & PCATCH;
	void endtsleep();

	s = splhigh();
	if (panicstr)
		{
		/*
		 * After a panic just give interrupts a chance, then just
		 * return.  Don't run any other procs (or panic again below)
		 * in case this is the idle process and already asleep.  The
		 * splnet should be spl0 if the network was being used, but
		 * for now avoid network interrupts that might cause another
		 * panic.
		 */
		(void)_splnet();
		noop();
		splx(s);
		return (0);
		}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN)
		panic("tsleep");
#endif
	p->p_wchan = ident;
	p->p_slptime = 0;
	p->p_pri = priority & PRIMASK;
	qp = &slpque[HASH(ident)];
	p->p_link = *qp;
	*qp = p;
	if (timo)
		timeout(endtsleep, (caddr_t)p, timo);
	/*
	 * We put ourselves on the sleep queue and start the timeout before
	 * calling CURSIG, as we could stop there and a wakeup or a SIGCONT
	 * (or both) could occur while we were stopped.  A SIGCONT would cause
	 * us to be marked SSLEEP without resuming us, thus we must be ready
	 * for sleep when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch)
		{
		p->p_flag |= P_SINTR;
		if (sig = CURSIG(p))
			{
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
			}
		if (p->p_wchan == 0)
			{
			catch = 0;
			goto resume;
			}
		}
	else
		sig = 0;
	p->p_stat = SSLEEP;
	u.u_ru.ru_nvcsw++;
	swtch();
resume:
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT)
		{
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
		}
	else if (timo)
		untimeout(endtsleep, (caddr_t)p);
	if (catch && (sig != 0 || (sig = CURSIG(p))))
		{
		if (u.u_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
		}
	return (0);
}

/*
 * Implement timeout for tsleep above.  If the process hasn't been awakened
 * (p_wchan non-zero) then set the timeout flag and undo the sleep.  If the
 * proc is stopped just unsleep it so it will remain stopped.
 */

void
endtsleep(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	if (p->p_wchan)
		{
		if (p->p_stat == SSLEEP)
			setrun(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
		}
	splx(s);
}
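
/*
 * A minimal usage sketch for tsleep() (illustrative only; 'sc', 'sc_busy'
 * and XXPRI are hypothetical names, not part of this kernel):
 *
 *	while (sc->sc_busy) {
 *		error = tsleep((caddr_t)sc, XXPRI | PCATCH, hz);
 *		if (error == EWOULDBLOCK)
 *			continue;		-- timeout: recheck the condition
 *		if (error)
 *			return (error);		-- EINTR or ERESTART from a signal
 *	}
 *	sc->sc_busy = 1;
 *
 * The caller always rechecks the condition it slept on, since a wakeup()
 * on the channel only means that the state may have changed.
 */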

/*
 * Give up the processor till a wakeup occurs on chan, at which time the
 * process enters the scheduling queue at priority pri.
 *
 * This routine was rewritten to use 'tsleep'.  The old behaviour of sleep
 * being interruptible (if 'pri > PZERO') is emulated by setting PCATCH and
 * then performing the 'longjmp' if the return value of 'tsleep' is
 * ERESTART.
 *
 * Callers of this routine must be prepared for premature return, and check
 * that the reason for sleeping has gone away.
 */
sleep(chan, pri)
	caddr_t chan;
	int pri;
{
	register int priority = pri;

	if (pri > PZERO)
		priority |= PCATCH;

	u.u_error = tsleep(chan, priority, 0);
	/*
	 * sleep does not return anything.  If it was a non-interruptible
	 * sleep _or_ a successful/normal sleep (one for which a wakeup was
	 * done) then return.
	 */
	if ((priority & PCATCH) == 0 || (u.u_error == 0))
		return;
	/*
	 * XXX - compatibility ugliness.
	 *
	 * The tsleep() above will leave one of the following in u_error:
	 *
	 * 0		- a wakeup was done, this is handled above
	 * EWOULDBLOCK	- since no timeout was passed to tsleep we will not
	 *		  see this
	 * EINTR	- put into u_error for trap.c to find (interrupted
	 *		  syscall)
	 * ERESTART	- system call to be restarted
	 */
	longjmp(u.u_procp->p_addr, &u.u_qsave);
	/*NOTREACHED*/
}

/*
 * Remove a process from its wait queue.
 */
unsleep(p)
	register struct proc *p;
{
	register struct proc **hp;
	register int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &slpque[HASH(p->p_wchan)];
		while (*hp != p)
			hp = &(*hp)->p_link;
		*hp = p->p_link;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Wake up all processes sleeping on chan.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct proc *p, **q;
	struct proc **qp;
	int s;
	mapinfo map;

	/*
	 * Since we are called at interrupt time, we must ensure the normal
	 * kernel mapping in order to access proc.
	 */
	savemap(map);
	s = splclock();
	qp = &slpque[HASH(chan)];
restart:
	for (q = qp; p = *q; ) {
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
		if (p->p_wchan == chan) {
			p->p_wchan = 0;
			*q = p->p_link;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				/*
				 * Since curpri is a usrpri,
				 * p->p_pri is always better than curpri.
				 */
				runrun++;
				if ((p->p_flag & SLOAD) == 0) {
					if (runout != 0) {
						runout = 0;
						wakeup((caddr_t)&runout);
					}
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
			p->p_slptime = 0;
		} else
			q = &p->p_link;
	}
	splx(s);
	restormap(map);
}

/*
 * Set the process running;
 * arrange for it to be swapped in if necessary.
 */
setrun(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {

	case 0:
	case SWAIT:
	case SRUN:
	case SZOMB:
	default:
		panic("setrun");

	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_stat = SRUN;
	if (p->p_flag & SLOAD)
		setrq(p);
	splx(s);
	if (p->p_pri < curpri)
		runrun++;
	if ((p->p_flag & SLOAD) == 0) {
		if (runout != 0) {
			runout = 0;
			wakeup((caddr_t)&runout);
		}
	}
}

/*
 * Set user priority.
 * The rescheduling flag (runrun)
 * is set if the priority is better
 * than the currently running process.
 */
setpri(pp)
	register struct proc *pp;
{
	register int p;

	p = (pp->p_cpu & 0377) / 16;
	p += PUSER + pp->p_nice;
	if (p > 127)
		p = 127;
	if (p < curpri)
		runrun++;
	pp->p_pri = p;
	return (p);
}
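
/*
 * Worked example for setpri() (illustrative only): with p_cpu == 160 and
 * p_nice == 0 the user priority becomes PUSER + 160/16 = PUSER + 10,
 * clamped to a maximum of 127.  A compute-bound process thus drifts to a
 * numerically larger (worse) priority, while updatepri() decays p_cpu
 * during long sleeps so the priority drifts back toward PUSER + p_nice.
 */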

/*
 * This routine is called to reschedule the CPU.  If the calling process is
 * not in RUN state, arrangements for it to restart must have been made
 * elsewhere, usually by calling via sleep.  There is a race here.  A
 * process may become ready after it has been examined.  In this case,
 * idle() will be called and will return in at most 1hz time, i.e. it's
 * not worth putting a spl() in.
 */
swtch()
{
	register struct proc *p, *q;
	register int n;
	struct proc *pp, *pq;
	int s;

#ifdef DIAGNOSTIC
	extern struct buf *hasmap;

	if (hasmap)
		panic("swtch hasmap");
#endif
#ifdef UCB_METER
	cnt.v_swtch++;
#endif
	/* If not the idle process, resume the idle process. */
	if (u.u_procp != &proc[0]) {
		if (setjmp(&u.u_rsave)) {
			sureg();
			return;
		}
		if (u.u_fpsaved == 0) {
			savfp(&u.u_fps);
			u.u_fpsaved = 1;
		}
		longjmp(proc[0].p_addr, &u.u_qsave);
	}
	/*
	 * The first save returns nonzero when proc 0 is resumed
	 * by another process (above); then the second is not done
	 * and the process-search loop is entered.
	 *
	 * The first save returns 0 when swtch is called in proc 0
	 * from sched().  The second save returns 0 immediately, so
	 * in this case too the process-search loop is entered.
	 * Thus when proc 0 is awakened by being made runnable, it will
	 * find itself and resume itself at rsave, and return to sched().
	 */
	if (setjmp(&u.u_qsave) == 0 && setjmp(&u.u_rsave))
		return;
loop:
	s = splhigh();
	noproc = 0;
	runrun = 0;
#ifdef DIAGNOSTIC
	for (p = qs; p; p = p->p_link)
		if (p->p_stat != SRUN)
			panic("swtch SRUN");
#endif
	pp = NULL;
	q = NULL;
	n = 128;
	/*
	 * Search for the highest-priority runnable process.
	 */
	for (p = qs; p; p = p->p_link) {
		if (p->p_flag & SLOAD && p->p_pri < n) {
			pp = p;
			pq = q;
			n = p->p_pri;
		}
		q = p;
	}
	/*
	 * If no process is runnable, idle.
	 */
	p = pp;
	if (p == NULL) {
		idle();
		goto loop;
	}
	if (pq)
		pq->p_link = p->p_link;
	else
		qs = p->p_link;
	curpri = n;
	splx(s);
	/*
	 * The rsave (ssave) contents are interpreted
	 * in the new address space.
	 */
	n = p->p_flag & SSWAP;
	p->p_flag &= ~SSWAP;
	longjmp(p->p_addr, n ? &u.u_ssave : &u.u_rsave);
}

setrq(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
#ifdef DIAGNOSTIC
	{	/* see if already on the run queue */
		register struct proc *q;

		for (q = qs; q != NULL; q = q->p_link)
			if (q == p)
				panic("setrq");
	}
#endif
	p->p_link = qs;
	qs = p;
	splx(s);
}

/*
 * Remove runnable job from run queue.  This is done when a runnable job
 * is swapped out so that it won't be selected in swtch().  It will be
 * reinserted in the qs with setrq when it is swapped back in.
 */
remrq(p)
	register struct proc *p;
{
	register struct proc *q;
	register int s;

	s = splhigh();
	if (p == qs)
		qs = p->p_link;
	else {
		for (q = qs; q; q = q->p_link)
			if (q->p_link == p) {
				q->p_link = p->p_link;
				goto done;
			}
		panic("remrq");
	}
done:
	splx(s);
}
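
/*
 * Note on the run queue (illustrative only): 'qs' is a single LIFO list
 * linked through p_link.  setrq() pushes at the head, swtch() scans the
 * whole list for the loaded process with the numerically lowest p_pri and
 * unlinks the winner, and remrq() unlinks a process that is being swapped
 * out.  For example, after setrq(a); setrq(b); the list is
 *
 *	qs -> b -> a -> NULL
 *
 * and remrq(a) leaves qs -> b -> NULL.
 */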