/*
 * Copyright (c) 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)kern_clock.c	1.4 (2.11BSD GTE) 1997/2/14
 */

#include "param.h"
#include "../machine/psl.h"
#include "../machine/seg.h"

#include "user.h"
#include "proc.h"
#include "callout.h"
#include "dk.h"
#include "kernel.h"
#include "systm.h"

/*
 * The hz hardware interval timer.
 * We update the events relating to real time.
 * Also gather statistics.
 *
 *	reprime clock
 *	implement callouts
 *	maintain user/system times
 *	maintain date
 *	profile
 */
/*ARGSUSED*/
hardclock(dev, sp, r1, ov, nps, r0, pc, ps)
	dev_t dev;
	caddr_t sp, pc;
	int r1, ov, nps, r0, ps;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft = 0;
	mapinfo map;

	savemap(map);		/* ensure normal mapping of kernel data */

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	p1 = calltodo.c_next;
	while (p1) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
		p1 = p1->c_next;
	}

	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers,
	 * assuming that the current state has been around at least
	 * one tick.
	 */
	if (USERMODE(ps)) {
		if (u.u_prof.pr_scale)
			needsoft = 1;
		/*
		 * CPU was in user state.  Increment the
		 * user time counter, and process the process-virtual
		 * time interval timer.
		 */
		u.u_ru.ru_utime++;
		if (u.u_timer[ITIMER_VIRTUAL - 1].it_value &&
		    !--u.u_timer[ITIMER_VIRTUAL - 1].it_value) {
			psignal(u.u_procp, SIGVTALRM);
			u.u_timer[ITIMER_VIRTUAL - 1].it_value =
				u.u_timer[ITIMER_VIRTUAL - 1].it_interval;
		}
	} else {
		/*
		 * CPU was in system state.
		 */
		if (!noproc)
			u.u_ru.ru_stime++;
	}

	/*
	 * If the cpu is currently scheduled to a process, then
	 * charge it with resource utilization for a tick, updating
	 * statistics which run in (user+system) virtual time,
	 * such as the cpu time limit and profiling timers.
	 * This assumes that the current process has been running
	 * the entire last tick.
	 */
	if (noproc == 0) {
		p = u.u_procp;
		if (++p->p_cpu == 0)
			p->p_cpu--;
		if ((u.u_ru.ru_utime + u.u_ru.ru_stime + 1) >
		    u.u_rlimit[RLIMIT_CPU].rlim_cur) {
			psignal(p, SIGXCPU);
			if (u.u_rlimit[RLIMIT_CPU].rlim_cur <
			    u.u_rlimit[RLIMIT_CPU].rlim_max)
				u.u_rlimit[RLIMIT_CPU].rlim_cur += 5 * hz;
		}
		if (u.u_timer[ITIMER_PROF - 1].it_value &&
		    !--u.u_timer[ITIMER_PROF - 1].it_value) {
			psignal(p, SIGPROF);
			u.u_timer[ITIMER_PROF - 1].it_value =
				u.u_timer[ITIMER_PROF - 1].it_interval;
		}
	}

#ifdef UCB_METER
	gatherstats(pc, ps);
#endif

	/*
	 * Increment the time-of-day and process callouts at a very
	 * low cpu priority, so we don't keep the relatively high
	 * clock interrupt priority any longer than necessary.
	 */
	if (adjdelta)
		if (adjdelta > 0) {
			++lbolt;
			--adjdelta;
		} else {
			--lbolt;
			++adjdelta;
		}
	if (++lbolt >= hz) {
		lbolt -= hz;
		++time.tv_sec;
	}

	if (needsoft && BASEPRI(ps)) {	/* if ps is high, just return */
		(void) _splsoftclock();
		softclock(pc, ps);
	}
	restormap(map);
}
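
/*
 * Worked example of the delta-encoded callout queue above (illustrative
 * only, not from the original source): three timeouts due in 5, 8 and 8
 * ticks sit on the queue with c_time deltas of 5, 3 and 0.  hardclock()
 * decrements only the leading entries each tick; an entry whose c_time
 * has reached zero or gone negative is ``due'' (the negative value
 * counts ticks of lateness), and needsoft requests a softclock() pass
 * to run the due entries.
 */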

#ifdef UCB_METER
int	dk_ndrive = DK_NDRIVE;
/*
 * Gather statistics on resource utilization.
 *
 * We make a gross assumption: that the system has been in the
 * state it is in (user state, kernel state, interrupt state,
 * or idle state) for the entire last time interval, and
 * update statistics accordingly.
 */
/*ARGSUSED*/
gatherstats(pc, ps)
	caddr_t pc;
	int ps;
{
	register int cpstate, s;

	/*
	 * Determine what state the cpu is in.
	 */
	if (USERMODE(ps)) {
		/*
		 * CPU was in user state.
		 */
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		/*
		 * CPU was in system state.  If profiling the kernel,
		 * increment a counter.  If no process is running,
		 * then this is a system tick if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
		 * then we charge it with system time even if we were
		 * at a non-zero IPL, since the system often runs
		 * this way during processing of system calls.
		 * This is approximate, but the lack of true interval
		 * timers makes doing anything else difficult.
		 */
		cpstate = CP_SYS;
		if (noproc && BASEPRI(ps))
			cpstate = CP_IDLE;
	}
	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 */
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy & (1 << s))
			dk_time[s]++;
}
#endif /* UCB_METER */
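
/*
 * Hypothetical sketch (not part of this file) of how a user-level
 * monitor in the vmstat/iostat family might turn two samples of
 * cp_time[], read via /dev/kmem, into percentages.  old[], new[] and
 * names[] are invented locals; CPUSTATES is the cpu-state count as in
 * dk.h:
 *
 *	long old[CPUSTATES], new[CPUSTATES], total;
 *	register int i;
 *
 *	for (total = 0, i = 0; i < CPUSTATES; i++)
 *		total += new[i] - old[i];
 *	for (i = 0; i < CPUSTATES; i++)
 *		printf("%s %ld%%\n", names[i],
 *		    total ? (new[i] - old[i]) * 100 / total : 0);
 */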

/*
 * Software priority level clock interrupt.
 * Run periodic events from timeout queue.
 */
softclock(pc, ps)
	caddr_t pc;
	int ps;
{
	for (;;) {
		register struct callout *p1;
		register caddr_t arg;
		register int (*func)();
		register int a, s;

		s = splhigh();
		if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
			splx(s);
			break;
		}
		arg = p1->c_arg; func = p1->c_func; a = p1->c_time;
		calltodo.c_next = p1->c_next;
		p1->c_next = callfree;
		callfree = p1;
		splx(s);
#ifdef INET
		if (ISSUPERADD(func))
			KScall(KERNELADD(func), sizeof(arg) + sizeof(a),
			    arg, a);
		else
#endif
			(*func)(arg, a);
	}
	/*
	 * If we interrupted user mode and the process is profiling,
	 * give it a profiling tick.
	 */
	if (USERMODE(ps)) {
		register struct proc *p = u.u_procp;

		if (u.u_prof.pr_scale)
			addupc(pc, &u.u_prof, 1);
		/*
		 * Check to see if the process has accumulated
		 * more than 10 minutes of user time.  If so,
		 * reduce its priority to give others a chance.
		 */
		if (p->p_uid && p->p_nice == NZERO &&
		    u.u_ru.ru_utime > 10L * 60L * hz) {
			p->p_nice = NZERO + 4;
			(void) setpri(p);
		}
	}
}

/*
 * Arrange that (*fun)(arg) is called in t/hz seconds.
 */
timeout(fun, arg, t)
	int (*fun)();
	caddr_t arg;
	register int t;
{
	register struct callout *p1, *p2, *pnew;
	register int s = splclock();

	if (t <= 0)
		t = 1;
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		if (p2->c_time > 0)
			t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}

/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = splclock();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
			if (p2->c_next && p2->c_time > 0)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}

profil()
{
	register struct a {
		short	*bufbase;
		unsigned bufsize;
		unsigned pcoffset;
		unsigned pcscale;
	} *uap = (struct a *)u.u_ap;
	register struct uprof *upp = &u.u_prof;

	upp->pr_base = uap->bufbase;
	upp->pr_size = uap->bufsize;
	upp->pr_off = uap->pcoffset;
	upp->pr_scale = uap->pcscale;
}
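
/*
 * Hypothetical usage sketch, disabled so it compiles to nothing: a
 * driver arming a one-second watchdog with timeout() and cancelling
 * it with untimeout().  xxwatch, xxstart and xxstop are invented
 * names; only timeout(), untimeout() and hz are real interfaces here.
 */
#ifdef notdef
int	xxwatch();

xxstart(xx)
	caddr_t xx;
{
	timeout(xxwatch, xx, hz);	/* run xxwatch(xx) in ~1 second */
}

xxstop(xx)
	caddr_t xx;
{
	untimeout(xxwatch, xx);		/* cancel if not yet fired */
}
#endif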

/*
 * Compute number of hz until specified time.
 * Used to compute the third argument to timeout() from an
 * absolute time.
 */
hzto(tv)
	register struct timeval *tv;
{
	register long ticks;
	register long sec;
	register int s = splhigh();

	/*
	 * If the number of milliseconds will fit in 32 bit arithmetic,
	 * then compute the number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute the number of hz in time,
	 * rounding times greater than representable to the maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (1000 / hz);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
#ifdef pdp11
	/* stored in an "int", so 16-bit max */
	if (ticks > 0x7fff)
		ticks = 0x7fff;
#endif
	return ((int)ticks);
}
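
/*
 * Hypothetical usage sketch, disabled: converting an absolute time
 * into the tick count timeout() wants, via hzto().  xxat and its
 * locals are invented names; time, timeout() and hzto() are the real
 * interfaces above.
 */
#ifdef notdef
xxat(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	struct timeval tv;

	tv = time;			/* current time of day */
	tv.tv_usec += 500000;		/* fire half a second from now */
	if (tv.tv_usec >= 1000000) {
		tv.tv_sec++;
		tv.tv_usec -= 1000000;
	}
	timeout(fun, arg, hzto(&tv));
}
#endif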