1: /*
   2:  *	SCCS id	@(#)bio.c	2.1 (Berkeley)	8/5/83
   3:  */
   4: 
   5: #include "param.h"
   6: #include <sys/systm.h>
   7: #include <sys/dir.h>
   8: #include <sys/user.h>
   9: #include <sys/buf.h>
  10: #include <sys/conf.h>
  11: #include <sys/proc.h>
  12: #include <sys/seg.h>
  13: #ifdef UCB_METER
  14: #include <sys/vm.h>
  15: #endif
  16: #ifdef  UNIBUS_MAP
  17: #include <sys/uba.h>
  18: #endif
  19: 
#ifdef  DISKMON
struct  ioinfo  io_info;    /* cache-hit/read/write/read-ahead counters, bumped in bread/breada/bwrite */
#endif
  23: 
  24: /*
  25:  * swap IO headers.
  26:  * they are filled in to point
  27:  * at the desired IO operation.
  28:  */
  29: struct  buf swbuf1;
  30: struct  buf swbuf2;
  31: 
  32: /*
  33:  * The following several routines allocate and free
  34:  * buffers with various side effects.  In general the
  35:  * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer
 * to the buffer header; the buffer is marked "busy"
  38:  * so that no one else can touch it.  If the block was
  39:  * already in core, no I/O need be done; if it is
  40:  * already busy, the process waits until it becomes free.
  41:  * The following routines allocate a buffer:
  42:  *	getblk
  43:  *	bread
  44:  *	breada
  45:  * Eventually the buffer must be released, possibly with the
  46:  * side effect of writing it out, by using one of
  47:  *	bwrite
  48:  *	bdwrite
  49:  *	bawrite
  50:  *	brelse
  51:  */
  52: 
  53: #ifdef  UCB_BHASH
  54: #ifdef  SMALL
  55: #define BUFHSZ  8   /* must be power of 2 */
  56: #else
  57: #define BUFHSZ  64  /* must be power of 2 */
  58: #endif	SMALL
  59: #define BUFHASH(blkno)  (blkno & (BUFHSZ-1))
  60: 
  61: struct  buf *bhash[BUFHSZ];
  62: #endif
  63: 
  64: /*
  65:  * Read in (if necessary) the block and return a buffer pointer.
  66:  */
  67: struct buf *
  68: bread(dev, blkno)
  69: register dev_t dev;
  70: daddr_t blkno;
  71: {
  72:     register struct buf *bp;
  73: 
  74:     bp = getblk(dev, blkno);
  75:     if (bp->b_flags&B_DONE) {
  76: #ifdef  DISKMON
  77:         io_info.ncache++;
  78: #endif
  79:         return(bp);
  80:     }
  81:     bp->b_flags |= B_READ;
  82:     bp->b_bcount = BSIZE;
  83:     (void) (*bdevsw[major(dev)].d_strategy)(bp);
  84: #ifdef  DISKMON
  85:     io_info.nread++;
  86: #endif
  87:     iowait(bp);
  88:     return(bp);
  89: }
  90: 
  91: /*
  92:  * Read in the block, like bread, but also start I/O on the
  93:  * read-ahead block (which is not allocated to the caller)
  94:  */
/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller)
 */
struct buf *
breada(dev, blkno, rablkno)
register dev_t dev;
daddr_t blkno, rablkno;
{
    register struct buf *bp, *rabp;

    bp = NULL;
    /* start the requested block's read now, but don't wait yet */
    if (!incore(dev, blkno)) {
        bp = getblk(dev, blkno);
        if ((bp->b_flags&B_DONE) == 0) {
            bp->b_flags |= B_READ;
            bp->b_bcount = BSIZE;
            (void) (*bdevsw[major(dev)].d_strategy)(bp);
#ifdef  DISKMON
            io_info.nread++;
#endif
        }
    }
    /* fire off the asynchronous read-ahead; it is released, not returned */
    if (rablkno && !incore(dev, rablkno)) {
        rabp = getblk(dev, rablkno);
        if (rabp->b_flags & B_DONE)
            brelse(rabp);
        else {
            rabp->b_flags |= B_READ|B_ASYNC;
            rabp->b_bcount = BSIZE;
            (void) (*bdevsw[major(dev)].d_strategy)(rabp);
#ifdef  DISKMON
            io_info.nreada++;
#endif
        }
    }
    /*
     * If the requested block was already in core (bp never set),
     * bread will find it there without further I/O; otherwise
     * wait for the read we started above.
     */
    if(bp == NULL)
        return(bread(dev, blkno));
    iowait(bp);
    return(bp);
}
 132: 
 133: /*
 134:  * Write the buffer, waiting for completion.
 135:  * Then release the buffer.
 136:  */
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
register struct buf *bp;
{
    register flag;

    /*
     * Snapshot the flags first: for an async write, iodone may
     * brelse the buffer before the strategy call returns, so
     * bp->b_flags cannot be trusted afterwards.
     */
    flag = bp->b_flags;
    bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
    bp->b_bcount = BSIZE;
#ifdef  DISKMON
    io_info.nwrite++;
#endif
    (void) (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
    if ((flag&B_ASYNC) == 0) {
        /* synchronous: wait for the I/O, then free the buffer */
        iowait(bp);
        brelse(bp);
    } else if (flag & B_DELWRI)
        /*
         * A delayed write being forced out: age the buffer so
         * brelse queues it for early reuse.
         */
        bp->b_flags |= B_AGE;
}
 155: 
 156: /*
 157:  * Release the buffer, marking it so that if it is grabbed
 158:  * for another purpose it will be written out before being
 159:  * given up (e.g. when writing a partial block where it is
 160:  * assumed that another write for the same block will soon follow).
 161:  * This can't be done for magtape, since writes must be done
 162:  * in the same order as requested.
 163:  */
 164: bdwrite(bp)
 165: register struct buf *bp;
 166: {
 167:     register struct buf *dp;
 168: 
 169:     dp = bdevsw[major(bp->b_dev)].d_tab;
 170:     if(dp->b_flags & B_TAPE)
 171:         bawrite(bp);
 172:     else {
 173:         bp->b_flags |= B_DELWRI | B_DONE;
 174:         brelse(bp);
 175:     }
 176: }
 177: 
 178: /*
 179:  * Release the buffer, start I/O on it, but don't wait for completion.
 180:  */
 181: bawrite(bp)
 182: register struct buf *bp;
 183: {
 184: 
 185:     bp->b_flags |= B_ASYNC;
 186:     bwrite(bp);
 187: }
 188: 
 189: /*
 190:  * release the buffer, with no I/O implied.
 191:  */
/*
 * release the buffer, with no I/O implied.
 */
brelse(bp)
register struct buf *bp;
{
    register struct buf **backp;
    register s;

    /* wake anyone sleeping on this buffer or starved for any buffer */
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    if (bfreelist.b_flags&B_WANTED) {
        bfreelist.b_flags &= ~B_WANTED;
        wakeup((caddr_t)&bfreelist);
    }
    if (bp->b_flags&B_ERROR) {
#ifdef UCB_BHASH
        bunhash(bp);
#endif
        bp->b_dev = NODEV;   /* no assoc. on error */
    }
    /* raise priority while the free list is being spliced */
    s = spl6();
    if(bp->b_flags & B_AGE) {
        /* aged buffer: insert at the head, first to be reclaimed */
        backp = &bfreelist.av_forw;
        (*backp)->av_back = bp;
        bp->av_forw = *backp;
        *backp = bp;
        bp->av_back = &bfreelist;
    } else {
        /* normal buffer: append at the tail (LRU order) */
        backp = &bfreelist.av_back;
        (*backp)->av_forw = bp;
        bp->av_back = *backp;
        *backp = bp;
        bp->av_forw = &bfreelist;
    }
    bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE);
    splx(s);
}
 227: 
 228: /*
 229:  * See if the block is associated with some buffer
 230:  * (mainly to avoid getting hung up on a wait in breada)
 231:  */
/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada)
 * Returns 1 if a buffer for (dev, blkno) exists, else 0.
 */
incore(dev, blkno)
register dev_t dev;
daddr_t blkno;
{
    register struct buf *bp;
#ifndef UCB_BHASH
    register struct buf *dp;
#endif

    /*
     * Scan either the hash chain for the block (UCB_BHASH)
     * or the whole buffer list of the device.  blkno is
     * converted to device-block units to match b_blkno.
     */
#ifdef  UCB_BHASH
    bp = bhash[BUFHASH(blkno)];
    blkno = fsbtodb(blkno);
    for(; bp != NULL; bp = bp->b_link)
#else
    dp = bdevsw[major(dev)].d_tab;
    blkno = fsbtodb(blkno);
    for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
#endif
        if (bp->b_blkno == blkno && bp->b_dev == dev)
            return(1);
    return(0);
}
 254: 
 255: /*
 256:  * Assign a buffer for the given block.  If the appropriate
 257:  * block is already associated, return it; otherwise search
 258:  * for the oldest non-busy buffer and reassign it.
 259:  */
 260: struct buf *
 261: getblk(dev, blkno)
 262: dev_t dev;
 263: daddr_t blkno;
 264: {
 265:     register struct buf *bp;
 266:     register struct buf *dp;
 267: #ifdef UCB_BHASH
 268:     register int j;
 269: #endif
 270:     daddr_t dblkno;
 271: 
 272:     if(major(dev) >= nblkdev)
 273:         panic("blkdev");
 274: 
 275:     dp = bdevsw[major(dev)].d_tab;
 276:     if(dp == NULL)
 277:         panic("devtab");
 278:     loop:
 279:     (void) _spl0();
 280: #ifdef  UCB_BHASH
 281:     j = BUFHASH(blkno);
 282:     bp = bhash[j];
 283:     dblkno  = fsbtodb(blkno);
 284:     for(; bp != NULL; bp = bp->b_link)
 285: #else
 286:     for (bp=dp->b_forw; bp != dp; bp = bp->b_forw)
 287: #endif
 288:     {
 289:         if (bp->b_blkno != dblkno || bp->b_dev != dev)
 290:             continue;
 291:         (void) _spl6();
 292:         if (bp->b_flags&B_BUSY) {
 293:             bp->b_flags |= B_WANTED;
 294:             sleep((caddr_t)bp, PRIBIO+1);
 295:             goto loop;
 296:         }
 297:         (void) _spl0();
 298:         notavail(bp);
 299:         return(bp);
 300:     }
 301:     (void) _spl6();
 302:     if (bfreelist.av_forw == &bfreelist) {
 303:         bfreelist.b_flags |= B_WANTED;
 304:         sleep((caddr_t)&bfreelist, PRIBIO+1);
 305:         goto loop;
 306:     }
 307:     (void) _spl0();
 308:     notavail(bp = bfreelist.av_forw);
 309:     if (bp->b_flags & B_DELWRI) {
 310:         bawrite(bp);
 311:         goto loop;
 312:     }
 313: #ifdef UCB_BHASH
 314:     bunhash(bp);
 315: #endif
 316:     bp->b_flags = B_BUSY;
 317:     bp->b_back->b_forw = bp->b_forw;
 318:     bp->b_forw->b_back = bp->b_back;
 319:     bp->b_forw = dp->b_forw;
 320:     bp->b_back = dp;
 321:     dp->b_forw->b_back = bp;
 322:     dp->b_forw = bp;
 323:     bp->b_dev = dev;
 324:     bp->b_blkno = dblkno;
 325: #ifdef UCB_BHASH
 326:     bp->b_link = bhash[j];
 327:     bhash[j] = bp;
 328: #endif
 329:     return(bp);
 330: }
 331: 
 332: /*
 333:  * Get a block not assigned to any device
 334:  */
/*
 * Get a block not assigned to any device
 * Returns a busy buffer with b_dev == NODEV.
 */
struct buf *
geteblk()
{
    register struct buf *bp;
    register struct buf *dp;

loop:
    /* wait until the free list is non-empty */
    (void) _spl6();
    while (bfreelist.av_forw == &bfreelist) {
        bfreelist.b_flags |= B_WANTED;
        sleep((caddr_t)&bfreelist, PRIBIO+1);
    }
    (void) _spl0();
    dp = &bfreelist;
    notavail(bp = bfreelist.av_forw);
    if (bp->b_flags & B_DELWRI) {
        /* flush a pending delayed write, then try again */
        bp->b_flags |= B_ASYNC;
        bwrite(bp);
        goto loop;
    }
#ifdef UCB_BHASH
    bunhash(bp);
#endif
    /* unlink from the old device list; hang it off bfreelist */
    bp->b_flags = B_BUSY;
    bp->b_back->b_forw = bp->b_forw;
    bp->b_forw->b_back = bp->b_back;
    bp->b_forw = dp->b_forw;
    bp->b_back = dp;
    dp->b_forw->b_back = bp;
    dp->b_forw = bp;
    bp->b_dev = (dev_t)NODEV;
#ifdef UCB_BHASH
    bp->b_link = NULL;
#endif
    return(bp);
}
 371: 
 372: #ifdef UCB_BHASH
 373: bunhash(bp)
 374: register struct buf *bp;
 375: {
 376:     register struct buf *ep;
 377:     register int i;
 378: 
 379:     if (bp->b_dev == NODEV)
 380:         return;
 381:     i = BUFHASH(dbtofsb(bp->b_blkno));
 382:     ep = bhash[i];
 383:     if (ep == bp) {
 384:         bhash[i] = bp->b_link;
 385:         return;
 386:     }
 387:     for(; ep != NULL; ep = ep->b_link)
 388:         if (ep->b_link == bp) {
 389:             ep->b_link = bp->b_link;
 390:             return;
 391:         }
 392:     panic("bunhash");
 393: }
 394: #endif
 395: 
 396: /*
 397:  * Wait for I/O completion on the buffer; return errors
 398:  * to the user.
 399:  */
 400: iowait(bp)
 401: register struct buf *bp;
 402: {
 403: 
 404:     (void) _spl6();
 405:     while ((bp->b_flags&B_DONE)==0)
 406:         sleep((caddr_t)bp, PRIBIO);
 407:     (void) _spl0();
 408:     geterror(bp);
 409: }
 410: 
 411: /*
 412:  * Unlink a buffer from the available list and mark it busy.
 413:  * (internal interface)
 414:  */
 415: notavail(bp)
 416: register struct buf *bp;
 417: {
 418:     register s;
 419: 
 420:     s = spl6();
 421:     bp->av_back->av_forw = bp->av_forw;
 422:     bp->av_forw->av_back = bp->av_back;
 423:     bp->b_flags |= B_BUSY;
 424:     splx(s);
 425: }
 426: 
 427: /*
 428:  * Mark I/O complete on a buffer, release it if I/O is asynchronous,
 429:  * and wake up anyone waiting for it.
 430:  */
/*
 * Mark I/O complete on a buffer, release it if I/O is asynchronous,
 * and wake up anyone waiting for it.
 */
iodone(bp)
register struct buf *bp;
{
#ifdef  UNIBUS_MAP
    /* give back any UNIBUS map resources held for this transfer */
    if(bp->b_flags & (B_MAP|B_UBAREMAP))
        mapfree(bp);
#endif
    bp->b_flags |= B_DONE;
    if (bp->b_flags&B_ASYNC)
        /* nobody is waiting; put the buffer back ourselves */
        brelse(bp);
    else {
        bp->b_flags &= ~B_WANTED;
        wakeup((caddr_t)bp);
    }
}
 446: 
 447: /*
 448:  * Zero the core associated with a buffer.
 449:  * Since this routine calls mapin,
 450:  * it cannot be called from interrupt routines.
 451:  */
/*
 * Zero the core associated with a buffer.
 * Since this routine calls mapin,
 * it cannot be called from interrupt routines.
 */
clrbuf(bp)
register struct buf *bp;
{
    register *p;
    register c;

    p = (int *) mapin(bp);
    /* loop is unrolled 4x: BSIZE/sizeof(int) words total */
    c = (BSIZE/sizeof(int)) >> 2;
    do {
        *p++ = 0;
        *p++ = 0;
        *p++ = 0;
        *p++ = 0;
    } while (--c);
    bp->b_resid = 0;
    mapout(bp);
}
 469: 
 470: /*
 471:  * swap I/O
 472:  */
/*
 * swap I/O: transfer count clicks between core (starting at
 * click coreaddr) and the swap device (starting at block
 * swplo+blkno).  rdflg is ORed into b_flags to select the
 * direction (read vs write).  Panics on any I/O error.
 */
swap(blkno, coreaddr, count, rdflg)
memaddr blkno, coreaddr;
register count;
{
    register struct buf *bp;
    register tcount;

#ifdef UCB_METER
    if (rdflg) {
        cnt.v_pswpin += count;
        cnt.v_swpin++;
    } else {
        cnt.v_pswpout += count;
        cnt.v_swpout++;
    }
#endif
    /* prefer swbuf1; fall back to swbuf2 if swbuf1 is busy and swbuf2 is unwanted */
    bp = &swbuf1;
    if(bp->b_flags & B_BUSY)
        if((swbuf2.b_flags&B_WANTED) == 0)
            bp = &swbuf2;
    /* wait for exclusive use of the chosen swap header */
    (void) _spl6();
    while (bp->b_flags&B_BUSY) {
        bp->b_flags |= B_WANTED;
        sleep((caddr_t)bp, PSWP+1);
    }
    (void) _spl0();
    /* transfer in chunks of at most 01700 clicks */
    while (count) {
        bp->b_flags = B_BUSY | B_PHYS | rdflg;
        bp->b_dev = swapdev;
        tcount = count;
        if (tcount >= 01700)    /* prevent byte-count wrap */
            tcount = 01700;
        bp->b_bcount = ctob(tcount);
        bp->b_blkno = swplo+blkno;
        /* split the click address: low bits in b_addr, extension in b_xmem */
        bp->b_un.b_addr = (caddr_t)(coreaddr<<6);
        bp->b_xmem = (coreaddr>>10) & 077;
        (*bdevsw[major(swapdev)].d_strategy)(bp);
        /* wait for this chunk to complete */
        (void) _spl6();
        while((bp->b_flags&B_DONE)==0)
            sleep((caddr_t)bp, PSWP);
        (void) _spl0();
        if ((bp->b_flags & B_ERROR) || bp->b_resid)
            panic("IO err in swap");
        count -= tcount;
        coreaddr += tcount;
        blkno += ctod(tcount);
    }
    /* release the header and wake anyone waiting for it */
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    bp->b_flags &= ~(B_BUSY|B_WANTED);
}
 524: 
 525: /*
 526:  * make sure all write-behind blocks
 527:  * on dev (or NODEV for all)
 528:  * are flushed out.
 529:  * (from umount and update)
 530:  */
/*
 * make sure all write-behind blocks
 * on dev (or NODEV for all)
 * are flushed out.
 * (from umount and update)
 */
bflush(dev)
register dev_t dev;
{
    register struct buf *bp;

loop:
    (void) _spl6();
    for (bp = bfreelist.av_forw; bp != &bfreelist; bp = bp->av_forw) {
        if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) {
            /*
             * notavail takes bp off the free list, invalidating
             * this scan, so restart from the top afterwards.
             */
            bp->b_flags |= B_ASYNC;
            notavail(bp);
            bwrite(bp);
            goto loop;
        }
    }
    (void) _spl0();
}
 548: 
 549: /*
 550:  * Raw I/O. The arguments are
 551:  *	The strategy routine for the device
 552:  *	A buffer, which will always be a special buffer
 553:  *	  header owned exclusively by the device for this purpose
 554:  *	The device number
 555:  *	Read/write flag
 556:  * Essentially all the work is computing physical addresses and
 557:  * validating them.
 558:  *
 559:  * physio broken into smaller routines, 3/81 mjk
 560:  *	chkphys(WORD or BYTE) checks validity of word- or byte-
 561:  *	oriented transfer (for physio or device drivers);
 562:  *	physbuf(strat,bp,rw) fills in the buffer header.
 563:  *
 564:  * physio divided into two functions, 1/83 - Mike Edmonds - Tektronix
 565:  *	Physio divided into separate functions:
 566:  *		physio (for WORD i/o)
 567:  *		bphysio (for BYTE i/o)
 568:  *	This allows byte-oriented devices (such as tape drives)
 569:  *	to write/read odd length blocks.
 570:  */
 571: 
 572: physio(strat, bp, dev, rw)
 573: register struct buf *bp;
 574: int (*strat)();
 575: dev_t dev;
 576: {
 577:     physio1(strat, bp, dev, rw, WORD);
 578: }
 579: 
 580: bphysio(strat, bp, dev, rw)
 581: register struct buf *bp;
 582: int (*strat)();
 583: dev_t dev;
 584: {
 585:     physio1(strat, bp, dev, rw, BYTE);
 586: }
 587: 
/*
 * Common body of physio/bphysio: validate the user transfer
 * area (word- or byte-oriented per kind), fill in the raw
 * buffer header, run the I/O, and return the residual count
 * to the user in u.u_count.
 */
physio1(strat, bp, dev, rw, kind)
register struct buf *bp;
int (*strat)();
dev_t dev;
{
    if (chkphys(kind))
        return;
    physbuf(bp,dev,rw);
    /* lock the process in core while the device uses its pages */
    u.u_procp->p_flag |= SLOCK;
    (*strat)(bp);
    iowait(bp);
    u.u_procp->p_flag &= ~SLOCK;
    /* free the raw buffer header and wake any waiter for it */
    if (bp->b_flags&B_WANTED)
        wakeup((caddr_t)bp);
    bp->b_flags &= ~(B_BUSY|B_WANTED);
    u.u_count = bp->b_resid;
}
 605: 
 606: /*
 607:  * check for validity of physical I/O area
 608:  * (modified from physio to use flag for BYTE-oriented transfers)
 609:  */
/*
 * check for validity of physical I/O area
 * (modified from physio to use flag for BYTE-oriented transfers)
 * Returns 0 if u.u_base/u.u_count describe a transfer wholly in
 * the user's data or stack; otherwise sets EFAULT and returns -1.
 */
chkphys(flag)
{
    register unsigned base;
    register int nb;
    register ts;

    base = (unsigned)u.u_base;
    /*
	 * Check odd base, odd count, and address wraparound
	 * Odd base and count not allowed if flag=WORD,
	 * allowed if flag=BYTE.
	 */
    if (flag==WORD && (base&01 || u.u_count&01))
        goto bad;
    if (base>=base+u.u_count)
        goto bad;
    /* non-separated I/D: text occupies the low clicks of the space */
    if (u.u_sep)
        ts = 0;
    else
        ts = (u.u_tsize+127) & ~0177;
    nb = (base>>6) & 01777;
    /*
	 * Check overlap with text. (ts and nb now
	 * in 64-byte clicks)
	 */
    if (nb < ts)
        goto bad;
    /*
	 * Check that transfer is either entirely in the
	 * data or in the stack: that is, either
	 * the end is in the data or the start is in the stack
	 * (remember wraparound was already checked).
	 */
    if ((((base+u.u_count)>>6)&01777) >= ts+u.u_dsize && nb < 1024-u.u_ssize)
        goto bad;
    return(0);

    bad:
    u.u_error = EFAULT;
    return(-1);
}
 651: 
 652: /*
 653:  * wait for buffer header, then fill it in to do physical I/O.
 654:  */
/*
 * wait for buffer header, then fill it in to do physical I/O.
 * On return bp is busy and describes the user's transfer:
 * physical address, block number, and byte count.
 */
physbuf(bp,dev,rw)
register struct buf *bp;
dev_t dev;
{
    register int nb;
    register unsigned base;
    register int ts;

    base = (unsigned)u.u_base;
    nb = (base>>6) & 01777;     /* user virtual address in 64-byte clicks */

    /* wait for exclusive use of the device's raw buffer header */
    (void) _spl6();
    while (bp->b_flags&B_BUSY) {
        bp->b_flags |= B_WANTED;
        sleep((caddr_t)bp, PRIBIO+1);
    }
    (void) _spl0();
    bp->b_flags = B_BUSY | B_PHYS | rw;
    bp->b_dev = dev;
    /*
	 * Compute physical address by simulating
	 * the segmentation hardware.
	 */
    ts = (u.u_sep? UDSA: UISA)[nb>>7] + (nb&0177);
    bp->b_un.b_addr = (caddr_t)((ts<<6) + (base&077));
    bp->b_xmem = (ts>>10) & 077;
    bp->b_blkno = u.u_offset >> PGSHIFT;
    bp->b_bcount = u.u_count;
    bp->b_error = 0;
}
 685: 
 686: /*
 687:  * Pick up the device's error number and pass it to the user;
 688:  * if there is an error but the number is 0 set a generalized
 689:  * code.
 690:  */
 691: geterror(bp)
 692: register struct buf *bp;
 693: {
 694: 
 695:     if (bp->b_flags&B_ERROR)
 696:         if ((u.u_error = bp->b_error)==0)
 697:             u.u_error = EIO;
 698: }

Defined functions

bawrite defined in line 181; used 3 times
bflush defined in line 531; used 1 times
bphysio defined in line 580; used 6 times
breada defined in line 95; used 1 times
bunhash defined in line 373; used 4 times
bwrite defined in line 137; used 11 times
chkphys defined in line 610; used 2 times
clrbuf defined in line 452; used 3 times
getblk defined in line 260; used 10 times
geterror defined in line 691; used 5 times
incore defined in line 232; used 2 times
notavail defined in line 415; used 4 times
physbuf defined in line 655; used 2 times
physio1 defined in line 588; used 2 times
swap defined in line 473; used 12 times

Defined variables

bhash defined in line 61; used 6 times
io_info defined in line 21; used 5 times
swbuf1 defined in line 29; used 1 times
swbuf2 defined in line 30; used 2 times

Defined macros

BUFHASH defined in line 59; used 3 times
BUFHSZ defined in line 57; used 2 times
Last modified: 1983-08-06
Generated: 2016-12-26
Generated by src2html V0.67
page hit count: 1672
Valid CSS Valid XHTML 1.0 Strict