1: #
   2: /*
   3:  */
   4: 
   5: #include "../param.h"
   6: #include "../user.h"
   7: #include "../buf.h"
   8: #include "../conf.h"
   9: #include "../systm.h"
  10: #include "../proc.h"
  11: #include "../seg.h"
  12: 
  13: /*
  14:  * This is the set of buffers proper, whose heads
  15:  * were declared in buf.h.  There can exist buffer
  16:  * headers not pointing here that are used purely
  17:  * as arguments to the I/O routines to describe
  18:  * I/O to be done-- e.g. swbuf, just below, for
  19:  * swapping.
  20:  */
  21: char    buffers[NBUF][514];
  22: struct  buf swbuf;
  23: 
  24: /*
  25:  * Declarations of the tables for the magtape devices;
  26:  * see bdwrite.
  27:  */
  28: int tmtab;
  29: int httab;
  30: 
  31: /*
  32:  * The following several routines allocate and free
  33:  * buffers with various side effects.  In general the
  34:  * arguments to an allocate routine are a device and
 * a block number, and the value is a pointer to
 * the buffer header; the buffer is marked "busy"
 * so that no one else can touch it.  If the block was
  38:  * already in core, no I/O need be done; if it is
  39:  * already busy, the process waits until it becomes free.
  40:  * The following routines allocate a buffer:
  41:  *	getblk
  42:  *	bread
  43:  *	breada
  44:  * Eventually the buffer must be released, possibly with the
  45:  * side effect of writing it out, by using one of
  46:  *	bwrite
  47:  *	bdwrite
  48:  *	bawrite
  49:  *	brelse
  50:  */
  51: 
  52: /*
  53:  * Read in (if necessary) the block and return a buffer pointer.
  54:  */
bread(dev, blkno)
{
    register struct buf *rbp;

    rbp = getblk(dev, blkno);
    if (rbp->b_flags&B_DONE)
        return(rbp);    /* already in core with valid data: no I/O needed */
    rbp->b_flags =| B_READ;
    rbp->b_wcount = -256;   /* word count, negated: 256 words = 512 bytes */
    (*bdevsw[dev.d_major].d_strategy)(rbp);     /* queue the read on the device */
    iowait(rbp);    /* sleep until B_DONE; post any error to the user */
    return(rbp);
}
  68: 
  69: /*
  70:  * Read in the block, like bread, but also start I/O on the
  71:  * read-ahead block (which is not allocated to the caller)
  72:  */
breada(adev, blkno, rablkno)
{
    register struct buf *rbp, *rabp;
    register int dev;

    dev = adev;
    rbp = 0;
    /* Start the main read only if the block is not already in core;
     * if it is, fall through and let bread find it below. */
    if (!incore(dev, blkno)) {
        rbp = getblk(dev, blkno);
        if ((rbp->b_flags&B_DONE) == 0) {
            rbp->b_flags =| B_READ;
            rbp->b_wcount = -256;   /* 256 words = 512 bytes, negated */
            (*bdevsw[adev.d_major].d_strategy)(rbp);
        }
    }
    /* Start the read-ahead block asynchronously; the caller never gets
     * this buffer, so it is released immediately (brelse now if already
     * done, or via B_ASYNC in iodone when the I/O completes). */
    if (rablkno && !incore(dev, rablkno)) {
        rabp = getblk(dev, rablkno);
        if (rabp->b_flags & B_DONE)
            brelse(rabp);
        else {
            rabp->b_flags =| B_READ|B_ASYNC;
            rabp->b_wcount = -256;
            (*bdevsw[adev.d_major].d_strategy)(rabp);
        }
    }
    /* Main block was in core: pick it up the normal way. */
    if (rbp==0)
        return(bread(dev, blkno));
    iowait(rbp);    /* wait only for the main block, not the read-ahead */
    return(rbp);
}
 103: 
 104: /*
 105:  * Write the buffer, waiting for completion.
 106:  * Then release the buffer.
 107:  */
bwrite(bp)
struct buf *bp;
{
    register struct buf *rbp;
    register flag;

    rbp = bp;
    flag = rbp->b_flags;    /* snapshot: brelse/iodone may change b_flags */
    rbp->b_flags =& ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
    rbp->b_wcount = -256;   /* 256 words = 512 bytes, negated */
    (*bdevsw[rbp->b_dev.d_major].d_strategy)(rbp);
    if ((flag&B_ASYNC) == 0) {
        /* synchronous: wait for completion, then free the buffer */
        iowait(rbp);
        brelse(rbp);
    } else if ((flag&B_DELWRI)==0)
        /* async write that was not a delayed write: check for an error
         * immediately.  NOTE(review): the I/O may not have completed
         * yet at this point; historical V6 behavior -- confirm intent. */
        geterror(rbp);
}
 125: 
 126: /*
 127:  * Release the buffer, marking it so that if it is grabbed
 128:  * for another purpose it will be written out before being
 129:  * given up (e.g. when writing a partial block where it is
 130:  * assumed that another write for the same block will soon follow).
 131:  * This can't be done for magtape, since writes must be done
 132:  * in the same order as requested.
 133:  */
bdwrite(bp)
struct buf *bp;
{
    register struct buf *rbp;
    register struct devtab *dp;

    rbp = bp;
    dp = bdevsw[rbp->b_dev.d_major].d_tab;
    /* Magtape (tm, ht) cannot reorder writes, so write through now;
     * the device tables are matched by address against the stubs
     * declared at the top of this file. */
    if (dp == &tmtab || dp == &httab)
        bawrite(rbp);
    else {
        /* Mark "dirty, write before reuse" and release without I/O;
         * getblk/bflush will write it out later. */
        rbp->b_flags =| B_DELWRI | B_DONE;
        brelse(rbp);
    }
}
 149: 
 150: /*
 151:  * Release the buffer, start I/O on it, but don't wait for completion.
 152:  */
bawrite(bp)
struct buf *bp;
{
    register struct buf *rbp;

    rbp = bp;
    rbp->b_flags =| B_ASYNC;    /* tells bwrite not to iowait/brelse */
    bwrite(rbp);    /* iodone will brelse the buffer when I/O finishes */
}
 162: 
 163: /*
 164:  * release the buffer, with no I/O implied.
 165:  */
brelse(bp)
struct buf *bp;
{
    register struct buf *rbp, **backp;
    register int sps;

    rbp = bp;
    /* Wake anyone sleeping on this particular buffer... */
    if (rbp->b_flags&B_WANTED)
        wakeup(rbp);
    /* ...and anyone waiting for any buffer at all (see getblk). */
    if (bfreelist.b_flags&B_WANTED) {
        bfreelist.b_flags =& ~B_WANTED;
        wakeup(&bfreelist);
    }
    if (rbp->b_flags&B_ERROR)
        rbp->b_dev.d_minor = -1;  /* no assoc. on error */
    /* Insert at the tail of the free list (LRU order: getblk reuses
     * from the head).  Done with interrupts masked because iodone can
     * run brelse from interrupt level; the processor status word is
     * saved and restored around the critical section. */
    backp = &bfreelist.av_back;
    sps = PS->integ;    /* save current processor priority */
    spl6();
    rbp->b_flags =& ~(B_WANTED|B_BUSY|B_ASYNC);
    (*backp)->av_forw = rbp;
    rbp->av_back = *backp;
    *backp = rbp;
    rbp->av_forw = &bfreelist;
    PS->integ = sps;    /* restore priority */
}
 191: 
 192: /*
 193:  * See if the block is associated with some buffer
 194:  * (mainly to avoid getting hung up on a wait in breada)
 195:  */
incore(adev, blkno)
{
    register int dev;
    register struct buf *bp;
    register struct devtab *dp;

    dev = adev;
    dp = bdevsw[adev.d_major].d_tab;
    /* Walk the device's circular b_forw list of associated buffers;
     * the devtab head itself terminates the scan.  Returns the buffer
     * pointer (as an int, V6 style) if found, 0 otherwise.  The buffer
     * is NOT marked busy -- this is only a hint (see breada). */
    for (bp=dp->b_forw; bp != dp; bp = bp->b_forw)
        if (bp->b_blkno==blkno && bp->b_dev==dev)
            return(bp);
    return(0);
}
 209: 
 210: /*
 211:  * Assign a buffer for the given block.  If the appropriate
 212:  * block is already associated, return it; otherwise search
 213:  * for the oldest non-busy buffer and reassign it.
 214:  * When a 512-byte area is wanted for some random reason
 215:  * (e.g. during exec, for the user arglist) getblk can be called
 216:  * with device NODEV to avoid unwanted associativity.
 217:  */
getblk(dev, blkno)
{
    register struct buf *bp;
    register struct devtab *dp;
    extern lbolt;

    /* Sanity-check the major device number.  NOTE(review): for NODEV
     * (-1) this relies on d_major being a sign-extended character so
     * that -1 < nblkdev holds -- confirm against buf.h/param.h. */
    if(dev.d_major >= nblkdev)
        panic("blkdev");

    loop:
    if (dev < 0)
        /* NODEV: caller just wants a 512-byte scratch area; search the
         * free list directly, with no device association. */
        dp = &bfreelist;
    else {
        dp = bdevsw[dev.d_major].d_tab;
        if(dp == NULL)
            panic("devtab");
        /* First look for the block among buffers already associated
         * with this device. */
        for (bp=dp->b_forw; bp != dp; bp = bp->b_forw) {
            if (bp->b_blkno!=blkno || bp->b_dev!=dev)
                continue;
            /* Found it; raise priority so b_flags can't change under
             * us while we test B_BUSY. */
            spl6();
            if (bp->b_flags&B_BUSY) {
                bp->b_flags =| B_WANTED;
                sleep(bp, PRIBIO);
                spl0();
                goto loop;  /* rescan from scratch after waking */
            }
            spl0();
            notavail(bp);   /* pull off free list, mark busy */
            return(bp);
        }
    }
    /* Not found: take the oldest free buffer.  If the free list is
     * empty, record that somebody wants one and sleep. */
    spl6();
    if (bfreelist.av_forw == &bfreelist) {
        bfreelist.b_flags =| B_WANTED;
        sleep(&bfreelist, PRIBIO);
        spl0();
        goto loop;
    }
    spl0();
    notavail(bp = bfreelist.av_forw);
    /* A delayed-write buffer must be flushed before reuse; start the
     * write asynchronously and go look for another buffer. */
    if (bp->b_flags & B_DELWRI) {
        bp->b_flags =| B_ASYNC;
        bwrite(bp);
        goto loop;
    }
    bp->b_flags = B_BUSY | B_RELOC;
    /* Unlink from old device's b_forw/b_back list... */
    bp->b_back->b_forw = bp->b_forw;
    bp->b_forw->b_back = bp->b_back;
    /* ...and link at the head of the new device's list. */
    bp->b_forw = dp->b_forw;
    bp->b_back = dp;
    dp->b_forw->b_back = bp;
    dp->b_forw = bp;
    bp->b_dev = dev;
    bp->b_blkno = blkno;
    return(bp);
}
 274: 
 275: /*
 276:  * Wait for I/O completion on the buffer; return errors
 277:  * to the user.
 278:  */
iowait(bp)
struct buf *bp;
{
    register struct buf *rbp;

    rbp = bp;
    /* Test-and-sleep must be done at high priority so the completion
     * interrupt (iodone) cannot slip in between the test and sleep. */
    spl6();
    while ((rbp->b_flags&B_DONE)==0)
        sleep(rbp, PRIBIO);
    spl0();
    geterror(rbp);  /* propagate any device error to u.u_error */
}
 291: 
 292: /*
 293:  * Unlink a buffer from the available list and mark it busy.
 294:  * (internal interface)
 295:  */
notavail(bp)
struct buf *bp;
{
    register struct buf *rbp;
    register int sps;

    rbp = bp;
    /* Unlink from the av_forw/av_back free list with interrupts
     * masked; processor status saved and restored so this is safe
     * from any prior priority level. */
    sps = PS->integ;
    spl6();
    rbp->av_back->av_forw = rbp->av_forw;
    rbp->av_forw->av_back = rbp->av_back;
    rbp->b_flags =| B_BUSY;
    PS->integ = sps;
}
 310: 
 311: /*
 312:  * Mark I/O complete on a buffer, release it if I/O is asynchronous,
 313:  * and wake up anyone waiting for it.
 314:  */
iodone(bp)
struct buf *bp;
{
    register struct buf *rbp;

    rbp = bp;
    /* Release the UNIBUS map if this transfer used one (11/70). */
    if(rbp->b_flags&B_MAP)
        mapfree(rbp);
    rbp->b_flags =| B_DONE;
    if (rbp->b_flags&B_ASYNC)
        /* nobody is waiting for async I/O: just free the buffer */
        brelse(rbp);
    else {
        rbp->b_flags =& ~B_WANTED;
        wakeup(rbp);    /* rouse the iowait()er */
    }
}
 331: 
 332: /*
 333:  * Zero the core associated with a buffer.
 334:  */
clrbuf(bp)
int *bp;
{
    register *p;
    register c;

    /* bp is declared int* but b_addr is selected through it anyway --
     * V6 C treats member names as global offsets, so this works on any
     * pointer.  Zero the 256 words (512 bytes) of the data area. */
    p = bp->b_addr;
    c = 256;
    do
        *p++ = 0;
    while (--c);
}
 347: 
 348: /*
 349:  * Initialize the buffer I/O system by freeing
 350:  * all buffers and setting all device buffer lists to empty.
 351:  */
binit()
{
    register struct buf *bp;
    register struct devtab *dp;
    register int i;
    struct bdevsw *bdp;

    /* Start with both circular lists on bfreelist pointing at itself. */
    bfreelist.b_forw = bfreelist.b_back =
        bfreelist.av_forw = bfreelist.av_back = &bfreelist;
    for (i=0; i<NBUF; i++) {
        bp = &buf[i];
        bp->b_dev = -1;     /* NODEV: no block association yet */
        bp->b_addr = buffers[i];
        /* Link onto bfreelist's b_forw device list... */
        bp->b_back = &bfreelist;
        bp->b_forw = bfreelist.b_forw;
        bfreelist.b_forw->b_back = bp;
        bfreelist.b_forw = bp;
        /* ...then mark busy and brelse, which puts it on the av_forw
         * free list with flags properly cleared. */
        bp->b_flags = B_BUSY;
        brelse(bp);
    }
    /* Count block devices (bdevsw ends at the first null d_open) and
     * empty each device's buffer list. */
    i = 0;
    for (bdp = bdevsw; bdp->d_open; bdp++) {
        dp = bdp->d_tab;
        if(dp) {
            dp->b_forw = dp;
            dp->b_back = dp;
        }
        i++;
    }
    nblkdev = i;
}
 383: 
 384: /*
 385:  * Device start routine for disks
 386:  * and other devices that have the register
 387:  * layout of the older DEC controllers (RF, RK, RP, TM)
 388:  */
 389: #define IENABLE 0100
 390: #define WCOM    02
 391: #define RCOM    04
 392: #define GO  01
devstart(bp, devloc, devblk, hbcom)
struct buf *bp;
int *devloc;
{
    register int *dp;
    register struct buf *rbp;
    register int com;

    /* devloc points at the device's block-address register; the
     * buffer-address, word-count and command registers sit at
     * successively lower addresses (older DEC controller layout). */
    dp = devloc;
    rbp = bp;
    *dp = devblk;           /* block address */
    *--dp = rbp->b_addr;        /* buffer address */
    *--dp = rbp->b_wcount;      /* word count */
    /* Build command: high byte from caller, interrupt enable, go, and
     * extended-memory bits (bits 4-5) from b_xmem. */
    com = (hbcom<<8) | IENABLE | GO |
        ((rbp->b_xmem & 03) << 4);
    if (rbp->b_flags&B_READ)    /* command + x-mem */
        com =| RCOM;
    else
        com =| WCOM;
    *--dp = com;    /* writing the command register starts the transfer */
}
 414: 
 415: /*
 416:  * startup routine for RH controllers.
 417:  */
 418: #define RHWCOM  060
 419: #define RHRCOM  070
 420: 
rhstart(bp, devloc, devblk, abae)
struct buf *bp;
int *devloc, *abae;
{
    register int *dp;
    register struct buf *rbp;
    register int com;

    dp = devloc;
    rbp = bp;
    /* On an 11/70 the RH70 has a separate bus-address-extension
     * register for the high address bits. */
    if(cputype == 70)
        *abae = rbp->b_xmem;
    *dp = devblk;           /* block address */
    *--dp = rbp->b_addr;        /* buffer address */
    *--dp = rbp->b_wcount;      /* word count */
    /* RH controllers take the extended-memory bits at bits 8-9 of the
     * command word, unlike the older controllers (see devstart). */
    com = IENABLE | GO |
        ((rbp->b_xmem & 03) << 8);
    if (rbp->b_flags&B_READ)    /* command + x-mem */
        com =| RHRCOM; else
        com =| RHWCOM;
    *--dp = com;    /* starts the transfer */
}
 443: 
 444: /*
 445:  * 11/70 routine to allocate the
 446:  * UNIBUS map and initialize for
 447:  * a unibus device.
 448:  * The code here and in
 449:  * rhstart assumes that an rh on an 11/70
 450:  * is an rh70 and contains 22 bit addressing.
 451:  */
 452: int maplock;
mapalloc(abp)
struct buf *abp;
{
    register i, a;
    register struct buf *bp;

    /* Only the 11/70 has a UNIBUS map; elsewhere this is a no-op. */
    if(cputype != 70)
        return;
    /* Single lock for the whole map: sleep until free, then claim. */
    spl6();
    while(maplock&B_BUSY) {
        maplock =| B_WANTED;
        sleep(&maplock, PSWP);
    }
    maplock =| B_BUSY;
    spl0();
    bp = abp;
    bp->b_flags =| B_MAP;   /* iodone will call mapfree */
    /* Program map registers (pairs: low word at r[i], high at r[i+1])
     * so UNIBUS addresses map to the buffer's physical memory; the
     * second group covers the next 64K chunk (a incremented).
     * NOTE(review): only the odd (high) registers are written here --
     * presumably the low words are set up elsewhere; confirm against
     * seg.h / UBMAP layout. */
    a = bp->b_xmem;
    for(i=16; i<32; i=+2)
        UBMAP->r[i+1] = a;
    for(a++; i<48; i=+2)
        UBMAP->r[i+1] = a;
    bp->b_xmem = 1;     /* transfer now addressed through the map */
}
 477: 
mapfree(bp)
struct buf *bp;
{

    /* Release the UNIBUS map lock and wake any mapalloc sleeper. */
    bp->b_flags =& ~B_MAP;
    if(maplock&B_WANTED)
        wakeup(&maplock);
    maplock = 0;
}
 487: 
 488: /*
 489:  * swap I/O
 490:  */
swap(blkno, coreaddr, count, rdflg)
{
    register int *fp;

    /* Serialize on the single static swap buffer header. */
    fp = &swbuf.b_flags;
    spl6();
    while (*fp&B_BUSY) {
        *fp =| B_WANTED;
        sleep(fp, PSWP);
    }
    *fp = B_BUSY | B_PHYS | rdflg;  /* rdflg is B_READ or 0 (write) */
    swbuf.b_dev = swapdev;
    swbuf.b_wcount = - (count<<5);  /* 32 w/block */
    swbuf.b_blkno = blkno;
    swbuf.b_addr = coreaddr<<6; /* 64 b/block */
    swbuf.b_xmem = (coreaddr>>10) & 077;    /* high physical addr bits */
    (*bdevsw[swapdev>>8].d_strategy)(&swbuf);   /* >>8: major number */
    /* Wait for completion at high priority, then release the buffer
     * and wake any other swapper waiting on it. */
    spl6();
    while((*fp&B_DONE)==0)
        sleep(fp, PSWP);
    if (*fp&B_WANTED)
        wakeup(fp);
    spl0();
    *fp =& ~(B_BUSY|B_WANTED);
    return(*fp&B_ERROR);    /* nonzero on I/O error */
}
 517: 
 518: /*
 519:  * make sure all write-behind blocks
 520:  * on dev (or NODEV for all)
 521:  * are flushed out.
 522:  * (from umount and update)
 523:  */
bflush(dev)
{
    register struct buf *bp;

loop:
    spl6();
    /* Each bwrite releases priority and changes the free list, so the
     * scan restarts from the top after every buffer written. */
    for (bp = bfreelist.av_forw; bp != &bfreelist; bp = bp->av_forw) {
        if (bp->b_flags&B_DELWRI && (dev == NODEV||dev==bp->b_dev)) {
            bp->b_flags =| B_ASYNC;     /* don't wait for each write */
            notavail(bp);
            bwrite(bp);
            goto loop;
        }
    }
    spl0();
}
 540: 
 541: /*
 542:  * Raw I/O. The arguments are
 543:  *	The strategy routine for the device
 544:  *	A buffer, which will always be a special buffer
 545:  *	  header owned exclusively by the device for this purpose
 546:  *	The device number
 547:  *	Read/write flag
 548:  * Essentially all the work is computing physical addresses and
 549:  * validating them.
 550:  */
physio(strat, abp, dev, rw)
struct buf *abp;
int (*strat)();
{
    register struct buf *bp;
    register char *base;
    register int nb;
    int ts;

    bp = abp;
    base = u.u_base;    /* user virtual address of the transfer */
    /*
	 * Check odd base, odd count, and address wraparound
	 */
    if (base&01 || u.u_count&01 || base>=base+u.u_count)
        goto bad;
    /* ts = size of text segment rounded up to a 64-byte click;
     * zero in separate-I/D mode since data space starts at 0. */
    ts = (u.u_tsize+127) & ~0177;
    if (u.u_sep)
        ts = 0;
    nb = (base>>6) & 01777;     /* start address in clicks */
    /*
	 * Check overlap with text. (ts and nb now
	 * in 64-byte clicks)
	 */
    if (nb < ts)
        goto bad;
    /*
	 * Check that transfer is either entirely in the
	 * data or in the stack: that is, either
	 * the end is in the data or the start is in the stack
	 * (remember wraparound was already checked).
	 */
    if ((((base+u.u_count)>>6)&01777) >= ts+u.u_dsize
        && nb < 1024-u.u_ssize)
        goto bad;
    /* Wait for exclusive use of the device's raw-I/O buffer header. */
    spl6();
    while (bp->b_flags&B_BUSY) {
        bp->b_flags =| B_WANTED;
        sleep(bp, PRIBIO);
    }
    bp->b_flags = B_BUSY | B_PHYS | rw;
    bp->b_dev = dev;
    /*
	 * Compute physical address by simulating
	 * the segmentation hardware.
	 */
    bp->b_addr = base&077;  /* byte offset within the click */
    /* Look up the click's physical base in the user's data (or
     * instruction) segmentation registers, add click offset. */
    base = (u.u_sep? UDSA: UISA)->r[nb>>7] + (nb&0177);
    bp->b_addr =+ base<<6;
    bp->b_xmem = (base>>10) & 077;  /* bits above 16-bit address */
    bp->b_blkno = lshift(u.u_offset, -9);   /* file offset / 512 */
    bp->b_wcount = -((u.u_count>>1) & 077777);  /* negated word count */
    bp->b_error = 0;
    u.u_procp->p_flag =| SLOCK;     /* lock process in core during DMA */
    (*strat)(bp);
    spl6();
    while ((bp->b_flags&B_DONE) == 0)
        sleep(bp, PRIBIO);
    u.u_procp->p_flag =& ~SLOCK;
    if (bp->b_flags&B_WANTED)
        wakeup(bp);     /* another raw I/O is waiting for this header */
    spl0();
    bp->b_flags =& ~(B_BUSY|B_WANTED);
    u.u_count = (-bp->b_resid)<<1;  /* residual words -> bytes not done */
    geterror(bp);
    return;
    bad:
    u.u_error = EFAULT;
}
 620: 
 621: /*
 622:  * Pick up the device's error number and pass it to the user;
 623:  * if there is an error but the number is 0 set a generalized
 624:  * code.  Actually the latter is always true because devices
 625:  * don't yet return specific errors.
 626:  */
geterror(abp)
struct buf *abp;
{
    register struct buf *bp;

    bp = abp;
    /* Copy the device's error number to the user; substitute the
     * generic EIO when the device posted B_ERROR with b_error == 0
     * (which, per the block comment above, is currently always). */
    if (bp->b_flags&B_ERROR)
        if ((u.u_error = bp->b_error)==0)
            u.u_error = EIO;
}

Defined functions

bawrite defined in line 153; used 2 times
bdwrite defined in line 134; used 6 times
bflush defined in line 524; used 2 times
binit defined in line 352; used 1 times
bread defined in line 55; used 16 times
breada defined in line 73; used 1 times
brelse defined in line 166; used 27 times
bwrite defined in line 108; used 7 times
clrbuf defined in line 335; used 3 times
devstart defined in line 393; used 3 times
getblk defined in line 218; used 12 times
geterror defined in line 627; used 3 times
incore defined in line 196; used 2 times
iodone defined in line 315; used 21 times
iowait defined in line 279; used 3 times
mapalloc defined in line 453; used 4 times
mapfree defined in line 478; used 1 times
notavail defined in line 296; used 3 times
physio defined in line 551; used 14 times
rhstart defined in line 421; used 3 times
swap defined in line 491; used 4 times

Defined variables

buffers defined in line 21; used 1 times
httab defined in line 29; used 1 times
maplock defined in line 452; used 7 times
swbuf defined in line 22; used 7 times
tmtab defined in line 28; used 1 times

Defined macros

GO defined in line 392; used 2 times
IENABLE defined in line 389; used 2 times
RCOM defined in line 391; used 1 times
RHRCOM defined in line 419; used 1 times
RHWCOM defined in line 418; used 1 times
WCOM defined in line 390; used 1 times
Last modified: 1975-07-18
Generated: 2016-12-26
Generated by src2html V0.67
page hit count: 2608
Valid CSS Valid XHTML 1.0 Strict