/*
 * Copyright (c) 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)sys_inode.c	1.11 (2.11BSD) 1999/9/10
 */

#include "param.h"
#include "../machine/seg.h"

#include "user.h"
#include "proc.h"
#include "signalvar.h"
#include "inode.h"
#include "buf.h"
#include "fs.h"
#include "file.h"
#include "stat.h"
#include "mount.h"
#include "conf.h"
#include "uio.h"
#include "ioctl.h"
#include "tty.h"
#include "kernel.h"
#include "systm.h"
#include "syslog.h"
#ifdef QUOTA
#include "quota.h"
#endif

extern  int vn_closefile();
int ino_rw(), ino_ioctl(), ino_select();

struct  fileops inodeops =
    { ino_rw, ino_ioctl, ino_select, vn_closefile };
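
/*
 * Illustrative sketch (not part of the original source): callers never
 * invoke these routines by name.  A read(2) or write(2) on an inode-backed
 * descriptor reaches ino_rw() indirectly through the fileops vector above,
 * roughly as:
 *
 *	error = (*fp->f_ops->fo_rw)(fp, &auio);
 *
 * The member name fo_rw is an assumption based on the 4.3BSD-style
 * struct fileops; sockets install a different fileops vector, while pipes
 * share these routines (note the IPIPE and DTYPE_PIPE checks below).
 */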

ino_rw(fp, uio)
    struct file *fp;
register struct uio *uio;
{
    register struct inode *ip = (struct inode *)fp->f_data;
    u_int count, error;
    int ioflag;

    if ((ip->i_mode&IFMT) != IFCHR)
        ILOCK(ip);
    uio->uio_offset = fp->f_offset;
    count = uio->uio_resid;
    if  (uio->uio_rw == UIO_READ)
        {
        error = rwip(ip, uio, fp->f_flag & FNONBLOCK ? IO_NDELAY : 0);
        fp->f_offset += (count - uio->uio_resid);
        }
    else
        {
        ioflag = 0;
        if  ((ip->i_mode&IFMT) == IFREG && (fp->f_flag & FAPPEND))
            ioflag |= IO_APPEND;
        if  (fp->f_flag & FNONBLOCK)
            ioflag |= IO_NDELAY;
        if  (fp->f_flag & FFSYNC ||
             (ip->i_fs->fs_flags & MNT_SYNCHRONOUS))
            ioflag |= IO_SYNC;
        error = rwip(ip, uio, ioflag);
        if  (ioflag & IO_APPEND)
            fp->f_offset = uio->uio_offset;
        else
            fp->f_offset += (count - uio->uio_resid);
        }
    if ((ip->i_mode&IFMT) != IFCHR)
        IUNLOCK(ip);
    return (error);
}

rdwri(rw, ip, base, len, offset, segflg, ioflg, aresid)
    enum uio_rw rw;
    struct inode *ip;
    caddr_t base;
    int len;
    off_t offset;
    enum uio_seg segflg;
    int ioflg;
register int *aresid;
{
    struct uio auio;
    struct iovec aiov;
register int error;

    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    aiov.iov_base = base;
    aiov.iov_len = len;
    auio.uio_rw = rw;
    auio.uio_resid = len;
    auio.uio_offset = offset;
    auio.uio_segflg = segflg;
    error = rwip(ip, &auio, ioflg);
    if (aresid)
        *aresid = auio.uio_resid;
    else
        if (auio.uio_resid)
            error = EIO;
    return (error);
}
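
/*
 * Illustrative sketch (not part of the original source): rdwri() is the
 * kernel's single-buffer convenience wrapper around rwip().  Reading the
 * first block of a file into a kernel buffer might look like:
 *
 *	char buf[DEV_BSIZE];
 *	int resid, error;
 *
 *	error = rdwri(UIO_READ, ip, buf, sizeof buf, (off_t)0,
 *	    UIO_SYSSPACE, 0, &resid);
 *
 * Passing a null aresid instead turns any short transfer into EIO, as the
 * tail of the function shows.
 */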

rwip(ip, uio, ioflag)
    register struct inode *ip;
    register struct uio *uio;
    int ioflag;
{
    dev_t dev = (dev_t)ip->i_rdev;
    register struct buf *bp;
    off_t osize;
    daddr_t lbn, bn;
    int n, on, type, resid;
    int error = 0;
    int flags;

    if  (uio->uio_offset < 0)
        return (EINVAL);
    type = ip->i_mode&IFMT;
/*
 * The write case below checks that i/o is done synchronously to directories
 * and that i/o to append only files takes place at the end of file.
 * We do not panic on non-sync directory i/o - the sync bit is forced on.
*/
    if (uio->uio_rw == UIO_READ)
        {
        if  (!(ip->i_fs->fs_flags & MNT_NOATIME))
            ip->i_flag |= IACC;
        }
    else
        {
        switch (type)
            {
            case IFREG:
                if  (ioflag & IO_APPEND)
                    uio->uio_offset = ip->i_size;
                if  (ip->i_flags & APPEND && uio->uio_offset != ip->i_size)
                    return(EPERM);
                break;
            case IFDIR:
                if  ((ioflag & IO_SYNC) == 0)
                    ioflag |= IO_SYNC;
                break;
            case IFLNK:
            case IFBLK:
            case IFCHR:
                break;
            default:
                return(EFTYPE);
            }
        }

/*
 * The IO_SYNC flag is turned off here if the 'async' mount flag is on.
 * Otherwise directory I/O (which is done by the kernel) would still be
 * synchronous (because the kernel carefully passes IO_SYNC for all directory
 * I/O) even if the fs was mounted with "-o async".
 *
 * A side effect of this is that if the system administrator mounts a filesystem
 * 'async' then the O_FSYNC flag to open() is ignored.
 *
 * This behaviour should probably be selectable via "sysctl fs.async.dirs" and
 * "fs.async.ofsync".  A project for a rainy day.
*/
    if ((type == IFREG || type == IFDIR) && (ip->i_fs->fs_flags & MNT_ASYNC))
        ioflag &= ~IO_SYNC;

    if (type == IFCHR)
        {
        if  (uio->uio_rw == UIO_READ)
            {
            if  (!(ip->i_fs->fs_flags & MNT_NOATIME))
                ip->i_flag |= IACC;
            error = (*cdevsw[major(dev)].d_read)(dev, uio, ioflag);
            }
        else
            {
            ip->i_flag |= IUPD|ICHG;
            error = (*cdevsw[major(dev)].d_write)(dev, uio, ioflag);
            }
        return (error);
        }
    if (uio->uio_resid == 0)
        return (0);
    if (uio->uio_rw == UIO_WRITE && type == IFREG &&
        uio->uio_offset + uio->uio_resid >
          u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
        psignal(u.u_procp, SIGXFSZ);
        return (EFBIG);
    }
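/*
 * Illustrative note (not part of the original source): with a file size
 * rlimit of 1024 bytes, a write(fd, buf, 2048) at offset 0 trips the check
 * above; the process is sent SIGXFSZ and the call fails with EFBIG before
 * any data is transferred.
 */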
#ifdef  QUOTA
    /*
     * we do bytes, see the comment on 'blocks' in ino_stat().
     *
     * the simplifying assumption is made that the entire write will
     * succeed, otherwise we have to check the quota on each block.
     * can you say slow?  i knew you could.  SMS
     */
    if ((type == IFREG || type == IFDIR || type == IFLNK) &&
        uio->uio_rw == UIO_WRITE && !(ip->i_flag & IPIPE)) {
        if (uio->uio_offset + uio->uio_resid > ip->i_size) {
            QUOTAMAP();
            error = chkdq(ip,
                uio->uio_offset+uio->uio_resid - ip->i_size,0);
            QUOTAUNMAP();
            if (error)
                return (error);
        }
    }
#endif
    if (type != IFBLK)
        dev = ip->i_dev;
    resid = uio->uio_resid;
    osize = ip->i_size;

    flags = ioflag & IO_SYNC ? B_SYNC : 0;

    do {
        lbn = lblkno(uio->uio_offset);
        on = blkoff(uio->uio_offset);
        n = MIN((u_int)(DEV_BSIZE - on), uio->uio_resid);
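        /*
         * Illustrative note (not part of the original source): assuming
         * the 2.11BSD DEV_BSIZE of 1024, a 700-byte write at offset 1536
         * yields lbn = 1, on = 512, n = MIN(512, 700) = 512; the remaining
         * 188 bytes are handled in block 2 on the next loop iteration.
         */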
        if (type != IFBLK) {
            if (uio->uio_rw == UIO_READ) {
                off_t diff = ip->i_size - uio->uio_offset;
                if (diff <= 0)
                    return (0);
                if (diff < n)
                    n = diff;
                bn = bmap(ip, lbn, B_READ, flags);
            }
            else
                bn = bmap(ip,lbn,B_WRITE,
                       n == DEV_BSIZE ? flags : flags|B_CLRBUF);
            if (u.u_error || uio->uio_rw == UIO_WRITE && (long)bn<0)
                return (u.u_error);
            if (uio->uio_rw == UIO_WRITE && uio->uio_offset + n > ip->i_size &&
               (type == IFDIR || type == IFREG || type == IFLNK))
                ip->i_size = uio->uio_offset + n;
        } else {
            bn = lbn;
            rablock = bn + 1;
        }
        if (uio->uio_rw == UIO_READ) {
            if ((long)bn<0) {
                bp = geteblk();
                clrbuf(bp);
            } else if (ip->i_lastr + 1 == lbn)
                bp = breada(dev, bn, rablock);
            else
                bp = bread(dev, bn);
            ip->i_lastr = lbn;
        } else {
            if (n == DEV_BSIZE)
                bp = getblk(dev, bn);
            else
                bp = bread(dev, bn);
/*
 * 4.3 didn't do this, but 2.10 did; it isn't clear why.  Apparently the
 * tape drivers no longer clear buffers at end-of-tape themselves
 * (clrbuf can't be called from interrupt).
*/
            if (bp->b_resid == DEV_BSIZE) {
                bp->b_resid = 0;
                clrbuf(bp);
            }
        }
        n = MIN(n, DEV_BSIZE - bp->b_resid);
        if (bp->b_flags & B_ERROR) {
            error = EIO;
            brelse(bp);
            break;
        }
        u.u_error = uiomove(mapin(bp)+on, n, uio);
        mapout(bp);
        if (uio->uio_rw == UIO_READ) {
            if (n + on == DEV_BSIZE || uio->uio_offset == ip->i_size) {
                bp->b_flags |= B_AGE;
                if (ip->i_flag & IPIPE)
                    bp->b_flags &= ~B_DELWRI;
            }
            brelse(bp);
        } else {
            if (ioflag & IO_SYNC)
                bwrite(bp);
/*
 * The check below interacts _very_ badly with virtual memory tmp files
 * such as those used by 'ld'.  These files tend to be small and repeatedly
 * rewritten in 1kb chunks.  The check below causes the device driver to be
 * called (and I/O initiated) constantly.  Not sure what to do about this yet
 * but this comment is being placed here as a reminder.
*/
            else if (n + on == DEV_BSIZE && !(ip->i_flag & IPIPE)) {
                bp->b_flags |= B_AGE;
                bawrite(bp);
            } else
                bdwrite(bp);
            ip->i_flag |= IUPD|ICHG;
            if (u.u_ruid != 0)
                ip->i_mode &= ~(ISUID|ISGID);
        }
    } while (u.u_error == 0 && uio->uio_resid && n != 0);
    if (error == 0)             /* XXX */
        error = u.u_error;      /* XXX */
    if (error && (uio->uio_rw == UIO_WRITE) && (ioflag & IO_UNIT) &&
        (type != IFBLK)) {
        itrunc(ip, osize, ioflag & IO_SYNC);
        uio->uio_offset -= (resid - uio->uio_resid);
        uio->uio_resid = resid;
/*
 * Should back out the change to the quota here but that would be a lot
 * of work for little benefit.  Besides, we've already made the assumption
 * that the entire write would succeed, and users can't turn on the IO_UNIT
 * bit for their writes anyway.
*/
    }
#ifdef whybother
    if (!error && (ioflag & IO_SYNC))
        IUPDAT(ip, &time, &time, 1);
#endif
    return (error);
}
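
/*
 * Illustrative sketch (not part of the original source): IO_UNIT, handled
 * just above, makes a failed write all-or-nothing.  A kernel caller wanting
 * that behaviour through rdwri() might write:
 *
 *	error = rdwri(UIO_WRITE, ip, buf, len, off, UIO_SYSSPACE,
 *	    IO_UNIT|IO_SYNC, (int *)0);
 *
 * On failure the file is truncated back to its prior size and the uio is
 * rewound, so nothing partial is left behind.
 */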

ino_ioctl(fp, com, data)
    register struct file *fp;
    register u_int com;
    caddr_t data;
{
    register struct inode *ip = ((struct inode *)fp->f_data);
    dev_t dev;

    switch (ip->i_mode & IFMT) {

    case IFREG:
    case IFDIR:
        if (com == FIONREAD) {
            if (fp->f_type==DTYPE_PIPE && !(fp->f_flag&FREAD))
                *(off_t *)data = 0;
            else
                *(off_t *)data = ip->i_size - fp->f_offset;
            return (0);
        }
        if (com == FIONBIO || com == FIOASYNC)  /* XXX */
            return (0);         /* XXX */
        /* fall into ... */

    default:
        return (ENOTTY);

    case IFCHR:
        dev = ip->i_rdev;
        u.u_r.r_val1 = 0;
        if  (setjmp(&u.u_qsave))
/*
 * The ONLY way we can get here is via the longjump in sleep.  Signals have
 * been checked for and u_error set accordingly.  All that remains to do
 * is 'return'.
*/
            return(u.u_error);
        return((*cdevsw[major(dev)].d_ioctl)(dev,com,data,fp->f_flag));
    }
}
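
/*
 * Illustrative note (not part of the original source): FIONREAD, handled
 * above for regular files and directories, reports what remains between
 * the descriptor's offset and end of file.  From user code:
 *
 *	off_t nbytes;
 *	ioctl(fd, FIONREAD, (char *)&nbytes);
 *
 * Other requests on a non-device inode are either accepted as no-ops
 * (FIONBIO, FIOASYNC) or rejected with ENOTTY.
 */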

ino_select(fp, which)
    struct file *fp;
    int which;
{
    register struct inode *ip = (struct inode *)fp->f_data;
    register dev_t dev;

    switch (ip->i_mode & IFMT) {

    default:
        return (1);     /* XXX */

    case IFCHR:
        dev = ip->i_rdev;
        return (*cdevsw[major(dev)].d_select)(dev, which);
    }
}

ino_stat(ip, sb)
    register struct inode *ip;
    register struct stat *sb;
{
    register struct icommon2 *ic2;

#ifdef  EXTERNALITIMES
    mapseg5(xitimes, xitdesc);
    ic2 = &((struct icommon2 *)SEG5)[ip - inode];
#else
    ic2 = &ip->i_ic2;
#endif

/*
 * inlined ITIMES which takes advantage of the common times pointer.
*/
    if (ip->i_flag & (IUPD|IACC|ICHG)) {
        ip->i_flag |= IMOD;
        if (ip->i_flag & IACC)
            ic2->ic_atime = time.tv_sec;
        if (ip->i_flag & IUPD)
            ic2->ic_mtime = time.tv_sec;
        if (ip->i_flag & ICHG)
            ic2->ic_ctime = time.tv_sec;
        ip->i_flag &= ~(IUPD|IACC|ICHG);
    }
    sb->st_dev = ip->i_dev;
    sb->st_ino = ip->i_number;
    sb->st_mode = ip->i_mode;
    sb->st_nlink = ip->i_nlink;
    sb->st_uid = ip->i_uid;
    sb->st_gid = ip->i_gid;
    sb->st_rdev = (dev_t)ip->i_rdev;
    sb->st_size = ip->i_size;
    sb->st_atime = ic2->ic_atime;
    sb->st_spare1 = 0;
    sb->st_mtime = ic2->ic_mtime;
    sb->st_spare2 = 0;
    sb->st_ctime = ic2->ic_ctime;
    sb->st_spare3 = 0;
    sb->st_blksize = MAXBSIZE;
    /*
     * blocks are too tough to do; it's not worth the effort.
     */
    sb->st_blocks = btodb(ip->i_size + MAXBSIZE - 1);
    sb->st_flags = ip->i_flags;
    sb->st_spare4[0] = 0;
    sb->st_spare4[1] = 0;
    sb->st_spare4[2] = 0;
#ifdef  EXTERNALITIMES
    normalseg5();
#endif
    return (0);
}
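
/*
 * Illustrative note (not part of the original source): st_blocks above is
 * only an estimate.  The size is rounded up by MAXBSIZE - 1 before the
 * btodb() conversion, so a 1-byte file is charged a full block, and sparse
 * files are charged for holes they never allocated.  Exact accounting
 * would mean walking the block map, which the comment deems not worth it.
 */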

/*
 * This routine, like its counterpart openi(), calls the device driver for
 * special (IFBLK, IFCHR) files.  Normal files simply return early (the
 * default case in the switch statement).  Pipes and sockets do NOT come
 * here because they have their own close routines.
*/

closei(ip, flag)
    register struct inode *ip;
    int flag;
    {
    register struct mount *mp;
    register struct file *fp;
    int mode, error;
    dev_t   dev;
    int (*cfunc)();

    mode = ip->i_mode & IFMT;
    dev = ip->i_rdev;

    switch  (mode)
        {
        case    IFCHR:
            cfunc = cdevsw[major(dev)].d_close;
            break;
        case    IFBLK:
        /*
         * We don't want to really close the device if it is mounted
         */
/* MOUNT TABLE SHOULD HOLD INODE */
            for (mp = mount; mp < &mount[NMOUNT]; mp++)
                if (mp->m_inodp != NULL && mp->m_dev == dev)
                    return(0);
            cfunc = bdevsw[major(dev)].d_close;
            break;
        default:
            return(0);
        }
    /*
     * Check that another inode for the same device isn't active.
     * This is because the same device can be referenced by two
     * different inodes.
     */
    for (fp = file; fp < fileNFILE; fp++)
        {
        if (fp->f_type != DTYPE_INODE)
            continue;
        if (fp->f_count && (ip = (struct inode *)fp->f_data) &&
            ip->i_rdev == dev && (ip->i_mode&IFMT) == mode)
            return(0);
        }
    if  (mode == IFBLK)
        {
        /*
         * On last close of a block device (that isn't mounted)
         * we must invalidate any in core blocks, so that
         * we can, for instance, change floppy disks.
         */
        bflush(dev);
        binval(dev);
        }
/*
 * NOTE:  none of the device drivers appear to either set u_error OR return
 *	  anything meaningful from their close routines.  It's a good thing
 *	  programs don't bother checking the error status on close() calls.
 *	  Apparently the only time "errno" is meaningful after a "close" is
 *	  when the process is interrupted.
*/
    if  (setjmp(&u.u_qsave))
        {
        /*
         * If device close routine is interrupted,
         * must return so closef can clean up.
         */
        if  ((error = u.u_error) == 0)
            error = EINTR;
        }
    else
        error = (*cfunc)(dev, flag, mode);
    return(error);
    }

/*
 * Place an advisory lock on an inode.
 * NOTE: callers of this routine must be prepared to deal with the pseudo
 *       error return ERESTART.
 */
ino_lock(fp, cmd)
    register struct file *fp;
    int cmd;
{
    register int priority = PLOCK;
    register struct inode *ip = (struct inode *)fp->f_data;
    int error;

    if ((cmd & LOCK_EX) == 0)
        priority += 4;
/*
 * If there's an exclusive lock currently applied to the file then we've
 * gotta wait for the lock with everyone else.
 *
 * NOTE:  We can NOT sleep on i_exlockc because it is on an odd byte boundary
 *	  and the low (oddness) bit is reserved for networking/supervisor mode
 *	  sleep channels.  Thus we always sleep on i_shlockc and simply check
 *	  the proper bits to see if the lock we want is granted.  This may
 *	  mean an extra wakeup/sleep event is done once in a while but
 *	  everything will work correctly.
*/
again:
    while (ip->i_flag & IEXLOCK) {
        /*
         * If we're holding an exclusive
         * lock, then release it.
         */
        if (fp->f_flag & FEXLOCK) {
            ino_unlock(fp, FEXLOCK);
            continue;
        }
        if (cmd & LOCK_NB)
            return (EWOULDBLOCK);
        ip->i_flag |= ILWAIT;
        error = tsleep((caddr_t)&ip->i_shlockc, priority | PCATCH, 0);
        if  (error)
            return(error);
    }
    if ((cmd & LOCK_EX) && (ip->i_flag & ISHLOCK)) {
        /*
         * Must wait for any shared locks to finish
         * before we try to apply an exclusive lock.
         *
         * If we're holding a shared
         * lock, then release it.
         */
        if (fp->f_flag & FSHLOCK) {
            ino_unlock(fp, FSHLOCK);
            goto again;
        }
        if (cmd & LOCK_NB)
            return (EWOULDBLOCK);
        ip->i_flag |= ILWAIT;
        error = tsleep((caddr_t)&ip->i_shlockc, PLOCK | PCATCH, 0);
        if  (error)
            return(error);
        goto again;
    }
    if (cmd & LOCK_EX) {
        cmd &= ~LOCK_SH;
        ip->i_exlockc++;
        ip->i_flag |= IEXLOCK;
        fp->f_flag |= FEXLOCK;
    }
    if ((cmd & LOCK_SH) && (fp->f_flag & FSHLOCK) == 0) {
        ip->i_shlockc++;
        ip->i_flag |= ISHLOCK;
        fp->f_flag |= FSHLOCK;
    }
    return (0);
}
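
/*
 * Illustrative sketch (not part of the original source): these advisory
 * locks back the flock(2) system call, so a user process serializing
 * access to a spool file would do roughly:
 *
 *	if (flock(fd, LOCK_EX|LOCK_NB) < 0 && errno == EWOULDBLOCK)
 *		(someone else holds the lock; back off and retry)
 *
 * The ERESTART pseudo-error mentioned above never reaches user code; the
 * syscall layer uses it to restart the call after a caught signal.
 */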

/*
 * Unlock a file.
 */
ino_unlock(fp, kind)
    register struct file *fp;
    int kind;
{
    register struct inode *ip = (struct inode *)fp->f_data;
    register int flags;

    kind &= fp->f_flag;
    if (ip == NULL || kind == 0)
        return;
    flags = ip->i_flag;
    if (kind & FSHLOCK) {
        if (--ip->i_shlockc == 0) {
            ip->i_flag &= ~ISHLOCK;
            if (flags & ILWAIT)
                wakeup((caddr_t)&ip->i_shlockc);
        }
        fp->f_flag &= ~FSHLOCK;
    }
    if (kind & FEXLOCK) {
        if (--ip->i_exlockc == 0) {
            ip->i_flag &= ~(IEXLOCK|ILWAIT);
            if (flags & ILWAIT)
                wakeup((caddr_t)&ip->i_shlockc);
        }
        fp->f_flag &= ~FEXLOCK;
    }
}

/*
 * Openi is called to allow the handler of special files to initialize and
 * validate before actual IO.
 */
openi(ip, mode)
    register struct inode *ip;
    int mode;
{
    register dev_t dev = ip->i_rdev;
    register int maj = major(dev);
    dev_t bdev;
    int error;

    switch (ip->i_mode&IFMT) {

    case IFCHR:
        if (ip->i_fs->fs_flags & MNT_NODEV)
            return(ENXIO);
        if ((u_int)maj >= nchrdev)
            return (ENXIO);
        if (mode & FWRITE) {
            /*
             * When running in very secure mode, do not allow
             * opens for writing of any disk character devices.
             */
            if (securelevel >= 2 && isdisk(dev, IFCHR))
                return(EPERM);
            /*
             * When running in secure mode, do not allow opens
             * for writing of /dev/mem, /dev/kmem, or character
             * devices whose corresponding block devices are
             * currently mounted.
             */
            if (securelevel >= 1) {
                if ((bdev = chrtoblk(dev)) != NODEV &&
                    (error = ufs_mountedon(bdev)))
                        return(error);
                if (iskmemdev(dev))
                    return(EPERM);
            }
        }
        return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR));

    case IFBLK:
        if (ip->i_fs->fs_flags & MNT_NODEV)
            return(ENXIO);
        if ((u_int)maj >= nblkdev)
            return (ENXIO);
        /*
         * When running in very secure mode, do not allow
         * opens for writing of any disk block devices.
         */
        if (securelevel >= 2 && (mode & FWRITE) && isdisk(dev, IFBLK))
            return(EPERM);
        /*
         * Do not allow opens of block devices that are
         * currently mounted.
         *
         * 2.11BSD must relax this restriction to allow 'fsck' to
         * open the root filesystem (which is always mounted) during
         * a reboot.  Once in secure or very secure mode the
         * above restriction is fully effective.  On the other hand
         * fsck should 1) use the raw device, 2) not do sync calls...
         */
        if (securelevel > 0 && (error = ufs_mountedon(dev)))
            return(error);
        return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK));
    }
    return (0);
}
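
/*
 * Illustrative note (not part of the original source): the securelevel
 * checks above mean that at securelevel 2 even root cannot open a disk's
 * raw or block device for writing, e.g. open("/dev/rra0a", O_WRONLY)
 * fails with EPERM; at securelevel 1 only mounted devices and the
 * (k)mem devices are protected.
 */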

/*
 * Revoke access to the current tty by all processes.
 * Used only by the super-user in init
 * to give ``clean'' terminals at login.
 */
vhangup()
{

    if (!suser())
        return;
    if (u.u_ttyp == NULL)
        return;
    forceclose(u.u_ttyd);
    if ((u.u_ttyp->t_state) & TS_ISOPEN)
        gsignal(u.u_ttyp->t_pgrp, SIGHUP);
}
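
/*
 * Illustrative note (not part of the original source): init(8) is the
 * intended caller.  Before starting a fresh getty on a line it can issue
 * the vhangup() syscall so that any process still holding the old
 * session's descriptors loses FREAD/FWRITE on them (see forceclose()
 * below) and the new login gets a clean terminal.
 */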

forceclose(dev)
    register dev_t dev;
{
    register struct file *fp;
    register struct inode *ip;

    for (fp = file; fp < fileNFILE; fp++) {
        if (fp->f_count == 0)
            continue;
        if (fp->f_type != DTYPE_INODE)
            continue;
        ip = (struct inode *)fp->f_data;
        if (ip == 0)
            continue;
        if ((ip->i_mode & IFMT) != IFCHR)
            continue;
        if (ip->i_rdev != dev)
            continue;
        fp->f_flag &= ~(FREAD|FWRITE);
    }
}