/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)ufs_alloc.c	7.1 (Berkeley) 6/5/86
 */

#include "param.h"
#include "systm.h"
#include "mount.h"
#include "fs.h"
#include "buf.h"
#include "inode.h"
#include "dir.h"
#include "user.h"
#include "quota.h"
#include "kernel.h"
#include "syslog.h"
#include "cmap.h"

extern u_long       hashalloc();
extern ino_t        ialloccg();
extern daddr_t      alloccg();
extern daddr_t      alloccgblk();
extern daddr_t      fragextend();
extern daddr_t      blkpref();
extern daddr_t      mapsearch();
extern int      inside[], around[];
extern unsigned char    *fragtbl[];
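
/*
 * The inside[] and around[] masks and the fragtbl[] tables are
 * precomputed bit patterns consulted by mapsearch() below to locate
 * a run of free fragments of a given size within a block's free map.
 */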

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
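/*
 * For example, with the common 8 Kbyte block, 1 Kbyte fragment
 * configuration (fs_bsize = 8192, fs_fsize = 1024), the legal
 * values of `size' are 1024, 2048, ..., 8192.
 */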
struct buf *
alloc(ip, bpref, size)
    register struct inode *ip;
    daddr_t bpref;
    int size;
{
    daddr_t bno;
    register struct fs *fs;
    register struct buf *bp;
    int cg;

    fs = ip->i_fs;
    if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
        printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
            ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
        panic("alloc: bad size");
    }
    if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
        goto nospace;
    if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
        goto nospace;
#ifdef QUOTA
    u.u_error = chkdq(ip, (long)btodb(size), 0);
    if (u.u_error)
        return (NULL);
#endif
    if (bpref >= fs->fs_size)
        bpref = 0;
    if (bpref == 0)
        cg = itog(fs, ip->i_number);
    else
        cg = dtog(fs, bpref);
    bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
        (u_long (*)())alloccg);
    if (bno <= 0)
        goto nospace;
    ip->i_blocks += btodb(size);
    ip->i_flag |= IUPD|ICHG;
    bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
    clrbuf(bp);
    return (bp);
nospace:
    fserr(fs, "file system full");
    uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
    u.u_error = ENOSPC;
    return (NULL);
}

/*
 * Reallocate a fragment to a larger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
struct buf *
realloccg(ip, bprev, bpref, osize, nsize)
    register struct inode *ip;
    daddr_t bprev, bpref;
    int osize, nsize;
{
    register struct fs *fs;
    register struct buf *bp, *obp;
    int cg, request;
    daddr_t bno, bn;
    int i, count, s;
    extern struct cmap *mfind();

    fs = ip->i_fs;
    if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
        (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
        printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
            ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
        panic("realloccg: bad size");
    }
    if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
        goto nospace;
    if (bprev == 0) {
        printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
            ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
        panic("realloccg: bad bprev");
    }
#ifdef QUOTA
    u.u_error = chkdq(ip, (long)btodb(nsize - osize), 0);
    if (u.u_error)
        return (NULL);
#endif
    cg = dtog(fs, bprev);
    bno = fragextend(ip, cg, (long)bprev, osize, nsize);
    if (bno != 0) {
        do {
            bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
            if (bp->b_flags & B_ERROR) {
                brelse(bp);
                return (NULL);
            }
        } while (brealloc(bp, nsize) == 0);
        bp->b_flags |= B_DONE;
        bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
        ip->i_blocks += btodb(nsize - osize);
        ip->i_flag |= IUPD|ICHG;
        return (bp);
    }
    if (bpref >= fs->fs_size)
        bpref = 0;
    switch ((int)fs->fs_optim) {
    case FS_OPTSPACE:
        /*
         * Allocate an exact sized fragment. Although this makes
         * best use of space, we will waste time relocating it if
         * the file continues to grow. If the fragmentation is
         * less than half of the minimum free reserve, we choose
         * to begin optimizing for time.
         */
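        /*
         * For example, with fs_minfree = 10% and fs_dsize = 100000
         * frags, the switch to FS_OPTTIME is made once cs_nffree
         * falls to 100000 * 10 / (2 * 100) = 5000 frags or below,
         * i.e. once fragmentation is within half of the reserve.
         */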
        request = nsize;
        if (fs->fs_minfree < 5 ||
            fs->fs_cstotal.cs_nffree >
            fs->fs_dsize * fs->fs_minfree / (2 * 100))
            break;
        log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
            fs->fs_fsmnt);
        fs->fs_optim = FS_OPTTIME;
        break;
    case FS_OPTTIME:
        /*
         * At this point we have discovered a file that is trying
         * to grow a small fragment to a larger fragment. To save
         * time, we allocate a full sized block, then free the
         * unused portion. If the file continues to grow, the
         * `fragextend' call above will be able to grow it in place
         * without further copying. If aberrant programs cause
         * disk fragmentation to grow within 2% of the free reserve,
         * we choose to begin optimizing for space.
         */
        request = fs->fs_bsize;
        if (fs->fs_cstotal.cs_nffree <
            fs->fs_dsize * (fs->fs_minfree - 2) / 100)
            break;
        log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
            fs->fs_fsmnt);
        fs->fs_optim = FS_OPTSPACE;
        break;
    default:
        printf("dev = 0x%x, optim = %d, fs = %s\n",
            ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
        panic("realloccg: bad optim");
        /* NOTREACHED */
    }
    bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
        (u_long (*)())alloccg);
    if (bno > 0) {
        obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
        if (obp->b_flags & B_ERROR) {
            brelse(obp);
            return (NULL);
        }
        bn = fsbtodb(fs, bno);
        bp = getblk(ip->i_dev, bn, nsize);
        bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
        count = howmany(osize, DEV_BSIZE);
        s = splimp();
        for (i = 0; i < count; i += CLBYTES / DEV_BSIZE)
            if (mfind(ip->i_dev, bn + i))
                munhash(ip->i_dev, bn + i);
        splx(s);
        bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
        if (obp->b_flags & B_DELWRI) {
            obp->b_flags &= ~B_DELWRI;
            u.u_ru.ru_oublock--;        /* delete charge */
        }
        brelse(obp);
        free(ip, bprev, (off_t)osize);
        if (nsize < request)
            free(ip, bno + numfrags(fs, nsize),
                (off_t)(request - nsize));
        ip->i_blocks += btodb(nsize - osize);
        ip->i_flag |= IUPD|ICHG;
        return (bp);
    }
nospace:
    /*
     * no space available
     */
    fserr(fs, "file system full");
    uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
    u.u_error = ENOSPC;
    return (NULL);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 *   1) allocate the requested inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
struct inode *
ialloc(pip, ipref, mode)
    register struct inode *pip;
    ino_t ipref;
    int mode;
{
    ino_t ino;
    register struct fs *fs;
    register struct inode *ip;
    int cg;

    fs = pip->i_fs;
    if (fs->fs_cstotal.cs_nifree == 0)
        goto noinodes;
#ifdef QUOTA
    u.u_error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0);
    if (u.u_error)
        return (NULL);
#endif
    if (ipref >= fs->fs_ncg * fs->fs_ipg)
        ipref = 0;
    cg = itog(fs, ipref);
    ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
    if (ino == 0)
        goto noinodes;
    ip = iget(pip->i_dev, pip->i_fs, ino);
    if (ip == NULL) {
        ifree(pip, ino, 0);
        return (NULL);
    }
    if (ip->i_mode) {
        printf("mode = 0%o, inum = %d, fs = %s\n",
            ip->i_mode, ip->i_number, fs->fs_fsmnt);
        panic("ialloc: dup alloc");
    }
    if (ip->i_blocks) {             /* XXX */
        printf("free inode %s/%d had %d blocks\n",
            fs->fs_fsmnt, ino, ip->i_blocks);
        ip->i_blocks = 0;
    }
    return (ip);
noinodes:
    fserr(fs, "out of inodes");
    uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
    u.u_error = ENOSPC;
    return (NULL);
}

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to select, from
 * among those cylinder groups with an above-average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
    register struct fs *fs;
{
    int cg, minndir, mincg, avgifree;

    avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
    minndir = fs->fs_ipg;
    mincg = 0;
    for (cg = 0; cg < fs->fs_ncg; cg++)
        if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
            fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
            mincg = cg;
            minndir = fs->fs_cs(fs, cg).cs_ndir;
        }
    return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
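/*
 * For example, with fs_maxbpg = 2048 and 8192-byte blocks, each
 * section beyond the direct blocks spans 16 Mbytes, so a large file
 * is spread across cylinder groups in 16 Mbyte pieces.
 */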
daddr_t
blkpref(ip, lbn, indx, bap)
    struct inode *ip;
    daddr_t lbn;
    int indx;
    daddr_t *bap;
{
    register struct fs *fs;
    register int cg;
    int avgbfree, startcg;
    daddr_t nextblk;

    fs = ip->i_fs;
    if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
        if (lbn < NDADDR) {
            cg = itog(fs, ip->i_number);
            return (fs->fs_fpg * cg + fs->fs_frag);
        }
        /*
         * Find a cylinder with greater than average number of
         * unused data blocks.
         */
        if (indx == 0 || bap[indx - 1] == 0)
            startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
        else
            startcg = dtog(fs, bap[indx - 1]) + 1;
        startcg %= fs->fs_ncg;
        avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
        for (cg = startcg; cg < fs->fs_ncg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                fs->fs_cgrotor = cg;
                return (fs->fs_fpg * cg + fs->fs_frag);
            }
        for (cg = 0; cg <= startcg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                fs->fs_cgrotor = cg;
                return (fs->fs_fpg * cg + fs->fs_frag);
            }
        return (NULL);
    }
    /*
     * One or more previous blocks have been laid out. If less
     * than fs_maxcontig previous blocks are contiguous, the
     * next block is requested contiguously, otherwise it is
     * requested rotationally delayed by fs_rotdelay milliseconds.
     */
    nextblk = bap[indx - 1] + fs->fs_frag;
    if (indx > fs->fs_maxcontig &&
        bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
        != nextblk)
        return (nextblk);
    if (fs->fs_rotdelay != 0)
        /*
         * Here we convert ms of delay to frags as:
         * (frags) = (ms) * (rev/sec) * (sect/rev) /
         *	((sect/frag) * (ms/sec))
         * then round up to the next block.
         */
        nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
            (NSPF(fs) * 1000), fs->fs_frag);
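    /*
     * For example, a drive with fs_rps = 60 and fs_nsect = 32,
     * NSPF(fs) = 2, and a 4 ms fs_rotdelay gives
     * 4 * 60 * 32 / (2 * 1000) = 3 frags, which roundup() then
     * advances to the next multiple of fs_frag (8 frags for an
     * 8-frag block).
     */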
    return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
    struct inode *ip;
    int cg;
    long pref;
    int size;   /* size for data blocks, mode for inodes */
    u_long (*allocator)();
{
    register struct fs *fs;
    long result;
    int i, icg = cg;

    fs = ip->i_fs;
    /*
     * 1: preferred cylinder group
     */
    result = (*allocator)(ip, cg, pref, size);
    if (result)
        return (result);
    /*
     * 2: quadratic rehash
     */
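    /*
     * For example, starting from cylinder group 5 the probes below
     * fall on groups 6, 8, 12, 20, ... (offsets 1, 3, 7, 15 from
     * the preferred group), wrapping around mod fs_ncg.
     */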
    for (i = 1; i < fs->fs_ncg; i *= 2) {
        cg += i;
        if (cg >= fs->fs_ncg)
            cg -= fs->fs_ncg;
        result = (*allocator)(ip, cg, 0, size);
        if (result)
            return (result);
    }
    /*
     * 3: brute force search
     * Note that we start at i == 2, since 0 was checked initially,
     * and 1 is always checked in the quadratic rehash.
     */
    cg = (icg + 2) % fs->fs_ncg;
    for (i = 2; i < fs->fs_ncg; i++) {
        result = (*allocator)(ip, cg, 0, size);
        if (result)
            return (result);
        cg++;
        if (cg == fs->fs_ncg)
            cg = 0;
    }
    return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
    struct inode *ip;
    int cg;
    long bprev;
    int osize, nsize;
{
    register struct fs *fs;
    register struct buf *bp;
    register struct cg *cgp;
    long bno;
    int frags, bbase;
    int i;

    fs = ip->i_fs;
    if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
        return (NULL);
    frags = numfrags(fs, nsize);
    bbase = fragnum(fs, bprev);
    if (bbase > fragnum(fs, (bprev + frags - 1))) {
        /* cannot extend across a block boundary */
        return (NULL);
    }
    bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
    cgp = bp->b_un.b_cg;
    if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
        brelse(bp);
        return (NULL);
    }
    cgp->cg_time = time.tv_sec;
    bno = dtogd(fs, bprev);
    for (i = numfrags(fs, osize); i < frags; i++)
        if (isclr(cgp->cg_free, bno + i)) {
            brelse(bp);
            return (NULL);
        }
    /*
     * The current fragment can be extended:
     * deduct the count on the fragment being extended into,
     * increase the count on the remaining fragment (if any),
     * allocate the extended piece.
     */
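    /*
     * For example, growing a 2-frag piece that is followed by 4 free
     * frags into a 4-frag piece: the count of free runs of length 4
     * (cg_frsum[4]) is decremented and the count of runs of length 2
     * (cg_frsum[2]) is incremented for the 2 frags left over.
     */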
    for (i = frags; i < fs->fs_frag - bbase; i++)
        if (isclr(cgp->cg_free, bno + i))
            break;
    cgp->cg_frsum[i - numfrags(fs, osize)]--;
    if (i != frags)
        cgp->cg_frsum[i - frags]++;
    for (i = numfrags(fs, osize); i < frags; i++) {
        clrbit(cgp->cg_free, bno + i);
        cgp->cg_cs.cs_nffree--;
        fs->fs_cstotal.cs_nffree--;
        fs->fs_cs(fs, cg).cs_nffree--;
    }
    fs->fs_fmod++;
    bdwrite(bp);
    return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
    struct inode *ip;
    int cg;
    daddr_t bpref;
    int size;
{
    register struct fs *fs;
    register struct buf *bp;
    register struct cg *cgp;
    int bno, frags;
    int allocsiz;
    register int i;

    fs = ip->i_fs;
    if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
        return (NULL);
    bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
    cgp = bp->b_un.b_cg;
    if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC ||
        (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
        brelse(bp);
        return (NULL);
    }
    cgp->cg_time = time.tv_sec;
    if (size == fs->fs_bsize) {
        bno = alloccgblk(fs, cgp, bpref);
        bdwrite(bp);
        return (bno);
    }
    /*
     * check to see if any fragments are already available
     * allocsiz is the size which will be allocated, hacking
     * it down to a smaller size if necessary
     */
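    /*
     * cg_frsum[i] counts the free runs of exactly i frags in this
     * cylinder group, so the scan below finds the smallest free run
     * that can hold the request.
     */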
    frags = numfrags(fs, size);
    for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
        if (cgp->cg_frsum[allocsiz] != 0)
            break;
    if (allocsiz == fs->fs_frag) {
        /*
         * no fragments were available, so a block will be
         * allocated, and hacked up
         */
        if (cgp->cg_cs.cs_nbfree == 0) {
            brelse(bp);
            return (NULL);
        }
        bno = alloccgblk(fs, cgp, bpref);
        bpref = dtogd(fs, bno);
        for (i = frags; i < fs->fs_frag; i++)
            setbit(cgp->cg_free, bpref + i);
        i = fs->fs_frag - frags;
        cgp->cg_cs.cs_nffree += i;
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        fs->fs_fmod++;
        cgp->cg_frsum[i]++;
        bdwrite(bp);
        return (bno);
    }
    bno = mapsearch(fs, cgp, bpref, allocsiz);
    if (bno < 0) {
        brelse(bp);
        return (NULL);
    }
    for (i = 0; i < frags; i++)
        clrbit(cgp->cg_free, bno + i);
    cgp->cg_cs.cs_nffree -= frags;
    fs->fs_cstotal.cs_nffree -= frags;
    fs->fs_cs(fs, cg).cs_nffree -= frags;
    fs->fs_fmod++;
    cgp->cg_frsum[allocsiz]--;
    if (frags != allocsiz)
        cgp->cg_frsum[allocsiz - frags]++;
    bdwrite(bp);
    return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
    register struct fs *fs;
    register struct cg *cgp;
    daddr_t bpref;
{
    daddr_t bno;
    int cylno, pos, delta;
    short *cylbp;
    register int i;

    if (bpref == 0) {
        bpref = cgp->cg_rotor;
        goto norot;
    }
    bpref = blknum(fs, bpref);
    bpref = dtogd(fs, bpref);
    /*
     * if the requested block is available, use it
     */
    if (isblock(fs, cgp->cg_free, fragstoblks(fs, bpref))) {
        bno = bpref;
        goto gotit;
    }
    /*
     * check for a block available on the same cylinder
     */
    cylno = cbtocylno(fs, bpref);
    if (cgp->cg_btot[cylno] == 0)
        goto norot;
    if (fs->fs_cpc == 0) {
        /*
         * block layout info is not available, so just have
         * to take any block in this cylinder.
         */
        bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
        goto norot;
    }
    /*
     * check the summary information to see if a block is
     * available in the requested cylinder starting at the
     * requested rotational position and proceeding around.
     */
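    /*
     * cg_b[cylno] counts the free blocks at each rotational position
     * in the cylinder; fs_postbl[pos][i] names the first block slot
     * at rotational position i, and fs_rotbl chains each slot to the
     * next one at the same position.
     */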
    cylbp = cgp->cg_b[cylno];
    pos = cbtorpos(fs, bpref);
    for (i = pos; i < NRPOS; i++)
        if (cylbp[i] > 0)
            break;
    if (i == NRPOS)
        for (i = 0; i < pos; i++)
            if (cylbp[i] > 0)
                break;
    if (cylbp[i] > 0) {
        /*
         * found a rotational position, now find the actual
         * block. Panic if none is actually there.
         */
        pos = cylno % fs->fs_cpc;
        bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
        if (fs->fs_postbl[pos][i] == -1) {
            printf("pos = %d, i = %d, fs = %s\n",
                pos, i, fs->fs_fsmnt);
            panic("alloccgblk: cyl groups corrupted");
        }
        for (i = fs->fs_postbl[pos][i];; ) {
            if (isblock(fs, cgp->cg_free, bno + i)) {
                bno = blkstofrags(fs, (bno + i));
                goto gotit;
            }
            delta = fs->fs_rotbl[i];
            if (delta <= 0 || delta > MAXBPC - i)
                break;
            i += delta;
        }
        printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
        panic("alloccgblk: can't find blk in cyl");
    }
norot:
    /*
     * no blocks in the requested cylinder, so take next
     * available one in this cylinder group.
     */
    bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
    if (bno < 0)
        return (NULL);
    cgp->cg_rotor = bno;
gotit:
    clrblock(fs, cgp->cg_free, (long)fragstoblks(fs, bno));
    cgp->cg_cs.cs_nbfree--;
    fs->fs_cstotal.cs_nbfree--;
    fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
    cylno = cbtocylno(fs, bno);
    cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
    cgp->cg_btot[cylno]--;
    fs->fs_fmod++;
    return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
    struct inode *ip;
    int cg;
    daddr_t ipref;
    int mode;
{
    register struct fs *fs;
    register struct cg *cgp;
    struct buf *bp;
    int start, len, loc, map, i;

    fs = ip->i_fs;
    if (fs->fs_cs(fs, cg).cs_nifree == 0)
        return (NULL);
    bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
    cgp = bp->b_un.b_cg;
    if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC ||
        cgp->cg_cs.cs_nifree == 0) {
        brelse(bp);
        return (NULL);
    }
    cgp->cg_time = time.tv_sec;
    if (ipref) {
        ipref %= fs->fs_ipg;
        if (isclr(cgp->cg_iused, ipref))
            goto gotit;
    }
    start = cgp->cg_irotor / NBBY;
    len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
    loc = skpc(0xff, len, &cgp->cg_iused[start]);
    if (loc == 0) {
        len = start + 1;
        start = 0;
        loc = skpc(0xff, len, &cgp->cg_iused[0]);
        if (loc == 0) {
            printf("cg = %d, irotor = %d, fs = %s\n",
                cg, cgp->cg_irotor, fs->fs_fsmnt);
            panic("ialloccg: map corrupted");
            /* NOTREACHED */
        }
    }
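    /*
     * skpc() skips over bytes equal to 0xff (fully allocated) and
     * returns the count of bytes remaining, so start + len - loc
     * indexes the first inode map byte with a free bit.
     */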
    i = start + len - loc;
    map = cgp->cg_iused[i];
    ipref = i * NBBY;
    for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
        if ((map & i) == 0) {
            cgp->cg_irotor = ipref;
            goto gotit;
        }
    }
    printf("fs = %s\n", fs->fs_fsmnt);
    panic("ialloccg: block not in map");
    /* NOTREACHED */
gotit:
    setbit(cgp->cg_iused, ipref);
    cgp->cg_cs.cs_nifree--;
    fs->fs_cstotal.cs_nifree--;
    fs->fs_cs(fs, cg).cs_nifree--;
    fs->fs_fmod++;
    if ((mode & IFMT) == IFDIR) {
        cgp->cg_cs.cs_ndir++;
        fs->fs_cstotal.cs_ndir++;
        fs->fs_cs(fs, cg).cs_ndir++;
    }
    bdwrite(bp);
    return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a check is made
 * to see whether the fragments can be reassembled into a
 * full block.
 */
free(ip, bno, size)
    register struct inode *ip;
    daddr_t bno;
    off_t size;
{
    register struct fs *fs;
    register struct cg *cgp;
    register struct buf *bp;
    int cg, blk, frags, bbase;
    register int i;

    fs = ip->i_fs;
    if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
        printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
            ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
        panic("free: bad size");
    }
    cg = dtog(fs, bno);
    if (badblock(fs, bno)) {
        printf("bad block %d, ino %d\n", bno, ip->i_number);
        return;
    }
    bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
    cgp = bp->b_un.b_cg;
    if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
        brelse(bp);
        return;
    }
    cgp->cg_time = time.tv_sec;
    bno = dtogd(fs, bno);
    if (size == fs->fs_bsize) {
        if (isblock(fs, cgp->cg_free, fragstoblks(fs, bno))) {
            printf("dev = 0x%x, block = %d, fs = %s\n",
                ip->i_dev, bno, fs->fs_fsmnt);
            panic("free: freeing free block");
        }
        setblock(fs, cgp->cg_free, fragstoblks(fs, bno));
        cgp->cg_cs.cs_nbfree++;
        fs->fs_cstotal.cs_nbfree++;
        fs->fs_cs(fs, cg).cs_nbfree++;
        i = cbtocylno(fs, bno);
        cgp->cg_b[i][cbtorpos(fs, bno)]++;
        cgp->cg_btot[i]++;
    } else {
        bbase = bno - fragnum(fs, bno);
        /*
         * decrement the counts associated with the old frags
         */
        blk = blkmap(fs, cgp->cg_free, bbase);
        fragacct(fs, blk, cgp->cg_frsum, -1);
        /*
         * deallocate the fragment
         */
        frags = numfrags(fs, size);
        for (i = 0; i < frags; i++) {
            if (isset(cgp->cg_free, bno + i)) {
                printf("dev = 0x%x, block = %d, fs = %s\n",
                    ip->i_dev, bno + i, fs->fs_fsmnt);
                panic("free: freeing free frag");
            }
            setbit(cgp->cg_free, bno + i);
        }
        cgp->cg_cs.cs_nffree += i;
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        /*
         * add back in counts associated with the new frags
         */
        blk = blkmap(fs, cgp->cg_free, bbase);
        fragacct(fs, blk, cgp->cg_frsum, 1);
        /*
         * if a complete block has been reassembled, account for it
         */
        if (isblock(fs, cgp->cg_free, fragstoblks(fs, bbase))) {
            cgp->cg_cs.cs_nffree -= fs->fs_frag;
            fs->fs_cstotal.cs_nffree -= fs->fs_frag;
            fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
            cgp->cg_cs.cs_nbfree++;
            fs->fs_cstotal.cs_nbfree++;
            fs->fs_cs(fs, cg).cs_nbfree++;
            i = cbtocylno(fs, bbase);
            cgp->cg_b[i][cbtorpos(fs, bbase)]++;
            cgp->cg_btot[i]++;
        }
    }
    fs->fs_fmod++;
    bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
    struct inode *ip;
    ino_t ino;
    int mode;
{
    register struct fs *fs;
    register struct cg *cgp;
    register struct buf *bp;
    int cg;

    fs = ip->i_fs;
    if ((unsigned)ino >= fs->fs_ipg * fs->fs_ncg) {
        printf("dev = 0x%x, ino = %d, fs = %s\n",
            ip->i_dev, ino, fs->fs_fsmnt);
        panic("ifree: range");
    }
    cg = itog(fs, ino);
    bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
    cgp = bp->b_un.b_cg;
    if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
        brelse(bp);
        return;
    }
    cgp->cg_time = time.tv_sec;
    ino %= fs->fs_ipg;
    if (isclr(cgp->cg_iused, ino)) {
        printf("dev = 0x%x, ino = %d, fs = %s\n",
            ip->i_dev, ino, fs->fs_fsmnt);
        panic("ifree: freeing free inode");
    }
    clrbit(cgp->cg_iused, ino);
    if (ino < cgp->cg_irotor)
        cgp->cg_irotor = ino;
    cgp->cg_cs.cs_nifree++;
    fs->fs_cstotal.cs_nifree++;
    fs->fs_cs(fs, cg).cs_nifree++;
    if ((mode & IFMT) == IFDIR) {
        cgp->cg_cs.cs_ndir--;
        fs->fs_cstotal.cs_ndir--;
        fs->fs_cs(fs, cg).cs_ndir--;
    }
    fs->fs_fmod++;
    bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none
 * are available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
    register struct fs *fs;
    register struct cg *cgp;
    daddr_t bpref;
    int allocsiz;
{
    daddr_t bno;
    int start, len, loc, i;
    int blk, field, subfield, pos;

    /*
     * find the fragment by searching through the free block
     * map for an appropriate bit pattern
     */
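    /*
     * scanc() consults fragtbl[fs->fs_frag], which maps each free-map
     * byte to a summary of the free runs it contains; the mask passed
     * below picks out bytes holding a run of `allocsiz' free frags.
     */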
    if (bpref)
        start = dtogd(fs, bpref) / NBBY;
    else
        start = cgp->cg_frotor / NBBY;
    len = howmany(fs->fs_fpg, NBBY) - start;
    loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[start],
        (caddr_t)fragtbl[fs->fs_frag],
        (int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
    if (loc == 0) {
        len = start + 1;
        start = 0;
        loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[0],
            (caddr_t)fragtbl[fs->fs_frag],
            (int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
            printf("start = %d, len = %d, fs = %s\n",
                start, len, fs->fs_fsmnt);
            panic("alloccg: map corrupted");
            /* NOTREACHED */
        }
    }
    bno = (start + len - loc) * NBBY;
    cgp->cg_frotor = bno;
    /*
     * found the byte in the map
     * sift through the bits to find the selected frag
     */
    for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
        blk = blkmap(fs, cgp->cg_free, bno);
        blk <<= 1;
        field = around[allocsiz];
        subfield = inside[allocsiz];
        for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
            if ((blk & field) == subfield)
                return (bno + pos);
            field <<= 1;
            subfield <<= 1;
        }
    }
    printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
    panic("alloccg: block not in map");
    return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
    struct fs *fs;
    char *cp;
{

    log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
}
