/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)quota_kern.c	7.1.4 (2.11BSD GTE) 1997/1/18
 *
 * I'll say it here and not every other place i've had to hack:
 * Mike Karels was right - "just buy a vax...".  i have traded cpu cycles
 * for kernel D space - if enough space ever becomes available then the
 * p_quota, i_dquot members can be added to their respective structures,
 * and the quota[] array might possibly be moved into the kernel, it is
 * highly doubtful the dquot structures can ever be moved into the kernel
 * D space.  Then too, the mapping of the out of kernel quota space is
 * swamped by the overhead of doing the quotas, and quotas sure as heck
 * beat doing 'du's all the time!
 *	Steven M. Schultz 1/12/88
 */

/*
 * MELBOURNE QUOTAS
 *
 * Code pertaining to management of the in-core data structures.
 */
#include "param.h"
#ifdef QUOTA
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "inode.h"
#include "quota.h"
#include "fs.h"
#include "mount.h"
#include "namei.h"

/*
 * Quota cache - hash chain headers.
 */
#ifndef pdp11
#define	NQHASH		32	/* small power of two */
#endif
#define	QHASH(uid)	((unsigned)(uid) & (NQHASH-1))

#ifndef pdp11
struct	qhash {
	struct	qhash *qh_forw;	/* MUST be first */
	struct	qhash *qh_back;	/* MUST be second */
};

struct	qhash qhash[NQHASH];
#else
struct	qhash *qhash;
#endif

/*
 * Quota free list.
 */
struct	quota *qfreelist, **qfreetail;
typedef	struct quota *Qptr;

/*
 * Dquot cache - hash chain headers.
 */
#ifndef pdp11			/* and 51 isn't even prime, see quota.h */
#define	NDQHASH		51	/* a smallish prime */
#endif
#define	DQHASH(uid, dev) \
	((unsigned)(((int)(dev) * 4) + (uid)) % NDQHASH)

#ifndef pdp11
struct	dqhead {
	struct	dqhead *dqh_forw;	/* MUST be first */
	struct	dqhead *dqh_back;	/* MUST be second */
};

struct	dqhead dqhead[NDQHASH];
#else
struct	dqhead *dqhead;
#endif

/*
 * Dquot free list.
 */
struct	dquot *dqfreel, **dqback;

typedef	struct dquot *DQptr;

/*
 * Initialize quota caches.
 */
qtinit()
{
	register i;
	register struct quota *q = quota;
	register struct qhash *qh = qhash;
	register struct dquot *dq = dquot;
	register struct dqhead *dh = dqhead;

	/*
	 * First the cache of structures assigned users.
	 */
	for (i = NQHASH; --i >= 0; qh++)
		qh->qh_forw = qh->qh_back = qh;
	qfreelist = q;
	qfreetail = &q->q_freef;
	q->q_freeb = &qfreelist;
	q->q_forw = q;
	q->q_back = q;
	for (i = nquota; --i > 0; ) {
		++q;
		q->q_forw = q;
		q->q_back = q;
		*qfreetail = q;
		q->q_freeb = qfreetail;
		qfreetail = &q->q_freef;
	}
	q->q_freef = NOQUOTA;
	/*
	 * Next, the cache between the in-core structures
	 * and the per-filesystem quota files on disk.
	 */
	for (i = NDQHASH; --i >= 0; dh++)
		dh->dqh_forw = dh->dqh_back = dh;
	dqfreel = dq;
	dqback = &dq->dq_freef;
	dq->dq_freeb = &dqfreel;
	dq->dq_forw = dq;
	dq->dq_back = dq;
	for (i = ndquot; --i > 0; ) {
		++dq;
		dq->dq_forw = dq;
		dq->dq_back = dq;
		*dqback = dq;
		dq->dq_freeb = dqback;
		dqback = &dq->dq_freef;
	}
	dq->dq_freef = NODQUOT;
}

/*
 * Find an incore quota structure for a particular uid,
 * or make one.  If lookuponly is non-zero, just the lookup is performed.
 * If nodq is non-zero, the dquot structures are left uninitialized.
 */
struct quota *
getquota(uid, lookuponly, nodq)
	register uid_t uid;
	int lookuponly, nodq;
{
	register struct quota *q;
	register struct qhash *qh;
	register struct dquot **dqq;
	register struct mount *mp;
	register struct quota *qq;

	/*
	 * Fast check to see if an existing structure
	 * can be reused with just a reference count change.
	 */
	q = u.u_quota;
	if (q != NOQUOTA && q->q_uid == uid)
		goto quick;
	/*
	 * Search the quota cache for a hit.
	 */
	qh = &qhash[QHASH(uid)];
	for (q = (Qptr)qh->qh_forw; q != (Qptr)qh; q = q->q_forw) {
		if (q->q_uid == uid) {
			if (q->q_cnt == 0) {
				if (lookuponly)
					return (NOQUOTA);
				/*
				 * Take it off the free list.
				 */
				if ((qq = q->q_freef) != NOQUOTA)
					qq->q_freeb = q->q_freeb;
				else
					qfreetail = q->q_freeb;
				*q->q_freeb = qq;

				/*
				 * Recover any lost dquot structs.
				 */
				if (!nodq)
					for (dqq = q->q_dq, mp = mount;
					    dqq < &q->q_dq[NMOUNT]; dqq++, mp++)
#ifdef pdp11
						if (*dqq == LOSTDQUOT && mp->m_inodp) {
#else
						if (*dqq == LOSTDQUOT && mp->m_bufp) {
#endif
							*dqq = discquota(uid,
							    mp->m_qinod);
							if (*dqq != NODQUOT)
								(*dqq)->dq_own = q;
						}
			}
quick:
			q->q_cnt++;
			while (q->q_flags & Q_LOCK) {
				q->q_flags |= Q_WANT;
#ifdef pdp11
				QUOTAUNMAP();
				sleep((caddr_t) q, PINOD+1);
				QUOTAMAP();
#else
				sleep((caddr_t) q, PINOD+1);
#endif
			}
			if (q->q_cnt == 1)
				q->q_flags |= Q_NEW | nodq;
			return (q);
		}
	}
	if (lookuponly)
		return (NOQUOTA);
	/*
	 * Take the quota that is at the head of the free list
	 * (the longest unused quota).
	 */
	q = qfreelist;
	if (q == NOQUOTA) {
		tablefull("quota");
		u.u_error = EUSERS;
		q = quota;		/* the su's slot - we must have one */
		q->q_cnt++;
		return (q);
	}
	/*
	 * There is one - it is free no longer.
	 */
	qq = q->q_freef;
	if (qq != NOQUOTA)
		qq->q_freeb = &qfreelist;
	qfreelist = qq;
	/*
	 * Now we are about to change this from one user to another.
	 * Must take this off hash chain for old user immediately, in
	 * case some other process claims it before we are done.
	 * We must then put it on the hash chain for the new user,
	 * to make sure that we don't make two quota structs for one uid.
	 * (the quota struct will then be locked till we are done).
	 */
	remque(q);
	insque(q, qh);
	q->q_uid = uid;
	q->q_flags = Q_LOCK;
	q->q_cnt++;			/* q->q_cnt = 1; */
	/*
	 * Next, before filling in info for the new owning user,
	 * we must get rid of any dquot structs that we own.
	 */
	for (mp = mount, dqq = q->q_dq; mp < &mount[NMOUNT]; mp++, dqq++)
		if (*dqq != NODQUOT && *dqq != LOSTDQUOT) {
			(*dqq)->dq_own = NOQUOTA;
			putdq(mp, *dqq, 1);
		}
	for (mp = mount, dqq = q->q_dq; dqq < &q->q_dq[NMOUNT]; mp++, dqq++)
#ifdef pdp11
		if (!nodq && mp->m_inodp) {
#else
		if (!nodq && mp->m_bufp) {
#endif
			*dqq = discquota(uid, mp->m_qinod);
			if (*dqq != NODQUOT) {
				if ((*dqq)->dq_uid != uid)
					panic("got bad quota uid");
				(*dqq)->dq_own = q;
			}
		} else
			*dqq = NODQUOT;
	if (q->q_flags & Q_WANT)
		wakeup((caddr_t)q);
	q->q_flags = Q_NEW | nodq;
	return (q);
}

/*
 * Delete a quota, wakeup anyone waiting.
 */
delquota(q)
	register struct quota *q;
{
	register struct dquot **dqq;
	register struct mount *mp;

top:
	if (q->q_cnt != 1) {
		q->q_cnt--;
		return;
	}
	if (q->q_flags & Q_LOCK) {
		q->q_flags |= Q_WANT;
#ifdef pdp11
		QUOTAUNMAP();
		sleep((caddr_t)q, PINOD+2);
		QUOTAMAP();
#else
		sleep((caddr_t)q, PINOD+2);
#endif
		/*
		 * Just so we don't sync dquots if not needed;
		 * 'if' would be 'while' if this was deleted.
		 */
		goto top;
	}

	/*
	 * If we own dquot structs, sync them to disc, but don't release
	 * them - we might be recalled from the LRU chain.
	 * As we will sit on the free list while we are waiting for that,
	 * if dquot structs run out, ours will be taken away.
	 */
	q->q_flags = Q_LOCK;
	if ((q->q_flags & Q_NDQ) == 0) {
		mp = mount;
		for (dqq = q->q_dq; dqq < &q->q_dq[NMOUNT]; dqq++, mp++)
#ifdef pdp11
			if (mp->m_inodp)
#else
			if (mp->m_bufp)
#endif
				putdq(mp, *dqq, 0);
	}
	if (q->q_flags & Q_WANT)
		wakeup((caddr_t)q);

	/*
	 * This test looks unnecessary, but someone might have claimed this
	 * quota while we have been getting rid of the dquot info.
	 */
	if (--q->q_cnt == 0) {		/* now able to be reallocated */
		if (qfreelist != NOQUOTA) {
			*qfreetail = q;
			q->q_freeb = qfreetail;
		} else {
			qfreelist = q;
			q->q_freeb = &qfreelist;
		}
		q->q_freef = NOQUOTA;
		qfreetail = &q->q_freef;
		q->q_flags = 0;
	} else
		q->q_flags &= ~(Q_LOCK|Q_WANT);
}

/*
 * Obtain the user's on-disk quota limit
 * from the file specified.
 */
struct dquot *
discquota(uid, ip)
	uid_t uid;
	register struct inode *ip;
{
	register struct dquot *dq;
	register struct dqhead *dh;
	register struct dquot *dp;
	int fail;

	if (ip == NULL)
		return (NODQUOT);
	/*
	 * Check the cache first.
	 */
	dh = &dqhead[DQHASH(uid, ip->i_dev)];
	for (dq = (DQptr)dh->dqh_forw; dq != (DQptr)dh; dq = dq->dq_forw) {
		if (dq->dq_uid != uid || dq->dq_dev != ip->i_dev)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			dp = dq->dq_freef;
			if (dp != NODQUOT)
				dp->dq_freeb = dq->dq_freeb;
			else
				dqback = dq->dq_freeb;
			*dq->dq_freeb = dp;
			dq->dq_own = NOQUOTA;
		}
		/*
		 * We do this test after the previous one so that
		 * the dquot will be moved to the end of the free
		 * list - frequently accessed ones ought to hang around.
		 */
		if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0) {
			dqrele(dq);
			return (NODQUOT);
		}
		return (dq);
	}
	/*
	 * Not in cache, allocate a new one and
	 * bring info in off disk.
	 */
	dq = dqalloc(uid, ip->i_dev);
	if (dq == NODQUOT)
		return (dq);
	dq->dq_flags = DQ_LOCK;
#ifdef pdp11
	{
	struct dqblk xq;

	QUOTAUNMAP();
	ILOCK(ip);
	fail = rdwri(UIO_READ, ip, &xq, sizeof (xq),
	    (off_t)uid * sizeof (xq), UIO_SYSSPACE, IO_UNIT, (int *)0);
	QUOTAMAP();
	dq->dq_dqb = xq;
	}
#else
	ILOCK(ip);
	fail = rdwri(UIO_READ, ip, (caddr_t)&dq->dq_dqb, sizeof (struct dqblk),
	    (off_t)uid * sizeof (struct dqblk), UIO_SYSSPACE, IO_UNIT, (int *)0);
#endif
	IUNLOCK(ip);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (fail) {
		remque(dq);
		dq->dq_forw = dq;	/* on a private, unfindable hash list */
		dq->dq_back = dq;
		/* dqrele() (just below) will put dquot back on free list */
	}
	/* no quota exists */
	if (fail || dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0) {
		dqrele(dq);
		return (NODQUOT);
	}
	return (dq);
}

/*
 * Allocate a dquot structure.  If there are
 * no free slots in the cache, flush LRU entry from
 * the cache to the appropriate quota file on disk.
 */
struct dquot *
dqalloc(uid, dev)
	uid_t uid;
	dev_t dev;
{
	register struct dquot *dq;
	register struct dqhead *dh;
	register struct dquot *dp;
	register struct quota *q;
	register struct mount *mp;
	static struct dqblk zdqb = { 0 };

top:
	/*
	 * Locate inode of quota file for
	 * indicated file system in case i/o
	 * is necessary in claiming an entry.
	 */
	for (mp = mount; mp < &mount[NMOUNT]; mp++) {
#ifdef pdp11
		if (mp->m_dev == dev && mp->m_inodp) {
#else
		if (mp->m_dev == dev && mp->m_bufp) {
#endif
			if (mp->m_qinod == NULL) {
				u.u_error = EINVAL;
				return (NODQUOT);
			}
			break;
		}
	}
	if (mp >= &mount[NMOUNT]) {
		u.u_error = EINVAL;
		return (NODQUOT);
	}
	/*
	 * Check free list.  If table is full, pull entries
	 * off the quota free list and flush any associated
	 * dquot references until something frees up on the
	 * dquot free list.
	 */
	if ((dq = dqfreel) == NODQUOT && (q = qfreelist) != NOQUOTA) {

		do {
			register struct dquot **dqq;
			register struct mount *mountp = mount;

			dqq = q->q_dq;
			while (dqq < &q->q_dq[NMOUNT]) {
				if ((dq = *dqq) != NODQUOT &&
				    dq != LOSTDQUOT) {
					/*
					 * Mark entry as "lost" due to
					 * scavenging operation.
					 */
					if (dq->dq_cnt == 1) {
						*dqq = LOSTDQUOT;
						putdq(mountp, dq, 1);
						goto top;
					}
				}
				mountp++;
				dqq++;
			}
			q = q->q_freef;
		} while ((dq = dqfreel) == NODQUOT && q != NOQUOTA);
	}
	if (dq == NODQUOT) {
		tablefull("dquot");
		u.u_error = EUSERS;
		return (dq);
	}
	/*
	 * This shouldn't happen, as we sync
	 * dquot before freeing it up.
	 */
	if (dq->dq_flags & DQ_MOD)
		panic("discquota");

	/*
	 * Now take the dquot off the free list,
	 */
	dp = dq->dq_freef;
	if (dp != NODQUOT)
		dp->dq_freeb = &dqfreel;
	dqfreel = dp;
	/*
	 * and off the hash chain it was on, & onto the new one.
	 */
	dh = &dqhead[DQHASH(uid, dev)];
	remque(dq);
	insque(dq, dh);
	dq->dq_cnt = 1;
	dq->dq_flags = 0;
	dq->dq_uid = uid;
	dq->dq_dev = dev;
	dq->dq_dqb = zdqb;
	dq->dq_own = NOQUOTA;
	return (dq);
}

/*
 * dqrele - layman's interface to putdq.
 */
dqrele(dq)
	register struct dquot *dq;
{
	register struct mount *mp;

	if (dq == NODQUOT || dq == LOSTDQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	/*
	 * I/O required, find appropriate file system
	 * to sync the quota information to.
	 */
	for (mp = mount; mp < &mount[NMOUNT]; mp++)
#ifdef pdp11
		if (mp->m_inodp && mp->m_dev == dq->dq_dev) {
#else
		if (mp->m_bufp && mp->m_dev == dq->dq_dev) {
#endif
			putdq(mp, dq, 1);
			return;
		}
	panic("dqrele");
}

/*
 * Update the disc quota in the quota file.
 */
putdq(mp, dq, free)
	register struct mount *mp;
	register struct dquot *dq;
{
	register struct inode *ip;

	if (dq == NODQUOT || dq == LOSTDQUOT)
		return;
	if (free && dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	/*
	 * Disk quota not modified, just discard
	 * or return (having adjusted the reference
	 * count), as indicated by the "free" param.
	 */
	if ((dq->dq_flags & DQ_MOD) == 0) {
		if (free) {
			dq->dq_cnt = 0;
release:
			if (dqfreel != NODQUOT) {
				*dqback = dq;
				dq->dq_freeb = dqback;
			} else {
				dqfreel = dq;
				dq->dq_freeb = &dqfreel;
			}
			dq->dq_freef = NODQUOT;
			dqback = &dq->dq_freef;
		}
		return;
	}
	/*
	 * Quota modified, write back to disk.
	 */
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
#ifdef pdp11
		QUOTAUNMAP();
		sleep((caddr_t)dq, PINOD+2);
		QUOTAMAP();
#else
		sleep((caddr_t)dq, PINOD+2);
#endif
		/* someone could sneak in and grab it */
		if (free && dq->dq_cnt > 1) {
			dq->dq_cnt--;
			return;
		}
	}
	dq->dq_flags |= DQ_LOCK;
	if ((ip = mp->m_qinod) == NULL)
		panic("lost quota file");
#ifdef pdp11
	{
	struct dqblk xq;
	uid_t uid;

	xq = dq->dq_dqb;
	uid = dq->dq_uid;
	QUOTAUNMAP();
	ILOCK(ip);
	(void)rdwri(UIO_WRITE, ip, &xq, sizeof (xq),
	    (off_t)uid * sizeof (xq), UIO_SYSSPACE,
	    IO_UNIT, (int *)0);
	QUOTAMAP();
	}
#else
	ILOCK(ip);
	(void) rdwri(UIO_WRITE, ip, (caddr_t)&dq->dq_dqb, sizeof (struct dqblk),
	    (off_t)dq->dq_uid * sizeof (struct dqblk), UIO_SYSSPACE,
	    IO_UNIT, (int *)0);
#endif
	IUNLOCK(ip);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (free && --dq->dq_cnt == 0)
		goto release;
}

/*
 * See if there is a quota struct in core for user 'uid'.
 */
struct quota *
qfind(uid)
	register uid_t uid;
{
	register struct quota *q;
	register struct qhash *qh;

	/*
	 * Check common cases first: asking for own quota,
	 * or that of the super user (has reserved slot 0
	 * in the table).
	 */
	q = u.u_quota;
	if (q != NOQUOTA && q->q_uid == uid)
		return (q);
	if (uid == 0)		/* the second most likely case */
		return (quota);
	/*
	 * Search cache.
	 */
	qh = &qhash[QHASH(uid)];
	for (q = (Qptr)qh->qh_forw; q != (Qptr)qh; q = q->q_forw)
		if (q->q_uid == uid)
			return (q);
	return (NOQUOTA);
}

/*
 * Set the quota file up for a particular file system.
 * Called as the result of a setquota system call.
 */
opendq(mp, fname)
	register struct mount *mp;
	caddr_t fname;
{
	register struct inode *ip;
	register struct quota *q;
	struct dquot *dq;
	struct nameidata nd;
	register struct nameidata *ndp = &nd;
	int i;

	if (mp->m_qinod)
		closedq(mp);
	QUOTAUNMAP();		/* paranoia */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, fname);
	ip = namei(ndp);
	QUOTAMAP();
	if (ip == NULL)
		return;
	IUNLOCK(ip);
	if (ip->i_dev != mp->m_dev) {
		u.u_error = EACCES;
		return;
	}
	if ((ip->i_mode & IFMT) != IFREG) {
		u.u_error = EACCES;
		return;
	}
	/*
	 * Flush in-core references to any previous
	 * quota file for this file system.
	 */
	mp->m_qinod = ip;
	mp->m_flags |= MNT_QUOTA;
	i = mp - mount;
	for (q = quota; q < quotaNQUOTA; q++)
		if ((q->q_flags & Q_NDQ) == 0) {
			if (q->q_cnt == 0)
				q->q_dq[i] = LOSTDQUOT;
			else {
				q->q_cnt++;	/* cannot be released */
				dq = discquota(q->q_uid, ip);
				q->q_dq[i] = dq;
				if (dq != NODQUOT)
					dq->dq_own = q;
				delquota(q);
			}
		}
}

/*
 * Close off disc quotas for a file system.
 */
closedq(mp)
	register struct mount *mp;
{
	register struct dquot *dq;
	register i = mp - mount;
	register struct quota *q;
	register struct inode *ip;

	if (mp->m_qinod == NULL)
		return;
	/*
	 * Search inode table, delete any references
	 * to quota file being closed.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if (ip->i_dev == mp->m_dev) {
#ifdef pdp11
			dq = ix_dquot[ip - inode];
			ix_dquot[ip - inode] = NODQUOT;
#else
			dq = ip->i_dquot;
			ip->i_dquot = NODQUOT;
#endif
			putdq(mp, dq, 1);
		}
	/*
	 * Search quota table, flush any pending
	 * quota info to disk and also delete
	 * references to closing quota file.
	 */
	for (q = quota; q < quotaNQUOTA; q++) {
		if ((q->q_flags & Q_NDQ) == 0) {
			if (q->q_cnt) {
				q->q_cnt++;
				putdq(mp, q->q_dq[i], 1);
				delquota(q);
			} else
				putdq(mp, q->q_dq[i], 1);
		}
		q->q_dq[i] = NODQUOT;
	}

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off into the never-never (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dq = dquot; dq < dquotNDQUOT; dq++)
		if (dq->dq_dev == mp->m_dev) {
			if (dq->dq_cnt)
				panic("closedq: stray dquot");
			remque(dq);
			dq->dq_forw = dq;
			dq->dq_back = dq;
			dq->dq_dev = NODEV;
		}
	QUOTAUNMAP();
	irele(mp->m_qinod);
	QUOTAMAP();
	mp->m_qinod = NULL;
	mp->m_flags &= ~MNT_QUOTA;
}
#endif