/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_mbuf.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"

/*
 * Allocate the initial complement of mbufs and mbuf clusters at boot.
 */
mbinit()
{
	int s;

	s = splimp();
	if (m_clalloc(4096/CLBYTES, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(4096/CLBYTES, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters' worth of kernel virtual memory and carve it
 * into mbufs or clusters, as selected by "how".
 * Must be called at splimp.
 */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
	int canwait;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (canwait == M_WAIT)
			panic("out of mbufs: map full");
		return (0);
	}
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Try to add one cluster's worth of mbufs to the free list.
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{

	if (m_clalloc(1, MPG_MBUFS, canwait) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free space */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}
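
/*
 * Illustrative sketch (not part of the original file): the typical
 * caller-side pattern for the small-allocation routines above.  Only
 * names already defined here or in mbuf.h are used (m_get, m_free,
 * mtod, M_DONTWAIT, MT_DATA); the example function itself is
 * hypothetical and compiled out.
 */
#ifdef notdef
static
m_get_example()
{
	register struct mbuf *m;

	/* grab one mbuf without sleeping; may fail under memory pressure */
	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	/* the data area is reached through mtod(); record how much is used */
	bcopy("example", mtod(m, caddr_t), 8);
	m->m_len = 8;
	/* m_free releases this mbuf and returns its (here empty) successor */
	(void) m_free(m);
	return (1);
}
#endif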
/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
	/*
	 * MGET may call m_more again if the free list is still empty;
	 * redefine it here so that case panics instead of recursing.
	 */
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the chain.  Should get M_WAIT/M_DONTWAIT from the caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster mbuf: share the cluster by reference */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate chain n to the end of chain m, compacting data into
 * m's last mbuf when it fits.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
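
/*
 * Illustrative sketch (not part of the original file): copying a chain
 * with m_copy() and releasing it afterwards.  Assumes only the routines
 * defined in this file (m_copy, m_freem) and the M_COPYALL constant
 * from mbuf.h; the example function is hypothetical and compiled out.
 */
#ifdef notdef
static
m_copy_example(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	/* duplicate the whole chain; cluster pages are shared by reference */
	n = m_copy(m, 0, M_COPYALL);
	if (n == 0)
		return (0);		/* no mbufs were available */
	/* ... hand "n" to a consumer, e.g. a protocol output routine ... */
	m_freem(n);
	return (1);
}
#endif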
/*
 * Trim "len" bytes of data from an mbuf chain: from the head if len
 * is positive, from the tail if it is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
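
/*
 * Illustrative sketch (not part of the original file): the usual
 * m_pullup() idiom, making a fixed-size header contiguous before the
 * data pointer is cast to a structure.  "struct exhdr" and the example
 * function are hypothetical; only m_pullup, mtod and m_freem from this
 * file are assumed.  Compiled out.
 */
#ifdef notdef
struct exhdr {				/* hypothetical 8-byte header */
	short	eh_type;
	short	eh_len;
	long	eh_seq;
};

static
m_pullup_example(m)
	register struct mbuf *m;
{
	register struct exhdr *eh;

	/* on failure m_pullup has already freed the chain */
	m = m_pullup(m, sizeof (struct exhdr));
	if (m == 0)
		return (0);
	eh = mtod(m, struct exhdr *);
	/* ... the fields of "eh" may now be examined safely ... */
	m_freem(m);
	return (1);
}
#endif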