/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved. The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_text.c 7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "text.h"
#include "inode.h"
#include "buf.h"
#include "seg.h"
#include "vm.h"
#include "cmap.h"
#include "uio.h"
#include "exec.h"

/*
 * Acquire the per-entry lock on a text structure: sleep (at swap
 * priority PSWP) while another holder has XLOCK set, advertising
 * interest via XWANT, then claim XLOCK for ourselves.
 */
#define X_LOCK(xp) { \
	while ((xp)->x_flag & XLOCK) { \
		(xp)->x_flag |= XWANT; \
		sleep((caddr_t)(xp), PSWP); \
	} \
	(xp)->x_flag |= XLOCK; \
}
/*
 * Release the per-entry lock, waking any sleepers that set XWANT.
 */
#define XUNLOCK(xp) { \
	if ((xp)->x_flag & XWANT) \
		wakeup((caddr_t)(xp)); \
	(xp)->x_flag &= ~(XLOCK|XWANT); \
}
/*
 * Insert xp at the head of the free list.  x_back points at the
 * forward-pointer cell that references xp (here &xhead), so unlinking
 * (ALLOC below) needs no head special case.  If the list was empty,
 * xtail must now point at xp's own x_forw cell.
 */
#define FREE_AT_HEAD(xp) { \
	(xp)->x_forw = xhead; \
	xhead = (xp); \
	(xp)->x_back = &xhead; \
	if (xtail == &xhead) \
		xtail = &(xp)->x_forw; \
	else \
		(xp)->x_forw->x_back = &(xp)->x_forw; \
}
/*
 * Append xp at the tail of the free list.  Relies on xp->x_forw
 * already being NULL (true for entries cleared by ALLOC, and for the
 * zeroed table at xinit time).
 */
#define FREE_AT_TAIL(xp) { \
	(xp)->x_back = xtail; \
	*xtail = (xp); \
	xtail = &(xp)->x_forw; \
	/* x_forw is NULL */ \
}
/*
 * Unlink xp from the free list (wherever it is) and clear its links;
 * a NULL x_back afterwards marks the entry as "in use, not cached"
 * (tested in xalloc).
 */
#define ALLOC(xp) { \
	*((xp)->x_back) = (xp)->x_forw; \
	if ((xp)->x_forw) \
		(xp)->x_forw->x_back = (xp)->x_back; \
	else \
		xtail = (xp)->x_back; \
	(xp)->x_forw = NULL; \
	(xp)->x_back = NULL; \
}

/*
 * We place free text table entries on a free list.
 * All text images are treated as "sticky,"
 * and are placed on the free list (as an LRU cache) when unused.
 * They may be reclaimed from the free list until reused.
 * Files marked sticky are locked into the table, and are never freed.
 * For machines with limited swap space, this may result
 * in filling up swap, and thus we allow a limit
 * to be placed on the number of text images to cache.
 * (In that case, really should change the algorithm
 * for freeing a text when the cache is full;
 * should free least-recently-used text rather than current one.)
 */
struct text *xhead, **xtail;	/* text table free list */
int xcache;			/* number of "sticky" texts retained */
int maxtextcache = -1;		/* maximum number of "sticky" texts */
struct xstats xstats;		/* cache statistics */

/*
 * initialize text table:
 * chain every entry of text[] onto the free list, and default the
 * cache limit to the whole table (ntext) if not patched at boot.
 */
xinit()
{
	register struct text *xp;

	xtail = &xhead;
	/* textNTEXT is presumably the end of the text[] table -- defined elsewhere */
	for (xp = text; xp < textNTEXT; xp++)
		FREE_AT_TAIL(xp);
	if (maxtextcache == -1)
		maxtextcache = ntext;
}

/*
 * relinquish use of the shared text segment
 * of a process.
 *
 * Drops one reference (x_count).  On the last reference, and if the
 * file is not marked sticky (ISVTX), the image is either discarded
 * outright (cache full, XTRC set, or the inode has been unlinked)
 * and the entry pushed on the free-list head for quick reuse, or it
 * is retained as a cache entry at the free-list tail.
 */
xfree()
{
	register struct text *xp;

	if ((xp = u.u_procp->p_textp) == NULL)
		return;
	xstats.free++;
	X_LOCK(xp);
	if (--xp->x_count == 0 && (xp->x_iptr->i_mode & ISVTX) == 0) {
		if (xcache >= maxtextcache || xp->x_flag & XTRC ||
		    xp->x_iptr->i_nlink == 0) {		/* XXX */
			/* Not worth caching: free resident pages now. */
			xp->x_rssize -= vmemfree(tptopte(u.u_procp, 0),
			    (int)u.u_tsize);
			if (xp->x_rssize != 0)
				panic("xfree rssize");
			/* Let pending pageouts (x_poip) drain first. */
			while (xp->x_poip)
				sleep((caddr_t)&xp->x_poip, PSWP+1);
			/* Drop XLOCK by hand: xuntext takes it again itself. */
			xp->x_flag &= ~XLOCK;
			xuntext(xp);
			FREE_AT_HEAD(xp);
		} else {
			if (xp->x_flag & XWRIT) {
				/* Dirty image will be pushed to swap by
				 * xccdec below; XUNUSED records that the
				 * swap was done only for caching (counted
				 * in xalloc if never reclaimed). */
				xstats.free_cacheswap++;
				xp->x_flag |= XUNUSED;
			}
			xcache++;
			xstats.free_cache++;
			xccdec(xp, u.u_procp);
			FREE_AT_TAIL(xp);
		}
	} else {
		/* Still referenced (or sticky): just drop in-core count. */
		xccdec(xp, u.u_procp);
		xstats.free_inuse++;
	}
	xunlink(u.u_procp);
	XUNLOCK(xp);
	u.u_procp->p_textp = NULL;
}

/*
 * Attach to a shared text segment.
 * If there is no shared text, just return.
 * If there is, hook up to it:
 * if it is not currently being used, it has to be read
 * in from the inode (ip); the written bit is set to force it
 * to be written out as appropriate.
 * If it is being used, but is not currently in core,
 * a swap has to be done to get it back.
 */
xalloc(ip, ep, pagi)
	struct exec *ep;
	register struct inode *ip;
{
	register struct text *xp;
	register size_t ts;

	if (ep->a_text == 0)
		return;
	xstats.alloc++;
	/* The inode already names a text entry: share or reclaim it. */
	while ((xp = ip->i_text) != NULL) {
		if (xp->x_flag&XLOCK) {
			/*
			 * Wait for text to be unlocked,
			 * then start over (may have changed state).
			 */
			xwait(xp);
			continue;
		}
		X_LOCK(xp);
		if (xp->x_back) {
			/* Non-NULL x_back means it sat on the free list
			 * as a cache entry: pull it off and reuse. */
			xstats.alloc_cachehit++;
			ALLOC(xp);
			xp->x_flag &= ~XUNUSED;
			xcache--;
		} else
			xstats.alloc_inuse++;
		xp->x_count++;
		u.u_procp->p_textp = xp;
		xlink(u.u_procp);
		XUNLOCK(xp);
		return;
	}
	/* No existing entry: take the head of the free list. */
	xp = xhead;
	if (xp == NULL) {
		tablefull("text");
		psignal(u.u_procp, SIGKILL);
		return;
	}
	ALLOC(xp);
	if (xp->x_iptr) {
		/* Entry still holds a cached image: evict it. */
		xstats.alloc_cacheflush++;
		if (xp->x_flag & XUNUSED)
			xstats.alloc_unused++;
		xuntext(xp);
		xcache--;
	}
	/* Mark loading and locked; XPAGI selects demand paging from file. */
	xp->x_flag = XLOAD|XLOCK;
	if (pagi)
		xp->x_flag |= XPAGI;
	ts = clrnd(btoc(ep->a_text));
	xp->x_size = ts;
	if (vsxalloc(xp) == NULL) {
		swkill(u.u_procp, "xalloc: no swap space");
		return;
	}
	xp->x_count = 1;
	xp->x_ccount = 0;
	xp->x_rssize = 0;
	xp->x_iptr = ip;
	ip->i_flag |= ITEXT;
	ip->i_text = xp;
	ip->i_count++;
	u.u_procp->p_textp = xp;
	xlink(u.u_procp);
	if (pagi == 0) {
		/* Not demand paged: read the whole text image in now,
		 * writable for the copy-in, SKEEP so it stays in core. */
		settprot(RW);
		u.u_procp->p_flag |= SKEEP;
		(void) rdwri(UIO_READ, ip,
		    (caddr_t)ctob(tptov(u.u_procp, 0)),
		    (int)ep->a_text, (off_t)sizeof (struct exec),
		    2, (int *)0);
		u.u_procp->p_flag &= ~SKEEP;
	}
	settprot(RO);
	/* XWRIT forces the fresh image out to swap when last user exits. */
	xp->x_flag |= XWRIT;
	xp->x_flag &= ~XLOAD;
	XUNLOCK(xp);
}

/*
 * Lock and unlock a text segment from swapping
 */
xlock(xp)
	register struct text *xp;
{

	X_LOCK(xp);
}

/*
 * Wait for xp to be unlocked if it is currently locked.
 * (Acquires the lock and immediately releases it.)
 */
xwait(xp)
	register struct text *xp;
{

	X_LOCK(xp);
	XUNLOCK(xp);
}

xunlock(xp)
	register struct text *xp;
{

	XUNLOCK(xp);
}

/*
 * Decrement the in-core usage count of a shared text segment,
 * which must be locked. When the count drops to zero,
 * free the core space.
 *
 * If the image is dirty (XWRIT), its pages (and, when demand paged
 * via XPAGI, its page tables) are written to swap instead of freed.
 */
xccdec(xp, p)
	register struct text *xp;
	register struct proc *p;
{

	if (--xp->x_ccount == 0) {
		if (xp->x_flag & XWRIT) {
			/* Push the image out to swap ... */
			vsswap(p, tptopte(p, 0), CTEXT, 0, (int)xp->x_size,
			    (struct dmap *)0);
			/* ... and the page tables too if demand paged. */
			if (xp->x_flag & XPAGI)
				(void)swap(p, xp->x_ptdaddr,
				    (caddr_t)tptopte(p, 0),
				    (int)xp->x_size * sizeof (struct pte),
				    B_WRITE, B_PAGET, swapdev, 0);
			xp->x_flag &= ~XWRIT;
		} else
			/* Clean image: just release resident pages. */
			xp->x_rssize -= vmemfree(tptopte(p, 0),
			    (int)xp->x_size);
		if (xp->x_rssize != 0)
			panic("text rssize");
	}
}

/*
 * Detach a process from the in-core text.
 * External interface to xccdec, used when swapping out a process.
 */
xdetach(xp, p)
	register struct text *xp;
	struct proc *p;
{

	if (xp && xp->x_ccount != 0) {
		X_LOCK(xp);
		xccdec(xp, p);
		xunlink(p);
		XUNLOCK(xp);
	}
}

/*
 * Free the swap image of all unused saved-text text segments
 * which are from device dev (used by umount system call).
 * If dev is NODEV, do all devices (used when rebooting).
 */
xumount(dev)
	register dev_t dev;
{
	register struct text *xp;

	/* textNTEXT is presumably the end of the text[] table -- defined elsewhere */
	for (xp = text; xp < textNTEXT; xp++)
		if (xp->x_iptr != NULL &&
		    (dev == xp->x_iptr->i_dev || dev == NODEV))
			xuntext(xp);
}

/*
 * remove a shared text segment from the text table, if possible.
319: */ 320: xrele(ip) 321: register struct inode *ip; 322: { 323: 324: if (ip->i_flag & ITEXT) 325: xuntext(ip->i_text); 326: } 327: 328: /* 329: * remove text image from the text table. 330: * the use count must be zero. 331: */ 332: xuntext(xp) 333: register struct text *xp; 334: { 335: register struct inode *ip; 336: 337: X_LOCK(xp); 338: if (xp->x_count == 0) { 339: ip = xp->x_iptr; 340: xp->x_iptr = NULL; 341: vsxfree(xp, (long)xp->x_size); 342: ip->i_flag &= ~ITEXT; 343: ip->i_text = NULL; 344: irele(ip); 345: } 346: XUNLOCK(xp); 347: } 348: 349: /* 350: * Add a process to those sharing a text segment by 351: * getting the page tables and then linking to x_caddr. 352: */ 353: xlink(p) 354: register struct proc *p; 355: { 356: register struct text *xp = p->p_textp; 357: 358: if (xp == 0) 359: return; 360: vinitpt(p); 361: p->p_xlink = xp->x_caddr; 362: xp->x_caddr = p; 363: xp->x_ccount++; 364: } 365: 366: xunlink(p) 367: register struct proc *p; 368: { 369: register struct text *xp = p->p_textp; 370: register struct proc *q; 371: 372: if (xp == 0) 373: return; 374: if (xp->x_caddr == p) { 375: xp->x_caddr = p->p_xlink; 376: p->p_xlink = 0; 377: return; 378: } 379: for (q = xp->x_caddr; q->p_xlink; q = q->p_xlink) 380: if (q->p_xlink == p) { 381: q->p_xlink = p->p_xlink; 382: p->p_xlink = 0; 383: return; 384: } 385: panic("lost text"); 386: } 387: 388: /* 389: * Replace p by q in a text incore linked list. 390: * Used by vfork(), internally. 391: */ 392: xrepl(p, q) 393: struct proc *p, *q; 394: { 395: register struct text *xp = q->p_textp; 396: 397: if (xp == 0) 398: return; 399: xunlink(p); 400: q->p_xlink = xp->x_caddr; 401: xp->x_caddr = q; 402: }