/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved. The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_proc.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "map.h"
#include "cmap.h"
#include "text.h"
#include "vm.h"

#ifdef vax
#include "../vax/mtpr.h"
#endif

/*
 * Get virtual memory resources for a new process.
 * Called after page tables are allocated, but before they
 * are initialized, we initialize the memory management registers,
 * and then expand the page tables for the data and stack segments
 * creating zero fill pte's there. Text pte's are set up elsewhere.
 *
 * ts/ds/ss are the text, data and stack sizes (in clicks) for the
 * new image.
 *
 * SHOULD FREE EXTRA PAGE TABLE PAGES HERE OR SOMEWHERE.
 */
vgetvm(ts, ds, ss)
	size_t ts, ds, ss;
{

	/*
	 * Set the pcb copies of the P0/P1 length registers:
	 * P0 initially covers just the text, P1 just the u. area
	 * pages at the top of the address space.
	 * NOTE(review): assigning AST_NONE here presumably also
	 * clears any pending AST request encoded in pcb_p0lr --
	 * confirm against the pcb/mtpr definitions.
	 */
	u.u_pcb.pcb_p0lr = AST_NONE;
	setp0lr(ts);
	setp1lr(P1PAGES - HIGHPAGES);
	u.u_procp->p_tsize = ts;
	u.u_tsize = ts;
	/*
	 * Grow the stack (region 1) and then the data (region 0)
	 * segments; expand() builds zero-fill-on-demand pte's and
	 * updates the length registers.
	 */
	expand((int)ss, 1);
	expand((int)ds, 0);
}

/*
 * Release the virtual memory resources (memory
 * pages, and swap area) associated with the current process.
 * Caller must not be swappable. Used at exit or execl.
 */
vrelvm()
{
	register struct proc *p = u.u_procp;

	/*
	 * Release memory; text first, then data and stack pages.
	 * vmemfree returns the count of resident pages it freed,
	 * which is charged back against the resident set size;
	 * anything left over indicates lost accounting.
	 */
	xfree();
	p->p_rssize -= vmemfree(dptopte(p, 0), (int)p->p_dsize);
	p->p_rssize -= vmemfree(sptopte(p, p->p_ssize - 1), (int)p->p_ssize);
	if (p->p_rssize != 0)
		panic("vrelvm rss");
	/*
	 * Wait for all page outs to complete, then
	 * release swap space.
	 */
	p->p_swrss = 0;
	while (p->p_poip)
		sleep((caddr_t)&p->p_poip, PSWP+1);
	/* Shrink both swap maps to zero, releasing the swap blocks. */
	(void) vsexpand((size_t)0, &u.u_dmap, 1);
	(void) vsexpand((size_t)0, &u.u_smap, 1);
	/* Clear the segment sizes in both the proc entry and the u. area. */
	p->p_tsize = 0;
	p->p_dsize = 0;
	p->p_ssize = 0;
	u.u_tsize = 0;
	u.u_dsize = 0;
	u.u_ssize = 0;
}

/*
 * Pass virtual memory resources from p to q.
 * P's u. area is up, q's is uq. Used internally
 * when starting/ending a vfork(): the donor p is left with
 * zeroed sizes, statistics and swap maps, and the pair end up
 * running on exchanged page tables (see vpasspt below).
 */
vpassvm(p, q, up, uq, umap)
	register struct proc *p, *q;
	register struct user *up, *uq;
	struct pte *umap;
{

	/*
	 * Pass fields related to vm sizes.
	 */
	uq->u_tsize = q->p_tsize = p->p_tsize; up->u_tsize = p->p_tsize = 0;
	uq->u_dsize = q->p_dsize = p->p_dsize; up->u_dsize = p->p_dsize = 0;
	uq->u_ssize = q->p_ssize = p->p_ssize; up->u_ssize = p->p_ssize = 0;

	/*
	 * Pass proc table paging statistics.
	 */
	q->p_swrss = p->p_swrss; p->p_swrss = 0;
	q->p_rssize = p->p_rssize; p->p_rssize = 0;
	q->p_poip = p->p_poip; p->p_poip = 0;

	/*
	 * Relink text segment.
	 */
	q->p_textp = p->p_textp;
	xrepl(p, q);
	p->p_textp = 0;

	/*
	 * Pass swap space maps; the donor gets fresh zero maps.
	 */
	uq->u_dmap = up->u_dmap; up->u_dmap = zdmap;
	uq->u_smap = up->u_smap; up->u_smap = zdmap;

	/*
	 * Pass u. paging statistics; donor's rusage counters
	 * are zeroed.
	 */
	uq->u_outime = up->u_outime; up->u_outime = 0;
	uq->u_ru = up->u_ru;
	bzero((caddr_t)&up->u_ru, sizeof (struct rusage));
	uq->u_cru = up->u_cru;
	bzero((caddr_t)&up->u_cru, sizeof (struct rusage));

	/*
	 * And finally, pass the page tables themselves.
	 * On return we are running on the other set of
	 * page tables, but still with the same u. area.
	 */
	vpasspt(p, q, up, uq, umap);
}

/*
 * Change the size of the data+stack regions of the process.
 * If the size is shrinking, it's easy-- just release virtual memory.
 * If it's growing, initialize new page table entries as
 * 'zero fill on demand' pages.
 *
 * change is a signed click count, which must be a multiple of
 * CLSIZE; region 0 is data, anything else is stack.
 */
expand(change, region)
	int change, region;
{
	register struct proc *p;
	register struct pte *base, *p0, *p1;
	struct pte proto;
	int p0lr, p1lr;
	struct pte *base0;
	size_t ods, oss;
	int size;
	u_int v;

	p = u.u_procp;
	if (change == 0)
		return;
	if (change % CLSIZE)
		panic("expand");

#ifdef PGINPROF
	vmsizmon();
#endif

	/*
	 * Update the sizes to reflect the change. Note that we may
	 * swap as a result of a ptexpand, but this will work, because
	 * the routines which swap out will get the current text and data
	 * sizes from the arguments they are passed, and when the process
	 * resumes the lengths in the proc structure are used to
	 * build the new page tables.
	 *
	 * v records the virtual page number of the start of the
	 * changed region (data grows up, stack grows down); it is
	 * needed below for the translation buffer update.
	 */
	ods = u.u_dsize;
	oss = u.u_ssize;
	if (region == 0) {
		v = dptov(p, p->p_dsize);
		p->p_dsize += change;
		u.u_dsize += change;
	} else {
		p->p_ssize += change;
		v = sptov(p, p->p_ssize-1);
		u.u_ssize += change;
	}

	/*
	 * Compute the end of the text+data regions and the beginning
	 * of the stack region in the page tables,
	 * and expand the page tables if necessary.
	 * The gap p1 - p0 is the slack between the two regions;
	 * only when the growth exceeds it must the tables be grown.
	 */
	p0 = u.u_pcb.pcb_p0br + (u.u_pcb.pcb_p0lr&~AST_CLR);
	p1 = u.u_pcb.pcb_p1br + (u.u_pcb.pcb_p1lr&~PME_CLR);
	if (change > p1 - p0)
		ptexpand(clrnd(ctopt(change - (p1 - p0))), ods, oss);
	/* PTEXPAND SHOULD GIVE BACK EXCESS PAGE TABLE PAGES */

	/*
	 * Compute the base of the allocated/freed region.
	 * The length registers are re-fetched because ptexpand
	 * may have moved/rebuilt the page tables.
	 */
	p0lr = u.u_pcb.pcb_p0lr&~AST_CLR;
	p1lr = u.u_pcb.pcb_p1lr&~PME_CLR;
	if (region == 0)
		base = u.u_pcb.pcb_p0br + p0lr + (change > 0 ? 0 : change);
	else
		base = u.u_pcb.pcb_p1br + p1lr - (change > 0 ? change : 0);

	/*
	 * If we shrunk, give back the virtual memory.
	 */
	if (change < 0)
		p->p_rssize -= vmemfree(base, -change);

	/*
	 * Update the processor length registers and copies in the pcb.
	 * P1 grows downward, so its length register moves opposite
	 * to P0's.
	 */
	if (region == 0)
		setp0lr(p0lr + change);
	else
		setp1lr(p1lr - change);

	/*
	 * If shrinking, clear pte's, otherwise
	 * initialize zero fill on demand pte's.
	 * The prototype pte is built by whole-word assignment
	 * (PG_UW alone gives an invalid, user-writable pte, which
	 * is the "cleared" form used for the shrink case).
	 */
	*(int *)&proto = PG_UW;
	if (change < 0)
		change = -change;
	else {
		proto.pg_fod = 1;
		((struct fpte *)&proto)->pg_fileno = PG_FZERO;
		cnt.v_nzfod += change;
	}
	base0 = base;
	size = change;
	while (--change >= 0)
		*base++ = proto;

	/*
	 * We changed mapping for the current process,
	 * so must update the hardware translation
	 */
	newptes(base0, v, size);
}

/*
 * Create a duplicate copy of the current process
 * in process slot p, which has been partially initialized
 * by newproc().
 *
 * Returns 1 when resumed in the child, 0 in the parent.
 * (isvfork has no declaration and so defaults to int,
 * per K&R rules.)
 *
 * Could deadlock here if two large proc's get page tables
 * and then each gets part of his UPAGES if they then have
 * consumed all the available memory. This can only happen when
 *	USRPTSIZE + UPAGES * NPROC > maxmem
 * which is impossible except on systems with tiny real memories,
 * when large procs stupidly fork() instead of vfork().
 */
procdup(p, isvfork)
	register struct proc *p;
{

	/*
	 * Allocate page tables for new process, waiting
	 * for memory to be free.
	 */
	while (vgetpt(p, vmemall) == 0) {
		kmapwnt++;
		sleep((caddr_t)kernelmap, PSWP+4);
	}
	/*
	 * Snapshot the current u. area pcb and get a u.
	 * for the new process, a copy of our u.
	 */
	resume(pcbb(u.u_procp));
	(void) vgetu(p, vmemall, Forkmap, &forkutl, &u);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	forkutl.u_pcb.pcb_sswap = (int *)&u.u_ssave;
	if (savectx(&forkutl.u_ssave))
		/*
		 * Return 1 in child.
		 */
		return (1);

	/*
	 * If the new process is being created in vfork(), then
	 * exchange vm resources with it. We want to end up with
	 * just a u. area and an empty p0 region, so initialize the
	 * prototypes in the other area before the exchange.
	 * NOTE(review): masking with AST_CLR (not ~AST_CLR) keeps
	 * only the AST bits and zeroes the p0 length -- presumably
	 * intentional, giving the empty p0 region; confirm against
	 * the pcb definitions.
	 */
	if (isvfork) {
		forkutl.u_pcb.pcb_p0lr = u.u_pcb.pcb_p0lr & AST_CLR;
		forkutl.u_pcb.pcb_p1lr = P1PAGES - HIGHPAGES;
		vpassvm(u.u_procp, p, &u, &forkutl, Forkmap);
		/*
		 * Return 0 in parent.
		 */
		return (0);
	}
	/*
	 * A real fork; clear vm statistics of new process
	 * and link into the new text segment.
	 * Equivalent things happen during vfork() in vpassvm().
	 */
	bzero((caddr_t)&forkutl.u_ru, sizeof (struct rusage));
	bzero((caddr_t)&forkutl.u_cru, sizeof (struct rusage));
	forkutl.u_dmap = u.u_cdmap;
	forkutl.u_smap = u.u_csmap;
	forkutl.u_outime = 0;

	/*
	 * Attach to the text segment.
	 */
	if (p->p_textp) {
		p->p_textp->x_count++;
		xlink(p);
	}

	/*
	 * Duplicate data and stack space of current process
	 * in the new process.
	 */
	vmdup(p, dptopte(p, 0), dptov(p, 0), p->p_dsize, CDATA);
	vmdup(p, sptopte(p, p->p_ssize - 1), sptov(p, p->p_ssize - 1), p->p_ssize, CSTACK);

	/*
	 * Return 0 in parent.
	 */
	return (0);
}

/*
 * Duplicate count clicks of the current process's address space,
 * starting at virtual page v, into child process p whose pte's
 * start at pte. One page cluster (CLSIZE) at a time:
 * fill-on-demand pte's are duplicated by copying the pte words
 * themselves; resident pages get fresh memory (vmemall) and a
 * physical copy (copyseg). type is the CDATA/CSTACK tag passed
 * through to vmemall.
 */
vmdup(p, pte, v, count, type)
	struct proc *p;
	register struct pte *pte;
	register unsigned v;
	register size_t count;
	int type;
{
	register struct pte *opte = vtopte(u.u_procp, v);
	register int i;
	register struct cmap *c;

	while (count != 0) {
		count -= CLSIZE;
		if (opte->pg_fod) {
			/*
			 * Not resident: just replicate the
			 * fill-on-demand pte words.
			 */
			v += CLSIZE;
			for (i = 0; i < CLSIZE; i++)
				*(int *)pte++ = *(int *)opte++;
			continue;
		}
		opte += CLSIZE;
		/* Allocate a cluster for the child and charge its rss. */
		(void) vmemall(pte, CLSIZE, p, type);
		p->p_rssize += CLSIZE;
		for (i = 0; i < CLSIZE; i++) {
			/* Copy the parent's page into the new frame,
			 * then mark the pte valid, modified, user r/w. */
			copyseg((caddr_t)ctob(v+i), (pte+i)->pg_pfnum);
			*(int *)(pte+i) |= (PG_V|PG_M) + PG_UW;
		}
		v += CLSIZE;
		/* NOTE(review): vmemall presumably returns the cluster
		 * locked; unlock its core-map entry here -- confirm. */
		c = &cmap[pgtocm(pte->pg_pfnum)];
		MUNLOCK(c);
		pte += CLSIZE;
	}
	/* Child's pte's changed behind the hardware's back. */
	p->p_flag |= SPTECHG;
}