/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_pt.c	7.1 (Berkeley) 6/5/86
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "map.h"
#include "cmap.h"
#include "vm.h"
#include "buf.h"
#include "text.h"
#include "mount.h"
#include "inode.h"
#include "kernel.h"

#include "../machine/pte.h"

#ifdef vax
#include "../vax/mtpr.h"
#endif

/*
 * Get page tables for process p.  Allocator
 * for memory is argument; process must be locked
 * from swapping if vmemall is used; if memall is
 * used, call will return w/o waiting for memory.
 * In any case an error return results if no user
 * page table space is available.
 */
vgetpt(p, pmemall)
        register struct proc *p;
        int (*pmemall)();
{
        register long a;
        register int i;

        if (p->p_szpt == 0)
                panic("vgetpt");
        /*
         * Allocate space in the kernel map for this process.
         * Then allocate page table pages, and initialize the
         * process' p0br and addr pointer to be the kernel
         * virtual addresses of the base of the page tables and
         * the pte for the process pcb (at the base of the u.).
         */
        a = rmalloc(kernelmap, (long)p->p_szpt);
        if (a == 0)
                return (0);
        if ((*pmemall)(&Usrptmap[a], p->p_szpt, p, CSYS) == 0) {
                rmfree(kernelmap, (long)p->p_szpt, a);
                return (0);
        }
        p->p_p0br = kmxtob(a);
        p->p_addr = uaddr(p);
        /*
         * Now validate the system page table entries for the
         * user page table pages, flushing old translations
         * for these kernel virtual addresses.  Clear the new
         * page table pages for clean post-mortems.
         */
        vmaccess(&Usrptmap[a], (caddr_t)p->p_p0br, p->p_szpt);
        for (i = 0; i < p->p_szpt; i++)
                clearseg(Usrptmap[a + i].pg_pfnum);
        return (1);
}
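
/*
 * Illustrative sketch, not part of the original source.  vgetpt()
 * above performs a two-stage allocation: kernel virtual space from
 * kernelmap via rmalloc(), then physical pages via the caller's
 * allocator, with rmfree() backing the first stage out if the second
 * fails.  The stand-alone user-space program below (all names are
 * hypothetical, and the "map" is a trivial bump allocator rather
 * than the kernel's coalescing resource map) shows the same
 * allocate-or-unwind discipline.
 */
#ifdef notdef
#include <stdio.h>

static long map_base = 1, map_avail = 100;      /* toy map; address 0 means failure */
static long pages_avail = 4;                    /* pretend physical page pool */

static long
rm_alloc(long size)                             /* cf. rmalloc(kernelmap, ...) */
{
        long a;

        if (size > map_avail)
                return (0);
        a = map_base;
        map_base += size;
        map_avail -= size;
        return (a);
}

static void
rm_free(long size, long addr)                   /* cf. rmfree(); no coalescing here */
{
        (void)addr;
        map_avail += size;
}

static int
page_alloc(long n)                              /* cf. the (*pmemall)() argument */
{
        if (n > pages_avail)
                return (0);
        pages_avail -= n;
        return (1);
}

static int
getpt(long szpt)                                /* mirrors vgetpt()'s control flow */
{
        long a = rm_alloc(szpt);

        if (a == 0)
                return (0);
        if (page_alloc(szpt) == 0) {
                rm_free(szpt, a);               /* unwind stage one on failure */
                return (0);
        }
        return (1);
}

int
main(void)
{
        printf("first getpt(3): %d\n", getpt(3));       /* succeeds */
        printf("second getpt(3): %d\n", getpt(3));      /* fails: pool exhausted */
        printf("map_avail after unwind: %ld\n", map_avail);
        return (0);
}
#endif /* notdef */
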
/*
 * Initialize text portion of page table.
 */
vinitpt(p)
        struct proc *p;
{
        register struct text *xp;
        register struct proc *q;
        register struct pte *pte;
        register int i;
        struct pte proto;

        xp = p->p_textp;
        if (xp == 0)
                return;
        pte = tptopte(p, 0);
        /*
         * If there is another instance of the same text in core
         * then just copy the page tables from the other process.
         */
        if (q = xp->x_caddr) {
                bcopy((caddr_t)tptopte(q, 0), (caddr_t)pte,
                    (unsigned)(sizeof (struct pte) * xp->x_size));
                goto done;
        }
        /*
         * Initialize text page tables, zfod if we are loading
         * the text now; unless the process is demand loaded,
         * this will suffice as the text will henceforth either be
         * read from a file or demand paged in.
         */
        *(int *)&proto = PG_URKR;
        if (xp->x_flag & XLOAD) {
                proto.pg_fod = 1;
                ((struct fpte *)&proto)->pg_fileno = PG_FZERO;
        }
        for (i = 0; i < xp->x_size; i++)
                *pte++ = proto;
        if ((xp->x_flag & XPAGI) == 0)
                goto done;
        /*
         * Text is demand loaded.  If process is not loaded (i.e. being
         * swapped in) then retrieve page tables from swap area.  Otherwise
         * this is the first time and we must initialize the page tables
         * from the blocks in the file system.
         */
        if (xp->x_flag & XLOAD)
                vinifod((struct fpte *)tptopte(p, 0), PG_FTEXT, xp->x_iptr,
                    (daddr_t)1, xp->x_size);
        else
                (void)swap(p, xp->x_ptdaddr, (caddr_t)tptopte(p, 0),
                    (int)xp->x_size * sizeof (struct pte), B_READ,
                    B_PAGET, swapdev, 0);
done:
        /*
         * In the case where we are overlaying ourself with new page
         * table entries, old user-space translations should be flushed.
         */
        if (p == u.u_procp)
                newptes(tptopte(p, 0), tptov(p, 0), (int)xp->x_size);
        else
                p->p_flag |= SPTECHG;
}

/*
 * Update the page tables of all processes linked
 * to a particular text segment, by distributing
 * dpte to the text page at virtual frame tp.
 *
 * Note that invalidation in the translation buffer for
 * the current process is the responsibility of the caller.
 */
distpte(xp, tp, dpte)
        struct text *xp;
        register unsigned tp;
        register struct pte *dpte;
{
        register struct proc *p;
        register struct pte *pte;
        register int i;

        for (p = xp->x_caddr; p; p = p->p_xlink) {
                pte = tptopte(p, tp);
                p->p_flag |= SPTECHG;
                if (pte != dpte)
                        for (i = 0; i < CLSIZE; i++)
                                pte[i] = dpte[i];
        }
}
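
/*
 * Illustrative sketch, not part of the original source.  vinitpt()
 * above builds one prototype pte in a local, punning it to an int to
 * set the protection bits, and then stamps it across the whole text
 * range.  The stand-alone program below repeats the idiom with a
 * made-up 32-bit pte layout; the field widths and the PROT_URKR
 * value are assumptions for illustration, not the VAX encoding.
 */
#ifdef notdef
#include <stdio.h>

struct xpte {                           /* hypothetical pte layout */
        unsigned int pg_pfnum : 21;     /* page frame number */
        unsigned int pg_fod : 1;        /* fill-on-demand */
        unsigned int pg_prot : 4;       /* protection field */
        unsigned int pg_v : 1;          /* valid */
        unsigned int pg_pad : 5;
};

#define PROT_URKR       0x3             /* assumed user-read/kernel-read code */

int
main(void)
{
        struct xpte proto, pt[8];
        int i;

        *(unsigned int *)&proto = 0;    /* whole-pte store, cf. *(int *)&proto = PG_URKR */
        proto.pg_prot = PROT_URKR;
        proto.pg_fod = 1;               /* zero-fill-on-demand, cf. the PG_FZERO setup */
        for (i = 0; i < 8; i++)         /* stamp the prototype, cf. *pte++ = proto */
                pt[i] = proto;
        for (i = 0; i < 8; i++)
                printf("pte %d: fod=%u prot=%u\n", i,
                    (unsigned)pt[i].pg_fod, (unsigned)pt[i].pg_prot);
        return (0);
}
#endif /* notdef */
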
/*
 * Release page tables of process p.
 */
vrelpt(p)
        register struct proc *p;
{
        register int a;

        if (p->p_szpt == 0)
                return;
        a = btokmx(p->p_p0br);
        (void) vmemfree(&Usrptmap[a], p->p_szpt);
        rmfree(kernelmap, (long)p->p_szpt, (long)a);
}

#define Xu(a)   t = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = t;
#define Xup(a)  tp = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = tp;
#define Xp(a)   t = p->a; p->a = q->a; q->a = t;
#define Xpp(a)  tp = p->a; p->a = q->a; q->a = tp;

/*
 * Pass the page tables of process p to process q.
 * Used during vfork().  P and q are not symmetric;
 * p is the giver and q the receiver; after calling vpasspt
 * p will be ``cleaned out''.  Thus before vfork() we call vpasspt
 * with the child as q and give it our resources; after vfork() we
 * call vpasspt with the child as p to steal our resources back.
 * We are cognizant of whether we are p or q because we have to
 * be careful to keep our u. area and restore the other u. area from
 * umap after we temporarily put our u. area in both p and q's page tables.
 */
vpasspt(p, q, up, uq, umap)
        register struct proc *p, *q;
        register struct user *up, *uq;
        struct pte *umap;
{
        int t;
        int s;
        struct pte *tp;
        register int i;

        s = splhigh();          /* conservative, and slightly paranoid */
        Xu(pcb_szpt); Xu(pcb_p0lr); Xu(pcb_p1lr);
        Xup(pcb_p0br); Xup(pcb_p1br);

        /*
         * The u. area is contained in the process' p1 region.
         * Thus we map the current u. area into the process virtual space
         * of both sets of page tables we will deal with so that it
         * will stay with us as we rearrange memory management.
         */
        for (i = 0; i < UPAGES; i++)
                if (up == &u)
                        q->p_addr[i] = p->p_addr[i];
                else
                        p->p_addr[i] = q->p_addr[i];
#ifdef vax
        mtpr(TBIA, 0);
#endif
        /*
         * Now have u. double mapped, and have flushed
         * any stale translations to the new u. area.
         * Switch the page tables.
         */
        Xpp(p_p0br); Xp(p_szpt); Xpp(p_addr);
#ifdef vax
        mtpr(P0BR, u.u_pcb.pcb_p0br);
        mtpr(P1BR, u.u_pcb.pcb_p1br);
        mtpr(P0LR, u.u_pcb.pcb_p0lr &~ AST_CLR);
        mtpr(P1LR, u.u_pcb.pcb_p1lr);
#endif
        /*
         * Now running on the ``other'' set of page tables.
         * Flush translation to ensure that we get the correct u.
         * Resurrect the u. for the other process in the other
         * (our old) set of page tables.  Thus the other u. has moved
         * from its old (our current) set of page tables to our old
         * (its current) set of page tables, while we have kept our
         * u. by mapping it into the other page table and then keeping
         * the other page table.
         */
#ifdef vax
        mtpr(TBIA, 0);
#endif
        for (i = 0; i < UPAGES; i++) {
                int pf;
                struct pte *pte;

                pf = umap[i].pg_pfnum;
                if (up == &u)
                        pte = &q->p_addr[i];
                else
                        pte = &p->p_addr[i];
                pte->pg_pfnum = pf;
        }
#ifdef vax
        mtpr(TBIA, 0);
#endif
        splx(s);
}
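
/*
 * Illustrative sketch, not part of the original source.  The Xu/Xup
 * and Xp/Xpp macros above swap one named field between two
 * structures through a caller-supplied temporary; the pointer
 * variants exist only because the temporary needs a pointer type.
 * This stand-alone program (struct and field names invented) shows
 * the same macro shape.
 */
#ifdef notdef
#include <stdio.h>

struct box {
        int b_len;
        char *b_name;
};

/* swap field a between boxes p and q; t and tp declared by the caller */
#define Xi(a)   t = p->a; p->a = q->a; q->a = t;
#define Xs(a)   tp = p->a; p->a = q->a; q->a = tp;

int
main(void)
{
        struct box one = { 1, "one" }, two = { 2, "two" };
        struct box *p = &one, *q = &two;
        int t;
        char *tp;

        Xi(b_len); Xs(b_name);
        printf("one = { %d, %s }, two = { %d, %s }\n",
            one.b_len, one.b_name, two.b_len, two.b_name);
        return (0);
}
#endif /* notdef */
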
/*
 * Compute the number of pages to be allocated to the u. area
 * and data and stack area page tables, which are stored on the
 * disk immediately after the u. area.
 */
/*ARGSUSED*/
vusize(p, utl)
        register struct proc *p;
        struct user *utl;
{
        register int tsz = p->p_tsize / NPTEPG;

        /*
         * We do not need page table space on the disk for page
         * table pages wholly containing text.  This is well
         * understood in the code in vmswap.c.
         */
        return (clrnd(UPAGES +
            clrnd(ctopt(p->p_tsize + p->p_dsize + p->p_ssize + UPAGES)) - tsz));
}

/*
 * Get u. area for process p.  If an old u. area is given,
 * then copy the new area from the old; else
 * swap in as specified in the proc structure.
 *
 * Since the arguments map/newu are potentially shared
 * when an old u. is provided, we have to be careful not
 * to block after beginning to use them in that case.
 * (This is not true when called from swapin() with no old u.)
 */
vgetu(p, palloc, map, newu, oldu)
        register struct proc *p;
        int (*palloc)();
        register struct pte *map;
        register struct user *newu;
        struct user *oldu;
{
        register int i;

        if ((*palloc)(p->p_addr, clrnd(UPAGES), p, CSYS) == 0)
                return (0);
        /*
         * New u. pages are to be accessible in map/newu as well
         * as in process p's virtual memory.
         */
        for (i = 0; i < UPAGES; i++) {
                map[i] = p->p_addr[i];
                *(int *)(p->p_addr + i) |= PG_URKW | PG_V;
        }
        setredzone(p->p_addr, (caddr_t)0);
        vmaccess(map, (caddr_t)newu, UPAGES);
        /*
         * New u.'s come from forking or inswap.
         */
        if (oldu) {
                bcopy((caddr_t)oldu, (caddr_t)newu, UPAGES * NBPG);
                newu->u_procp = p;
        } else {
                (void)swap(p, p->p_swaddr, (caddr_t)0, ctob(UPAGES),
                    B_READ, B_UAREA, swapdev, 0);
                if (
#ifdef vax
                    newu->u_pcb.pcb_ssp != -1 || newu->u_pcb.pcb_esp != -1 ||
#endif
                    newu->u_tsize != p->p_tsize || newu->u_dsize != p->p_dsize ||
                    newu->u_ssize != p->p_ssize || newu->u_procp != p)
                        panic("vgetu");
        }
        /*
         * Initialize the pcb copies of the p0 and p1 region bases and
         * software page table size from the information in the proc structure.
         */
        newu->u_pcb.pcb_p0br = p->p_p0br;
        newu->u_pcb.pcb_p1br = initp1br(p->p_p0br + p->p_szpt * NPTEPG);
        newu->u_pcb.pcb_szpt = p->p_szpt;
        return (1);
}

/*
 * Release swap space for a u. area.
 */
vrelswu(p, utl)
        struct proc *p;
        struct user *utl;
{

        rmfree(swapmap, (long)ctod(vusize(p, utl)), p->p_swaddr);
        /* p->p_swaddr = 0; */          /* leave for post-mortems */
}

/*
 * Get swap space for a u. area.
 */
vgetswu(p, utl)
        struct proc *p;
        struct user *utl;
{

        p->p_swaddr = rmalloc(swapmap, (long)ctod(vusize(p, utl)));
        return (p->p_swaddr);
}

/*
 * Release u. area, swapping it out if desired.
 */
vrelu(p, swapu)
        register struct proc *p;
        int swapu;
{
        register int i;
        struct pte uu[UPAGES];

        if (swapu)
                (void)swap(p, p->p_swaddr, (caddr_t)0, ctob(UPAGES),
                    B_WRITE, B_UAREA, swapdev, 0);
        for (i = 0; i < UPAGES; i++)
                uu[i] = p->p_addr[i];
        /*
         * If freeing the user structure and kernel stack
         * for the current process, we have to run a bit longer
         * using the pages which have already been freed...
         * block memory allocation from the network by raising ipl.
         */
        if (p == u.u_procp)
                (void) splimp();                /* XXX */
        (void) vmemfree(uu, clrnd(UPAGES));
}
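
/*
 * Illustrative sketch, not part of the original source.  The
 * vusize() expression above rounds the page-table requirement to
 * click (CLSIZE) multiples and then discounts the page-table pages
 * that hold only text.  The stand-alone program below redoes that
 * arithmetic with sample sizes; the constants are assumed stand-ins
 * rather than any particular machine's, and clrnd()/ctopt() are
 * re-declared here with their usual definitions.
 */
#ifdef notdef
#include <stdio.h>

#define CLSIZE  2                       /* assumed pages per click */
#define NPTEPG  128                     /* assumed ptes per page-table page */
#define UPAGES  8                       /* assumed size of u. in pages */
#define clrnd(i)        (((i) + CLSIZE - 1) & ~(CLSIZE - 1))
#define ctopt(x)        (((x) + NPTEPG - 1) / NPTEPG)

static int
usize(int tsize, int dsize, int ssize)  /* mirrors vusize() */
{
        int tsz = tsize / NPTEPG;       /* pt pages wholly containing text */

        return (clrnd(UPAGES +
            clrnd(ctopt(tsize + dsize + ssize + UPAGES)) - tsz));
}

int
main(void)
{
        /* 300 text, 200 data, 40 stack pages: ctopt gives 5, clrnd 6,
           minus 2 all-text pt pages, plus UPAGES, rounded: prints 12 */
        printf("usize = %d pages\n", usize(300, 200, 40));
        return (0);
}
#endif /* notdef */
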
#ifdef unneeded
int ptforceswap;
#endif

/*
 * Expand a page table, assigning new kernel virtual
 * space and copying the page table entries over both
 * in the system map and as necessary in the user page table space.
 */
ptexpand(change, ods, oss)
        register int change;
        size_t ods, oss;
{
        register struct pte *p1, *p2;
        register int i;
        register int spages, ss = P1PAGES - u.u_pcb.pcb_p1lr;
        register int kold = btokmx(u.u_pcb.pcb_p0br);
        int knew, tdpages;
        int szpt = u.u_pcb.pcb_szpt;
        int s;

        if (change <= 0 || change % CLSIZE)
                panic("ptexpand");
        /*
         * Change is the number of new page table pages needed.
         * Kold is the old index in the kernelmap of the page tables.
         * Allocate a new kernel map segment of size szpt+change for
         * the page tables, and the new page table pages in the
         * middle of this new region.
         */
top:
#ifdef unneeded
        if (ptforceswap)
                goto bad;
#endif
        if ((knew = rmalloc(kernelmap, (long)(szpt+change))) == 0)
                goto bad;
        spages = ss/NPTEPG;
        tdpages = szpt - spages;
        if (memall(&Usrptmap[knew+tdpages], change, u.u_procp, CSYS) == 0) {
                rmfree(kernelmap, (long)(szpt+change), (long)knew);
                goto bad;
        }

        /*
         * Spages pages of u.+stack page tables go over unchanged.
         * Tdpages of text+data page table may contain a few stack
         * pages which need to go in one of the newly allocated pages;
         * this is a rough cut.
         */
        kmcopy(knew, kold, tdpages);
        kmcopy(knew+tdpages+change, kold+tdpages, spages);

        /*
         * Validate and clear the newly allocated page table pages in the
         * center of the new region of the kernelmap.
         */
        i = knew + tdpages;
        p1 = &Usrptmap[i];
        p2 = p1 + change;
        while (p1 < p2) {
                /* tptov BELOW WORKS ONLY FOR VAX */
                mapin(p1, tptov(u.u_procp, i), p1->pg_pfnum, 1,
                    (int)(PG_V|PG_KW));
                clearseg(p1->pg_pfnum);
                p1++;
                i++;
        }
#ifdef vax
        mtpr(TBIA, 0);
#endif

        /*
         * Move the stack and u. pte's which are before the newly
         * allocated pages into the last of the newly allocated pages.
         * They are taken from the end of the current p1 region,
         * and moved to the end of the new p1 region.
         */
        p1 = u.u_pcb.pcb_p1br + u.u_pcb.pcb_p1lr;
        p2 = initp1br(kmxtob(knew+szpt+change)) + u.u_pcb.pcb_p1lr;
        for (i = kmxtob(kold+szpt) - p1; i != 0; i--)
                *p2++ = *p1++;

        /*
         * Now switch to the new page tables.
         */
#ifdef vax
        mtpr(TBIA, 0);                  /* paranoid */
#endif
        s = splhigh();                  /* conservative */
        u.u_procp->p_p0br = kmxtob(knew);
        setp0br(u.u_procp->p_p0br);
        u.u_pcb.pcb_p1br = initp1br(kmxtob(knew+szpt+change));
        setp1br(u.u_pcb.pcb_p1br);
        u.u_pcb.pcb_szpt += change;
        u.u_procp->p_szpt += change;
        u.u_procp->p_addr = uaddr(u.u_procp);
#ifdef vax
        mtpr(TBIA, 0);
#endif
        splx(s);

        /*
         * Finally, free the old kernelmap.
         */
        if (szpt)
                rmfree(kernelmap, (long)szpt, (long)kold);
        return;

bad:
        /*
         * Swap out the process so that the unavailable
         * resource will be allocated upon swapin.
         *
         * When resume is executed for the process,
         * here is where it will resume.
         */
        resume(pcbb(u.u_procp));
        if (savectx(&u.u_ssave))
                return;
        if (swapout(u.u_procp, ods, oss) == 0) {
                /*
                 * No space to swap... it is inconvenient to try
                 * to exit, so just wait a bit and hope something
                 * turns up.  Could deadlock here.
                 *
                 * SOMEDAY REFLECT ERROR BACK THROUGH expand TO CALLERS
                 * (grow, sbreak) SO CAN'T DEADLOCK HERE.
                 */
                sleep((caddr_t)&lbolt, PRIBIO);
                goto top;
        }
        /*
         * Set the SSWAP bit, so that when the process is swapped back in
         * swapin will set u.u_pcb.pcb_sswap to u_sswap and force a
         * return from the savectx() above.
         */
        u.u_procp->p_flag |= SSWAP;
        swtch();
        /* NOTREACHED */
}

/*
 * Copy count page table map entries from index from to index to
 * in Usrptmap, mapping each one in at its new kernel virtual
 * address as we go.
 */
kmcopy(to, from, count)
        register int to;
        int from;
        register int count;
{
        register struct pte *tp = &Usrptmap[to];
        register struct pte *fp = &Usrptmap[from];

        while (count != 0) {
                mapin(tp, tptov(u.u_procp, to), fp->pg_pfnum, 1,
                    (int)(*((int *)fp) & (PG_V|PG_PROT)));
                tp++;
                fp++;
                to++;
                count--;
        }
}
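
/*
 * Illustrative sketch, not part of the original source.  ptexpand()
 * above grows the map as [text+data | stack+u.] ->
 * [text+data | change new pages | stack+u.], copying the two old
 * runs around the freshly allocated middle and then sliding the
 * trailing stack/u. pte's to the end of the new p1 region.  The
 * stand-alone program below replays only the index bookkeeping on
 * small arrays (sizes and frame numbers invented).
 */
#ifdef notdef
#include <stdio.h>

int
main(void)
{
        int old[6] = { 10, 11, 12, 13, 14, 15 };        /* old pt page frames */
        int new[8];
        int szpt = 6, change = 2, spages = 2;           /* 2 stack/u. pages at the end */
        int tdpages = szpt - spages;
        int i;

        for (i = 0; i < tdpages; i++)                   /* cf. kmcopy(knew, kold, tdpages) */
                new[i] = old[i];
        for (i = 0; i < change; i++)                    /* freshly allocated middle */
                new[tdpages + i] = 100 + i;
        for (i = 0; i < spages; i++)                    /* cf. kmcopy(knew+tdpages+change, ...) */
                new[tdpages + change + i] = old[tdpages + i];
        for (i = 0; i < szpt + change; i++)             /* prints 10 11 12 13 100 101 14 15 */
                printf("new[%d] = %d\n", i, new[i]);
        return (0);
}
#endif /* notdef */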