#include "param.h"
#include "types.h"
#include "defs.h"
#include "arm.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"
#include "elf.h"

extern char data[];  // defined by kernel.ld
pde_t *kpgdir;       // for use in scheduler()

// Xv6 can only allocate memory in 4KB blocks. This is fine
// for x86. ARM's page table and page directory (for the 28-bit
// user address space) are only 1KB each. kpt_alloc/kpt_free wrap
// the allocator so that page tables can be allocated during boot
// (from the initial kernel map) and during runtime (from the
// buddy memory allocator).
struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run *freelist;
} kpt_mem;

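// Initialize the page-table allocator: set up its lock and start with an
// empty free list.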
void init_vmm (void)
{
    initlock(&kpt_mem.lock, "vm");
    kpt_mem.freelist = NULL;
}

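// Put the page-table block v back on the free list. No locking here:
// kpt_free takes kpt_mem.lock before calling this, and kpt_freerange
// runs during initialization.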
static void _kpt_free (char *v)
{
    struct run *r;

    r = (struct run*) v;
    r->next = kpt_mem.freelist;
    kpt_mem.freelist = r;
}

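// Free a page-table block. Blocks at or above P2V(INIT_KERNMAP) came from
// the buddy allocator, so hand them back with kfree; anything below belongs
// to the boot-time pool and goes back on the free list.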
static void kpt_free (char *v)
{
    if (v >= (char*)P2V(INIT_KERNMAP)) {
        kfree(v, PT_ORDER);
        return;
    }

    acquire(&kpt_mem.lock);
    _kpt_free (v);
    release(&kpt_mem.lock);
}

// Add a range of memory to the page-table free list (initialization code)
void kpt_freerange (uint32 low, uint32 hi)
{
    while (low < hi) {
        _kpt_free ((char*)low);
        low += PT_SZ;
    }
}

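// Allocate a zeroed block for a page table: take one from the boot-time
// free list if possible, otherwise fall back to the buddy allocator.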
void* kpt_alloc (void)
{
    struct run *r;

    acquire(&kpt_mem.lock);

    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }

    release(&kpt_mem.lock);

    // Allocate a PT page if no initial page is available
    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
        panic("oom: kpt_alloc");
    }

    memset(r, 0, PT_SZ);
    return (char*) r;
}

// Return the address of the PTE in the second-level page table that
// corresponds to virtual address va. If alloc!=0, create any required
// page table pages.
static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory; get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all the PTE type bits are zero (all entries invalid).
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}

// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned.
static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

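        // Build a small-page descriptor: physical base address, access
        // permissions (AP field in bits 5:4), cache/buffer bits, and the
        // small-page type bits.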
        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}

// Flush the entire TLB
static void flush_tlb (void)
{
    uint val = 0;
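    // c8, c7, 0: invalidate the entire unified TLB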
154 asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);
|
|
155
|
|
156 // invalid entire data and instruction cache
|
|
157 asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
|
|
158 asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
|
|
159 }
|
|
160
|
|
// Switch to the user page table (TTBR0)
void switchuvm (struct proc *p)
{
    uint val;

    pushcli();

    if (p->pgdir == 0) {
        panic("switchuvm: no pgdir");
    }

    val = (uint) V2P(p->pgdir) | 0x00;

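    // load TTBR0 (translation table base register 0) with the physical
    // address of the process's page directory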
174 asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
|
|
175 flush_tlb();
|
|
176
|
|
177 popcli();
|
|
178 }
|
|
179
|
|
// Load the initcode into address 0 of pgdir. sz must be less than a page.
void inituvm (pde_t *pgdir, char *init, uint sz)
{
    char *mem;

    if (sz >= PTE_SZ) {
        panic("inituvm: more than a page");
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);
}

// Load a program segment into pgdir. addr must be page-aligned
// and the pages from addr to addr+sz must already be mapped.
int loaduvm (pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
{
    uint i, pa, n;
    pte_t *pte;

    if ((uint) addr % PTE_SZ != 0) {
        panic("loaduvm: addr must be page aligned");
    }

    for (i = 0; i < sz; i += PTE_SZ) {
        if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
            panic("loaduvm: address should exist");
        }

        pa = PTE_ADDR(*pte);

        if (sz - i < PTE_SZ) {
            n = sz - i;
        } else {
            n = PTE_SZ;
        }

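        // read this chunk of the segment directly into the physical page
        // backing the user address, via its kernel virtual mapping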
        if (readi(ip, p2v(pa), offset + i, n) != n) {
            return -1;
        }
    }

    return 0;
}

// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
int allocuvm (pde_t *pgdir, uint oldsz, uint newsz)
{
    char *mem;
    uint a;

    if (newsz >= UADDR_SZ) {
        return 0;
    }

    if (newsz < oldsz) {
        return oldsz;
    }

    a = align_up(oldsz, PTE_SZ);

    for (; a < newsz; a += PTE_SZ) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            return 0;
        }

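        // zero the new page and map it with AP_KU, i.e. accessible to
        // both kernel and user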
        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
    }

    return newsz;
}

// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
int deallocuvm (pde_t *pgdir, uint oldsz, uint newsz)
{
    pte_t *pte;
    uint a;
    uint pa;

    if (newsz >= oldsz) {
        return oldsz;
    }

    for (a = align_up(newsz, PTE_SZ); a < oldsz; a += PTE_SZ) {
        pte = walkpgdir(pgdir, (char*) a, 0);

        if (!pte) {
            // pte == 0 --> no page table for this entry. Skip to the last
            // page of this page directory entry, so the loop increment
            // lands on the start of the next one.
            a = align_up (a + 1, PDE_SZ) - PTE_SZ;

        } else if ((*pte & PE_TYPES) != 0) {
            pa = PTE_ADDR(*pte);

            if (pa == 0) {
                panic("deallocuvm");
            }

            free_page(p2v(pa));
            *pte = 0;
        }
    }

    return newsz;
}

// Free a page table and all the physical memory pages
// in the user part.
void freevm (pde_t *pgdir)
{
    uint i;
    char *v;

    if (pgdir == 0) {
        panic("freevm: no pgdir");
    }

    // release the user-space memory, but not the page tables
    deallocuvm(pgdir, UADDR_SZ, 0);

    // release the page tables
    for (i = 0; i < NUM_UPDE; i++) {
        if (pgdir[i] & PE_TYPES) {
            v = p2v(PT_ADDR(pgdir[i]));
            kpt_free(v);
        }
    }

    kpt_free((char*) pgdir);
}

// Clear user access on a page (the ARM analogue of clearing PTE_U). Used to
// create an inaccessible page beneath the user stack (to trap stack underflow).
void clearpteu (pde_t *pgdir, char *uva)
{
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        panic("clearpteu");
    }

    // on ARM, we change the AP field ((ap & 0x3) << 4) to kernel-only
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;
}

// Given a parent process's page table, create a copy
// of it for a child.
pde_t* copyuvm (pde_t *pgdir, uint sz)
{
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first-level page directory
    d = kpt_alloc();
    if (d == NULL) {
        return NULL;
    }

    // copy the whole address space over (no COW)
    for (i = 0; i < sz; i += PTE_SZ) {
        if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
            panic("copyuvm: pte should exist");
        }

        if (!(*pte & PE_TYPES)) {
            panic("copyuvm: page not present");
        }

        pa = PTE_ADDR (*pte);
        ap = PTE_AP (*pte);

        if ((mem = alloc_page()) == 0) {
            goto bad;
        }

        memmove(mem, (char*) p2v(pa), PTE_SZ);

        if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
            goto bad;
        }
    }

    return d;

bad:
    freevm(d);
    return 0;
}

//PAGEBREAK!
// Map a user virtual address to a kernel address.
char* uva2ka (pde_t *pgdir, char *uva)
{
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure a page table exists for this address
    if (pte == 0) {
        return 0;
    }

    // make sure the page exists
    if ((*pte & PE_TYPES) == 0) {
        return 0;
    }

    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        return 0;
    }

    return (char*) p2v(PTE_ADDR(*pte));
}

// Copy len bytes from p to user address va in page table pgdir.
// Most useful when pgdir is not the current page table.
// uva2ka ensures this only works for user pages.
int copyout (pde_t *pgdir, uint va, void *p, uint len)
{
    char *buf, *pa0;
    uint n, va0;

    buf = (char*) p;

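    // copy page by page: translate each user page to a kernel virtual
    // address with uva2ka, then copy up to the end of that page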
    while (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);

        if (pa0 == 0) {
            return -1;
        }

        n = PTE_SZ - (va - va0);

        if (n > len) {
            n = len;
        }

        memmove(pa0 + (va - va0), buf, n);

        len -= n;
        buf += n;
        va = va0 + PTE_SZ;
    }

    return 0;
}

// 1:1 map the memory [phy_low, phy_hi] in the kernel. We need to use
// 2-level mapping for this block of memory. Rumor has it that ARMv6
// cannot handle memory being mapped by both a 1-level (section) entry
// and a 2-level page table. The initial kernel uses 1MB section
// mappings; other memory needs to be mapped as 4KB pages.
void paging_init (uint phy_low, uint phy_hi)
{
    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb ();
}