annotate src/impl/vm_impl_private.cbc @ 281:4d76280758db
"move context.pm to util.pm ..."

author:   anatofuz <anatofuz@cr.ie.u-ryukyu.ac.jp>
date:     Tue, 28 Jan 2020 16:50:07 +0900
parents:  052669f2ef74
children: 9fa2e66bc9ed
#include "param.h"
#include "proc.h"
#include "mmu.h"
#include "defs.h"
#include "memlayout.h"
#interface "vm_impl.h"

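/* This file implements the vm_impl interface as CbC (Continuation-based C)
 * code gears: each __code function performs one step and then transfers
 * control with goto, so the C loops of the original xv6 vm.c become chains
 * of tail calls. Shared state is carried in struct vm_impl. */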
/*
vm_impl* createvm_impl2(); //:skip
*/

__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    char* addr = vm_impl->addr;

    if ((uint) addr % PTE_SZ != 0) {
        // goto panic
    }

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    uint i = vm_impl->i;
    uint sz = vm_impl->sz;

    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
    }

    goto loaduvm_exit(vm_impl, next(ret, ...));
}


static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory, get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}


__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    pte_t* pte = vm_impl->pte;
    pde_t* pgdir = vm_impl->pgdir;
    uint i = vm_impl->i;
    char* addr = vm_impl->addr;
    uint pa = vm_impl->pa;

    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        // goto panic
    }
    pa = PTE_ADDR(*pte);

    vm_impl->pte = pte;
    vm_impl->pgdir = pgdir;
    vm_impl->addr = addr;
    vm_impl->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}

__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    uint sz = vm_impl->sz;
    uint i = vm_impl->i;
    uint n = vm_impl->n;
    struct inode* ip = vm_impl->ip;
    uint pa = vm_impl->pa;
    uint offset = vm_impl->offset;

    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        ret = -1;
        goto next(ret, ...);
    }

    vm_impl->n = n;
    // advance to the next page before looping (the original xv6 loaduvm
    // steps i by PTE_SZ; restored here, otherwise the loop never terminates)
    vm_impl->i = i + PTE_SZ;

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    ret = 0;
    goto next(ret, ...);
}

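/* The gears above replace xv6's loaduvm(pgdir, addr, ip, offset, sz).
 * A minimal sketch of how a caller might drive the chain (hypothetical;
 * the actual dispatch is generated from vm_impl.h):
 *
 *     goto loaduvm_ptesize_checkvm_impl(vm_impl, next(ret, ...));
 *
 * ptesize_check -> loop -> check_pgdir -> check_PTE_SZ -> loop ... -> exit
 */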
struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;


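// kpt_mem keeps freed page-table pages on a spinlock-protected freelist
// (popped by kpt_alloc_check_impl below).

// Create PTEs for virtual addresses starting at va that refer to physical
// addresses starting at pa (as in the original xv6 vm.c mappages).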
static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;
    // kpt_mem.lock is presumably acquired by the caller gear before the
    // jump here (assumption: the original xv6 kpt_alloc takes the lock
    // around this freelist pop).
    if ((r = kpt_mem.freelist) != NULL ) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){

    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        // pass mem along with the advanced address (the original call
        // dropped the mem argument)
        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }
    ret = newsz;
    goto next(ret, ...);
}

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        // panic("clearpteu");
        // goto panic;
    }

    // in ARM, we change the AP field ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}

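/* copyuvm: clone a process's address space for fork. The gears below split
 * xv6's copyuvm loop into one check per gear:
 *
 *   copyuvm_check_nullvm_impl -> copyuvm_loopvm_impl
 *     -> check_walkpgdir -> check_pte -> check_mem -> check_mappages
 *     -> back to copyuvm_loopvm_impl, or copyuvm_loop_bad on failure
 */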
__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL ) {
        ret = 0;
        goto next(ret, ...);
    }
    i = 0;

    // pass the pointers themselves, not their (still uninitialized) targets
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));

    }
    // hand the new page directory to the continuation (int-sized on ARM)
    ret = (int) d;
    goto next(ret, ...);
}

__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        // panic("copyuvm: pte should exist");
        // goto panic();
    }
    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (!(*pte & PE_TYPES)) {
        // panic("copyuvm: page not present");
        // goto panic();
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR (*pte);
    ap = PTE_AP (*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));

}

__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    // advance to the next page before looping (the original xv6 loop steps
    // i by PTE_SZ; restored here)
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i + PTE_SZ, ap, mem, next(ret, ...));

}

__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}


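// uva2ka: translate user virtual address uva to a kernel address, or return
// 0 if the page is not present or is not a user page (as in xv6's uva2ka).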
__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }
    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}

__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }
    // return the kernel address through the int continuation argument
    ret = (int) p2v(PTE_ADDR(*pte));
    goto next(ret, ...);
}

// flush the entire TLB
static void flush_tlb (void)
{
    uint val = 0;
    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);

    // invalidate entire data and instruction cache
    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
}

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    // map the physical range [phy_low, phy_hi) into the kernel page table
    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb ();

    goto next(...);
}

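/* copyout: copy len bytes from pp to user address va in page table pgdir
 * (as in xv6's copyout). The source cursor is threaded through pp below,
 * since the original gears lost track of it between iterations. */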
__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (len > 0) {
        uint n = 0; // computed in copyout_loop_check_n
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);
        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...));
    }
    ret = 0;
    goto next(ret, ...);

}

__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
    if (pa0 == 0) {
        ret = -1;
        goto next(ret, ...);
    }
    // the source cursor lives in pp; pass it on as the buf argument
    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, (char*) pp, next(...));
}
__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
    n = PTE_SZ - (va - va0);

    if (n > len) {
        n = len;
    }

    // copy this chunk into the mapped page (the memmove is in xv6's copyout
    // but was missing here; restored)
    memmove(pa0 + (va - va0), buf, n);

    len -= n;
    buf += n;
    va = va0 + PTE_SZ;
    goto copyout_loopvm_impl(vm_impl, pgdir, va, (void*) buf, len, va0, pa0, next(...));
}

typedef struct proc proc_struct;
__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
    uint val;

    pushcli();

    if (p->pgdir == 0) {
        panic("switchuvm: no pgdir");
    }

    val = (uint) V2P(p->pgdir) | 0x00;

    // load the new page directory into TTBR0
    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
    flush_tlb();

    popcli();

    goto next(...);
}

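// inituvm: load initcode into address 0 of pgdir; sz must be less than a
// page (as in xv6's inituvm).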
__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
    char* mem;

    if (sz >= PTE_SZ) {
        // goto panic;
        // panic("inituvm: more than a page");
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);

    goto next(...);
}