annotate src/impl/vm_impl_private.cbc @ 306:97c6157bac16

fix_goto_switchuvm_check_pgdirvm_impl

author:   anatofuz
date:     Wed, 05 Feb 2020 15:22:41 +0900
parents:  9fa2e66bc9ed
children: 50fd5d414066
#include "param.h"
#include "proc.h"
#include "mmu.h"
#include "defs.h"
#include "memlayout.h"
#interface "vm_impl.h"

/*
vm_impl* createvm_impl2(); //:skip
*/

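/*
 * The gears below unfold xv6's loaduvm() into one code gear per check.
 * As a plain-C sketch of the control flow they implement (not CbC
 * itself), the original loop is roughly:
 *
 *   for (i = 0; i < sz; i += PTE_SZ) {                 // loaduvm_loopvm_impl
 *       pte = walkpgdir(pgdir, addr + i, 0);           // loaduvm_check_pgdir
 *       pa  = PTE_ADDR(*pte);
 *       n   = (sz - i < PTE_SZ) ? sz - i : PTE_SZ;     // loaduvm_check_PTE_SZ
 *       if (readi(ip, p2v(pa), offset + i, n) != n)
 *           return -1;
 *   }
 *   return 0;                                          // loaduvm_exit
 */
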
__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, char* addr, __code next(int ret, ...)) {
    if ((uint) addr % PTE_SZ != 0) {
        // goto panic
    }

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, uint i, uint sz, __code next(int ret, ...)) {
    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
    }

    goto loaduvm_exit(vm_impl, next(ret, ...));
}


static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory; get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}
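
/*
 * walkpgdir() above is the usual two-level walk: PDE_IDX(va) selects the
 * first-level entry in pgdir, and PTE_IDX(va) selects the entry in the
 * second-level table that entry points to.  With alloc != 0, a missing
 * second-level table is taken from the kernel page-table allocator
 * (kpt_alloc; its CbC counterpart kpt_alloc_check_impl appears below).
 */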


__code loaduvm_check_pgdir(struct vm_impl* vm_impl, pte_t* pte, pde_t* pgdir, uint i, char* addr, uint pa, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        // goto panic
    }
    pa = PTE_ADDR(*pte);

    vm_impl->pte = pte;
    vm_impl->pgdir = pgdir;
    vm_impl->addr = addr;
    vm_impl->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}

__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, uint sz, uint i, uint n, struct inode* ip, uint pa, uint offset, __code next(int ret, ...)) {

    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        ret = -1;
        goto next(ret, ...);
    }

    vm_impl->n = n;

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    ret = 0;
    goto next(ret, ...);
}
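
/*
 * None of these gears return in the C sense: goto next(ret, ...) jumps
 * to whatever continuation the caller passed in through the vm_impl
 * interface, handing over ret.  The chain therefore keeps no stack
 * frames between the entry gear and loaduvm_exit.
 */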

struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;


static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}
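
/*
 * Each entry written above is an ARM second-level small-page descriptor:
 * the physical page address, the access permissions (ap & 0x3) placed in
 * AP[1:0] at bits 5:4, the cacheable/bufferable bits PE_CACHE and PE_BUF,
 * and the small-page type bits PTE_TYPE.
 */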

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;
    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc(PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}
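
/*
 * Page tables are served from kpt_mem's freelist when possible, falling
 * back to kmalloc(PT_ORDER); either way the table is zeroed so no stale
 * entries appear present.  The release() here pairs with an acquire of
 * kpt_mem.lock taken before this gear is entered.
 */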

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)) {
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)) {

    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }
    ret = newsz;
    goto next(ret, ...);
}
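
/*
 * The two gears above are the tail-call form of xv6's allocuvm() loop;
 * as a plain-C sketch:
 *
 *   for (a = align_up(oldsz, PTE_SZ); a < newsz; a += PTE_SZ) {
 *       mem = alloc_page();
 *       if (mem == 0) { deallocuvm(pgdir, newsz, oldsz); return 0; }
 *       memset(mem, 0, PTE_SZ);
 *       mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
 *   }
 *   return newsz;
 */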

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        // panic("clearpteu");
        // goto panic;
    }

    // in ARM, we change the AP field ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}

__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL) {
        ret = 0;
        goto next(ret, ...);
    }
    i = 0;

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
    }
    ret = (int) d;    // the new page directory travels through the int-typed ret
    goto next(ret, ...);
}

__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        // panic("copyuvm: pte should exist");
        // goto panic();
    }
    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (!(*pte & PE_TYPES)) {
        // panic("copyuvm: page not present");
        // goto panic();
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR(*pte);
    ap = PTE_AP(*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i + PTE_SZ, ap, mem, next(ret, ...));  // advance to the next page
}

__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}
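
/*
 * Taken together, the copyuvm gears mirror xv6's copyuvm(): walk the
 * parent's pages one PTE_SZ step at a time, allocate a fresh page, copy
 * the contents with memmove(), and map it into the new directory d with
 * the parent's access permissions; any failure funnels into
 * copyuvm_loop_bad, which frees d and reports 0.
 */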


__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }
    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}

__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }
    ret = (int) p2v(PTE_ADDR(*pte));    // kernel virtual address, carried through the int-typed ret
    goto next(ret, ...);
}

// flush all TLB
static void flush_tlb (void)
{
    uint val = 0;
    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);

    // invalidate the entire data and instruction caches
    asm("MCR p15, 0, %[r], c7, c10, 0" : :[r]"r" (val):);
    asm("MCR p15, 0, %[r], c7, c11, 0" : :[r]"r" (val):);
}
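
/*
 * These are CP15 maintenance operations: the c8,c7,0 write invalidates
 * the entire unified TLB, and the two c7 writes are cache-maintenance
 * operations on the data/unified caches in the ARMv6 encoding.  The
 * precise effect is core-specific; see the ARM reference manual.
 */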

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    mappages(P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb();

    goto next(...);
}

__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);
        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
    }
    ret = 0;
    goto next(ret, ...);

}

__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (pa0 == 0) {
        ret = -1;
        goto next(ret, ...);
    }
    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
}
__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(...)) {
    uint n = PTE_SZ - (va - va0);

    if (n > len) {
        n = len;
    }

    memmove(pa0 + (va - va0), pp, n);   // the actual copy, as in xv6's copyout()

    len -= n;
    pp = (char*) pp + n;
    va = va0 + PTE_SZ;
    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
}
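
/*
 * This is xv6's copyout() split across three gears; the equivalent
 * plain-C loop, for reference:
 *
 *   while (len > 0) {
 *       va0 = align_dn(va, PTE_SZ);
 *       pa0 = uva2ka(pgdir, (char*) va0);
 *       if (pa0 == 0) return -1;
 *       n = PTE_SZ - (va - va0);
 *       if (n > len) n = len;
 *       memmove(pa0 + (va - va0), buf, n);
 *       len -= n; buf += n; va = va0 + PTE_SZ;
 *   }
 *   return 0;
 */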
347 | |
226 | 348 typedef struct proc proc_struct; |
349 __code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip | |
350 uint val; | |
351 | |
352 pushcli(); | |
353 | |
354 if (p->pgdir == 0) { | |
355 panic("switchuvm: no pgdir"); | |
356 } | |
357 | |
358 val = (uint) V2P(p->pgdir) | 0x00; | |
359 | |
360 asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):); | |
361 flush_tlb(); | |
362 | |
363 popcli(); | |
364 | |
365 goto next(...); | |
366 } | |
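
/*
 * The MCR p15, c2, c0, 0 write above loads the physical address of the
 * process page directory into the translation table base register
 * (TTBR0); the flush_tlb() that follows discards translations cached
 * from the previous address space.
 */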

__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
    char* mem;

    if (sz >= PTE_SZ) {
        // goto panic;
        // panic("inituvm: more than a page");
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);

    goto next(...);
}