annotate src/impl/vm_impl_private.cbc @ 323:f0b337cb6024 (default tip)

use goto err->panic

author:  anatofuz <anatofuz@cr.ie.u-ryukyu.ac.jp>
date:    Sat, 08 Feb 2020 20:37:42 +0900
parents: 173753022721
#include "param.h"
#include "proc.h"
#include "mmu.h"
#include "defs.h"
#include "memlayout.h"
#interface "vm_impl.h"
#interface "Err.h"

/*
vm_impl* createvm_impl2(); //:skip
*/

__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, char* addr, __code next(int ret, ...)) {
    if ((uint) addr % PTE_SZ != 0) {
        char* msg = "addr % PTE_SZ != 0";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

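/*
 * Illustrative sketch (not in the original source): the three-line error
 * path above is this file's common idiom.  A wrapper CodeGear could name
 * it; kernel_panic_with is hypothetical, but the calls inside it are
 * exactly the ones used throughout this file.
 */
#if 0
__code kernel_panic_with(char* msg, __code next(...)) {
    struct Err* err = createKernelError(&proc->cbc_context); // allocate Err in the per-process context
    Gearef(cbc_context, Err)->msg = msg;                     // stash the message in the context
    goto meta(cbc_context, err->panic);                      // dispatch to the panic CodeGear
}
#endif
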
__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, uint i, uint sz, __code next(int ret, ...)) {
    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
    }

    goto loaduvm_exit(vm_impl, next(ret, ...));
}

static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory; get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}

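/*
 * Illustrative sketch (not in the original source): how walkpgdir is
 * typically consumed.  va2pa_example is a hypothetical helper; PTE_ADDR,
 * PE_TYPES and PTE_SZ are the same macros used elsewhere in this file.
 */
#if 0
static uint va2pa_example (pde_t *pgdir, const void *va)
{
    pte_t* pte = walkpgdir(pgdir, va, 0);   // 0: do not allocate

    if (pte == 0 || !(*pte & PE_TYPES)) {
        return 0;                           // no mapping for va
    }

    // page frame from the PTE, page offset from the address itself
    return PTE_ADDR(*pte) | ((uint) va & (PTE_SZ - 1));
}
#endif
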
__code loaduvm_check_pgdir(struct vm_impl* vm_impl, pte_t* pte, pde_t* pgdir, uint i, char* addr, uint pa, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        char* msg = "pte != walkpgdir...";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }
    pa = PTE_ADDR(*pte);

    // write the loop variables back into the context before continuing
    Gearef(cbc_context, vm_impl)->pte = pte;
    Gearef(cbc_context, vm_impl)->pgdir = pgdir;
    Gearef(cbc_context, vm_impl)->addr = addr;
    Gearef(cbc_context, vm_impl)->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}

__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, uint sz, uint i, uint n, struct inode* ip, uint pa, uint offset, __code next(int ret, ...)) {

    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        ret = -1;
        goto next(ret, ...);
    }

    // write n back into the context so the next CodeGear sees it
    Gearef(cbc_context, vm_impl)->n = n;

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    ret = 0;
    goto next(ret, ...);
}

// free list of page-table pages, in the style of xv6's kalloc
struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;

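/*
 * Illustrative counterpart (hypothetical helper, not in this file):
 * returning a page-table page to the free list above would be the
 * mirror image of the pop in kpt_alloc_check_impl below.
 */
#if 0
static void kpt_free_example (char* v)
{
    struct run* r = (struct run*) v;

    acquire(&kpt_mem.lock);
    r->next = kpt_mem.freelist;   // push the page onto the free list
    kpt_mem.freelist = r;
    release(&kpt_mem.lock);
}
#endif
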
static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}

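/*
 * Usage sketch (illustrative, not in the original source): map one zeroed
 * page at user address 0 -- the same pattern init_inituvm_check_sz uses at
 * the end of this file.  map_one_page_example is a hypothetical name.
 */
#if 0
static void map_one_page_example (pde_t *pgdir)
{
    char* mem = alloc_page();

    memset(mem, 0, PTE_SZ);
    mappages(pgdir, (void*) 0, PTE_SZ, v2p(mem), AP_KU);
}
#endif
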
__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;
    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
        char* msg = "oom: kpt_alloc";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)) {
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)) {

    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }
    ret = newsz;
    goto next(ret, ...);
}
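
/*
 * Note (added): loops are written as tail-calling CodeGears in this file;
 * the goto back to allocuvm_loopvm_impl above plays the role of the for
 * loop in stock xv6's allocuvm.
 */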

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        char* msg = "clearpteu";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    // in ARM, we change the AP field ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}


__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL) {
        ret = NULL;
        goto next(ret, ...);
    }
    i = 0;

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}


__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
    }

    ret = d;
    goto next(ret, ...);
}


__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        char* msg = "copyuvm: pte should exist";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }
    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}


__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (!(*pte & PE_TYPES)) {
        char* msg = "copyuvm: page not present";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}


__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR(*pte);
    ap = PTE_AP(*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}


__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}


__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}
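
/*
 * Note (added): copyuvm is split into one CodeGear per check; the chain
 * copyuvm_loopvm_impl -> check_walkpgdir -> check_pte -> check_mem ->
 * check_mappages corresponds to one iteration of the copyuvm loop in
 * stock xv6, with copyuvm_loop_bad as the error exit.
 */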


__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }
    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}


__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }
    ret = (char*) p2v(PTE_ADDR(*pte));
    goto next(ret, ...);
}

// flush the entire TLB
static void flush_tlb (void)
{
    uint val = 0;
    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);

    // invalidate the entire data and instruction caches
    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
}

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb ();

    goto next(...);
}

__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);
        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
    }
    ret = 0;
    goto next(ret, ...);
}

__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
    if (pa0 == 0) {
        ret = -1;
        goto next(ret, ...);
    }
    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...));
}

__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
    n = PTE_SZ - (va - va0);

    if (n > len) {
        n = len;
    }

    len -= n;
    buf += n;
    va = va0 + PTE_SZ;

    // write the updated loop variables back into the context
    Gearef(cbc_context, vm_impl)->n = n;
    Gearef(cbc_context, vm_impl)->len = len;
    Gearef(cbc_context, vm_impl)->buf = buf;
    Gearef(cbc_context, vm_impl)->va = va;

    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
}


typedef struct proc proc_struct;
__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
    uint val;

    pushcli();

    if (p->pgdir == 0) {
        char* msg = "switchuvm: no pgdir";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    val = (uint) V2P(p->pgdir) | 0x00;

    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
    flush_tlb();

    popcli();

    goto next(...);
}

__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
    char* mem;

    if (sz >= PTE_SZ) {
        char* msg = "inituvm: more than a page";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);

    goto next(...);
}