Mercurial > hg > Members > menikon > CbC_xv6
annotate src/impl/vm_impl_private.cbc @ 304:9fa2e66bc9ed
comment at vm_impl private field
author | anatofuz |
---|---|
date | Wed, 05 Feb 2020 14:11:58 +0900 |
parents | 4d76280758db |
children | 97c6157bac16 |
rev | line source |
---|---|
226 | 1 #include "param.h" |
2 #include "proc.h" | |
206 | 3 #include "mmu.h" |
222 | 4 #include "defs.h" |
206 | 5 #include "memlayout.h" |
200 | 6 #interface "vm_impl.h" |
7 | |
206 | 8 /* |
281
4d76280758db
move context.pm to util.pm ...
anatofuz <anatofuz@cr.ie.u-ryukyu.ac.jp>
parents:
270
diff
changeset
|
9 vm_impl* createvm_impl2(); //:skip |
206 | 10 */ |
11 | |
304 | 12 __code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl,char* addr, __code next(int ret, ...)) { |
13 //char* addr = vm_impl->addr; | |
206 | 14 |
208 | 15 if ((uint) addr %PTE_SZ != 0) { |
206 | 16 // goto panic |
17 } | |
18 | |
219 | 19 goto loaduvm_loopvm_impl(vm_impl, next(ret, ...)); |
200 | 20 } |
21 | |
304 | 22 __code loaduvm_loopvm_impl(struct vm_impl* vm_impl, uint i, uint sz,__code next(int ret, ...)) { |
23 /* | |
208 | 24 uint i = vm_impl->i; |
25 uint sz = vm_impl->sz; | |
304 | 26 */ |
206 | 27 |
208 | 28 if (i < sz) { |
219 | 29 goto loaduvm_check_pgdir(vm_impl, next(ret, ...)); |
206 | 30 } |
208 | 31 |
219 | 32 goto loaduvm_exit(vm_impl, next(ret, ...)); |
200 | 33 } |
208 | 34 |
35 | |
206 | 36 static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc) |
37 { | |
38 pde_t *pde; | |
39 pte_t *pgtab; | |
40 | |
41 // pgdir points to the page directory, get the page direcotry entry (pde) | |
42 pde = &pgdir[PDE_IDX(va)]; | |
43 | |
44 if (*pde & PE_TYPES) { | |
45 pgtab = (pte_t*) p2v(PT_ADDR(*pde)); | |
46 | |
47 } else { | |
48 if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) { | |
49 return 0; | |
50 } | |
51 | |
52 // Make sure all those PTE_P bits are zero. | |
53 memset(pgtab, 0, PT_SZ); | |
54 | |
55 // The permissions here are overly generous, but they can | |
56 // be further restricted by the permissions in the page table | |
57 // entries, if necessary. | |
58 *pde = v2p(pgtab) | UPDE_TYPE; | |
59 } | |
60 | |
61 return &pgtab[PTE_IDX(va)]; | |
62 } | |
63 | |
64 | |
/*
 * loaduvm: resolve the PTE backing addr+i and record the resulting
 * physical address in vm_impl before the read stage.
 * NOTE(review): when walkpgdir returns 0 the panic is commented out,
 * so PTE_ADDR(*pte) below would dereference a null pte.
 */
__code loaduvm_check_pgdir(struct vm_impl* vm_impl, pte_t* pte, pde_t* pgdir, uint i, char* addr, uint pa, __code next(int ret, ...)) {
  /*
    pte_t* pte = vm_impl->pte;
    pde_t* pgdir = vm_impl->pgdir;
    uint i = vm_impl->i;
    char* addr = vm_impl->addr;
    uint pa = vm_impl->pa;
  */

  if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
    // goto panic
  }
  pa = PTE_ADDR(*pte);

  // stash the resolved state for the later stages
  vm_impl->pte = pte;
  vm_impl->pgdir = pgdir;
  vm_impl->addr = addr;
  vm_impl->pa = pa;

  goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}
86 | |
304 | 87 __code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, uint sz, uint i, uint n, struct inode* ip, uint pa, uint offset, __code next(int ret, ...)) { |
88 /* | |
208 | 89 uint sz = vm_impl->sz; |
90 uint i = vm_impl->i; | |
91 uint n = vm_impl->n; | |
92 struct inode* ip = vm_impl->ip; | |
93 uint pa = vm_impl->pa; | |
94 uint offset = vm_impl->offset; | |
304 | 95 */ |
208 | 96 |
97 if (sz - i < PTE_SZ) { | |
98 n = sz - i; | |
207 | 99 } else { |
208 | 100 n = PTE_SZ; |
207 | 101 } |
102 | |
208 | 103 if (readi(ip, p2v(pa), offset + i, n) != n) { |
219 | 104 ret = -1; |
105 goto next(ret, ...); | |
207 | 106 } |
107 | |
208 | 108 vm_impl->n = n; |
109 | |
219 | 110 goto loaduvm_loopvm_impl(vm_impl, next(ret, ...)); |
206 | 111 } |
112 | |
219 | 113 __code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) { |
114 ret = 0; | |
115 goto next(ret, ...); | |
206 | 116 } |
211 | 117 |
// Free-list node for recycled kernel page-table pages.
struct run {
  struct run *next;
};

// Allocator state for kernel page-table pages: a spinlock plus the
// free list consumed by kpt_alloc_check_impl below.
struct {
  struct spinlock lock;
  struct run* freelist;
} kpt_mem;
126 | |
215 | 127 |
128 static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap) | |
129 { | |
130 char *a, *last; | |
131 pte_t *pte; | |
132 | |
133 a = (char*) align_dn(va, PTE_SZ); | |
134 last = (char*) align_dn((uint)va + size - 1, PTE_SZ); | |
135 | |
136 for (;;) { | |
137 if ((pte = walkpgdir(pgdir, a, 1)) == 0) { | |
138 return -1; | |
139 } | |
140 | |
141 if (*pte & PE_TYPES) { | |
142 panic("remap"); | |
143 } | |
144 | |
145 *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE; | |
146 | |
147 if (a == last) { | |
148 break; | |
149 } | |
150 | |
151 a += PTE_SZ; | |
152 pa += PTE_SZ; | |
153 } | |
154 | |
155 return 0; | |
156 } | |
157 | |
/*
 * Allocate one kernel page-table page and hand it (zeroed) to `next`
 * as a char*.  Takes a page from kpt_mem.freelist when available,
 * falling back to kmalloc(PT_ORDER).
 * NOTE(review): kpt_mem.lock is released here but never acquired in
 * this stage — presumably the caller holds it; confirm against the
 * kpt_alloc interface.
 * NOTE(review): if both the freelist and kmalloc fail, the panic is
 * commented out and the memset below dereferences a NULL r.
 */
__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
  struct run* r;
  if ((r = kpt_mem.freelist) != NULL ) {
    kpt_mem.freelist = r->next;
  }
  release(&kpt_mem.lock);

  if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
    // panic("oom: kpt_alloc");
    // goto panic
  }

  memset(r, 0, PT_SZ);
  goto next((char*)r);
}
214 | 173 |
174 __code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){ | |
175 if (newsz >= UADDR_SZ) { | |
176 goto next(0, ...); | |
177 } | |
178 | |
179 if (newsz < oldsz) { | |
180 ret = newsz; | |
215 | 181 goto next(ret, ...); |
214 | 182 } |
183 | |
215 | 184 char* mem; |
214 | 185 uint a = align_up(oldsz, PTE_SZ); |
186 | |
215 | 187 goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...)); |
214 | 188 } |
189 | |
215 | 190 __code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){ |
214 | 191 |
192 if (a < newsz) { | |
215 | 193 mem = alloc_page(); |
194 | |
195 if (mem == 0) { | |
196 cprintf("allocuvm out of memory\n"); | |
197 deallocuvm(pgdir, newsz, oldsz); | |
216 | 198 goto next(0, ...); |
215 | 199 } |
200 | |
201 memset(mem, 0, PTE_SZ); | |
202 mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU); | |
203 | |
204 goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, a + PTE_SZ, next(ret, ...)); | |
214 | 205 } |
215 | 206 ret = newsz; |
207 goto next(ret, ...); | |
214 | 208 } |
216 | 209 |
/*
 * Remove user access to the page at user virtual address `uva` by
 * rewriting the AP field of its PTE to AP_KO (kernel-only).  Used to
 * create an inaccessible guard page.
 * NOTE(review): when walkpgdir returns 0 the panic is commented out,
 * so *pte below dereferences NULL.
 * NOTE(review): `ret` is forwarded to next without being assigned here.
 */
__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
  pte_t *pte;

  pte = walkpgdir(pgdir, uva, 0);
  if (pte == 0) {
    // panic("clearpteu");
    // goto panic;
  }

  // in ARM, we change the AP field (ap & 0x3) << 4)
  *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

  goto next(ret, ...);
}
217 | 224 |
225 __code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) { | |
226 pde_t *d; | |
227 pte_t *pte; | |
228 uint pa, i, ap; | |
229 char *mem; | |
230 | |
231 // allocate a new first level page directory | |
232 d = kpt_alloc(); | |
233 if (d == NULL ) { | |
234 ret = NULL; | |
235 goto next(ret, ...); | |
236 } | |
237 i = 0; | |
238 | |
239 goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, *d, *pte, pa, i, ap, *mem, next(ret, ...)); | |
240 } | |
241 | |
242 __code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) { | |
243 | |
244 if (i < sz) { | |
218 | 245 goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...)); |
217 | 246 |
247 } | |
248 ret = d; | |
249 goto next(ret, ...); | |
250 } | |
251 | |
218 | 252 __code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) { |
253 if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) { | |
254 // panic("copyuvm: pte should exist"); | |
255 // goto panic(); | |
256 } | |
257 goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...)); | |
258 } | |
259 | |
260 __code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) { | |
261 | |
262 if (!(*pte & PE_TYPES)) { | |
263 // panic("copyuvm: page not present"); | |
264 // goto panic(); | |
265 } | |
266 | |
267 goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...)); | |
268 } | |
217 | 269 |
218 | 270 __code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) { |
271 pa = PTE_ADDR (*pte); | |
272 ap = PTE_AP (*pte); | |
273 | |
274 if ((mem = alloc_page()) == 0) { | |
275 goto copyuvm_loop_bad(vm_impl, d, next(...)); | |
276 } | |
277 goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...)); | |
278 | |
279 } | |
280 | |
281 __code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) { | |
282 | |
283 memmove(mem, (char*) p2v(pa), PTE_SZ); | |
284 | |
285 if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) { | |
286 goto copyuvm_loop_bad(vm_impl, d, next(...)); | |
287 } | |
288 goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...)); | |
289 | |
217 | 290 } |
291 | |
292 __code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) { | |
293 freevm(d); | |
294 ret = 0; | |
295 goto next(ret, ...); | |
296 } | |
220 | 297 |
298 | |
299 __code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) { | |
300 pte_t* pte; | |
301 | |
302 pte = walkpgdir(pgdir, uva, 0); | |
303 | |
304 // make sure it exists | |
305 if ((*pte & PE_TYPES) == 0) { | |
306 ret = 0; | |
307 goto next(ret, ...); | |
308 } | |
309 goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...)); | |
310 } | |
311 | |
/*
 * uva2ka stage 2: only user-accessible (AP_KU) pages may be translated;
 * anything else yields 0.  On success, returns the kernel virtual
 * address of the page frame through next.
 * NOTE(review): `ret` is typed int but receives a char* on the success
 * path — inherited from the original uva2ka contract.
 */
__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
  // make sure it is a user page
  if (PTE_AP(*pte) != AP_KU) {
    ret = 0;
    goto next(ret, ...);
  }
  ret = (char*) p2v(PTE_ADDR(*pte));
  goto next(ret, ...);
}
321 | |
// flush all TLB
static void flush_tlb (void)
{
  uint val = 0;
  // CP15 c8, c7, 0: invalidate the entire unified TLB (TLBIALL)
  asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);

  // invalid entire data and instruction cache
  asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
  asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
}
332 | |
/*
 * Map the physical range [phy_low, phy_hi) into the kernel page table
 * at its P2V alias, then flush the TLB so the mapping takes effect.
 * NOTE(review): mappages' return value is not checked here.
 */
__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
  mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
  flush_tlb ();

  goto next(...);
}
339 | |
224 | 340 __code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) { |
341 if (len > 0) { | |
342 va0 = align_dn(va, PTE_SZ); | |
343 pa0 = uva2ka(pgdir, (char*) va0); | |
344 goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...)); | |
345 } | |
346 ret = 0; | |
347 goto next(ret, ...); | |
223 | 348 |
349 } | |
350 | |
224 | 351 __code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) { |
352 if (pa0 == 0) { | |
353 ret = -1; | |
354 goto next(ret, ...); | |
355 } | |
356 goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, buf, next(...)); | |
357 } | |
358 __code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) { | |
359 n = PTE_SZ - (va - va0); | |
223 | 360 |
224 | 361 if (n > len) { |
362 n = len; | |
363 } | |
364 | |
365 len -= n; | |
366 buf += n; | |
367 va = va0 + PTE_SZ; | |
368 goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...)); | |
369 } | |
370 | |
// short alias so struct proc can appear in the CbC signature below
typedef struct proc proc_struct;

/*
 * Switch the MMU to process p's page directory and flush the TLB.
 * Interrupts are disabled across the switch via pushcli/popcli.
 * Panics if p has no page directory.
 */
__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
  uint val;

  pushcli();

  if (p->pgdir == 0) {
    panic("switchuvm: no pgdir");
  }

  val = (uint) V2P(p->pgdir) | 0x00;

  // CP15 c2, c0, 0: load the translation table base register with the
  // physical address of the new page directory
  asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
  flush_tlb();

  popcli();

  goto next(...);
}
227 | 390 |
/*
 * Load the initcode image `init` (sz bytes, must be less than one
 * page) at virtual address 0 of `pgdir` for the first process.
 * NOTE(review): the size check's panic is commented out, so an
 * oversized image would currently fall through.
 * NOTE(review): alloc_page() is not checked for failure before memset.
 */
__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
  char* mem;

  if (sz >= PTE_SZ) {
    // goto panic;
    // panic("inituvm: more than a page");
  }

  mem = alloc_page();
  memset(mem, 0, PTE_SZ);
  mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
  memmove(mem, init, sz);

  goto next(...);
}
269 | 406 |