CbC_xv6: annotate src/impl/vm_impl_private.cbc @ changeset 395:17e8a4bc06a7 (default tip, "add macOS AR/RANLIB")

author:  Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:    Mon, 14 Dec 2020 21:59:50 +0900
parents: 2cbaaa4c74d15
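
/*
 * CbC (Continuation based C) implementation of the private VM code gears
 * for the xv6 ARM port.  A `__code` definition is a code gear: it never
 * returns, and `goto gear(...)` is a tail-call style transfer of control.
 * `goto next(ret, ...)` resumes the caller's continuation, while
 * `Gearef(cbc_context, vm_impl)` accesses this interface's slots in the
 * per-process context so values survive across gear transitions.
 */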
#include "param.h"
#include "proc.h"
#include "mmu.h"
#include "defs.h"
#include "memlayout.h"
#interface "vm_impl.h"
#interface "Err.h"

/*
vm_impl* createvm_impl2();
*/

__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, char* addr, __code next(int ret, ...)) {
    if ((uint) addr % PTE_SZ != 0) {
        char* msg = "addr % PTE_SZ != 0";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}
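
/*
 * Error handling idiom used throughout this file: createKernelError()
 * sets up the Err interface on the current process context, the panic
 * message is stored through Gearef(), and control jumps to the error
 * handler via the meta gear.  Roughly the CbC counterpart of xv6's
 * panic(msg).
 */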

__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, uint i, uint sz, __code next(int ret, ...)) {
    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
    }

    goto loaduvm_exit(vm_impl, next(ret, ...));
}
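
/*
 * Loop header gear for loaduvm(): while i < sz, run one iteration
 * (check the pgdir, then read one page from the inode); otherwise fall
 * through to loaduvm_exit with ret = 0.  The counter i lives in the
 * vm_impl context and is presumably advanced by PTE_SZ per iteration at
 * the meta level, since no increment appears in these gears.
 */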

static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory; get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}
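
/*
 * walkpgdir() mirrors the x86 xv6 routine on ARM's two-level page
 * table: PDE_IDX/PTE_IDX split the virtual address into first- and
 * second-level indices, and kpt_alloc() (below) supplies fresh
 * second-level tables on demand.
 */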

__code loaduvm_check_pgdir(struct vm_impl* vm_impl, pte_t* pte, pde_t* pgdir, uint i, char* addr, uint pa, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        char* msg = "pte != walkpgdir...";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }
    pa = PTE_ADDR(*pte);

    Gearef(cbc_context, vm_impl)->pte = pte;
    Gearef(cbc_context, vm_impl)->pgdir = pgdir;
    Gearef(cbc_context, vm_impl)->addr = addr;
    Gearef(cbc_context, vm_impl)->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}

typedef struct inode inode;
__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, uint sz, uint i, uint n, struct inode* ip, uint pa, uint offset, __code next(int ret, ...)) {

    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        ret = -1;
        goto next(ret, ...);
    }

    Gearef(cbc_context, vm_impl)->n = n;

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    ret = 0;
    goto next(ret, ...);
}

struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;
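
/*
 * kpt_mem caches freed second-level page tables: kpt_alloc pops from
 * this spinlock-protected freelist first and only falls back to
 * kmalloc(PT_ORDER) when the list is empty.
 */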

static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}
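
/*
 * mappages() installs PTEs for [va, va + size) one PTE_SZ page at a
 * time.  The `ap` value lands in bits 5:4 of each descriptor (the ARM
 * small-page access-permission field, hence the << 4), and
 * PE_CACHE | PE_BUF presumably set the C/B (cacheable/bufferable) bits.
 */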

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;
    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc(PT_ORDER)) == NULL)) {
        char* msg = "oom: kpt_alloc";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}
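
/*
 * CbC version of kpt_alloc().  Only release() appears here, so
 * kpt_mem.lock is presumably acquired by an earlier gear in this chain.
 * On a cold freelist the fallback is a kmalloc(PT_ORDER) allocation,
 * zeroed before use as a page table.
 */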

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)) {
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)) {

    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }
    ret = newsz;
    goto next(ret, ...);
}
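
/*
 * allocuvm as a gear chain: grow the user segment from oldsz to newsz,
 * one zeroed page per iteration, mapping each page at `a` with user
 * permissions (AP_KU).  On allocation failure the pages mapped so far
 * are torn down with deallocuvm() before returning 0.
 */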

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        char* msg = "clearpteu";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    // in ARM, we change the AP field: ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}
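
/*
 * ARM has no x86-style PTE_U bit, so "clear user access" means
 * rewriting the AP field to kernel-only (AP_KO).  As in stock xv6, this
 * is used to turn the page below the user stack into a guard page.
 */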

__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL) {
        ret = NULL;
        goto next(ret, ...);
    }
    i = 0;

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
    }
    ret = d;
    goto next(ret, ...);
}

__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        char* msg = "copyuvm: pte should exist";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }
    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    if (!(*pte & PE_TYPES)) {
        char* msg = "copyuvm: page not present";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR(*pte);
    ap = PTE_AP(*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {

    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}
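
/*
 * copyuvm (fork's address-space copy) as a gear chain: walk the parent
 * pgdir one page at a time (i presumably advanced at the meta level),
 * verify each PTE, allocate and memmove a copy, and map it into the new
 * directory `d` with the same AP bits; any failure funnels into
 * copyuvm_loop_bad, which frees the partial copy.
 */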

__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }
    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}

__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }
    ret = (char*) p2v(PTE_ADDR(*pte));
    goto next(ret, ...);
}
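
/*
 * uva2ka: translate a user virtual address to its kernel-mapped
 * address, returning 0 unless the page is present and user-accessible
 * (AP_KU).  copyout (below) relies on this to reject kernel-only
 * addresses.
 */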

// flush all TLB entries
static void flush_tlb (void)
{
    uint val = 0;
    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);

    // invalidate entire data and instruction caches
    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
}
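
/*
 * MCR p15, 0, <r>, c8, c7, 0 is TLBIALL: invalidate the entire unified
 * TLB.  The two c7 operations that follow are cache-maintenance ops
 * intended (per the comment above) to drop stale data/instruction cache
 * contents after a page-table change.
 */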

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    mappages(P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb();

    goto next(...);
}
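
/*
 * Boot-time paging setup: map the physical range [phy_low, phy_hi) at
 * its kernel virtual alias (P2V) in the kernel page table
 * (_kernel_pgtbl is presumably provided by the boot code or linker
 * script), then flush the TLB so the new mappings take effect.
 */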

__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);
        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...));
    }
    ret = 0;
    goto next(ret, ...);
}

__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
    if (pa0 == 0) {
        ret = -1;
        goto next(ret, ...);
    }
    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, buf, next(...));
}

__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
    n = PTE_SZ - (va - va0);

    if (n > len) {
        n = len;
    }

    len -= n;
    buf += n;
    va = va0 + PTE_SZ;
    Gearef(cbc_context, vm_impl)->n = n;
    Gearef(cbc_context, vm_impl)->len = len;
    Gearef(cbc_context, vm_impl)->buf = buf;
    Gearef(cbc_context, vm_impl)->va = va;

    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
}
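
/*
 * copyout as a gear chain: copy len bytes from a kernel buffer to user
 * address va, one page fragment (n) at a time.  The Gearef() writebacks
 * above persist n/len/buf/va into the context so the next iteration's
 * stub picks up the advanced values.
 */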

typedef struct proc proc_struct;
__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) {
    uint val;

    pushcli();

    if (p->pgdir == 0) {
        char* msg = "switchuvm: no pgdir";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    val = (uint) V2P(p->pgdir) | 0x00;

    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
    flush_tlb();

    popcli();

    goto next(...);
}
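
/*
 * switchuvm: MCR p15, 0, <v>, c2, c0, 0 writes TTBR0, pointing the MMU
 * at this process's page directory; interrupts are held off
 * (pushcli/popcli) across the base-register write and TLB flush.
 */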

__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
    char* mem;

    if (sz >= PTE_SZ) {
        char* msg = "inituvm: more than a page";
        struct Err* err = createKernelError(&proc->cbc_context);
        Gearef(cbc_context, Err)->msg = msg;
        goto meta(cbc_context, err->panic);
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);

    goto next(...);
}
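
/*
 * inituvm: set up the very first user program.  The initcode image
 * (init, sz < PTE_SZ) is copied into one fresh zeroed page mapped
 * user-accessible at virtual address 0.
 */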