#include "../../context.h"
#include "mmu.h"
#include "defs.h"
#include "memlayout.h"
#interface "vm_impl.h"

/*
vm_impl* createvm_impl2();
*/

__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    char* addr = vm_impl->addr;

    if ((uint) addr % PTE_SZ != 0) {
        // goto panic
    }

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    uint i = vm_impl->i;
    uint sz = vm_impl->sz;

    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
    }

    goto loaduvm_exit(vm_impl, next(ret, ...));
}

static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory; get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));
    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}
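
/*
 * Usage sketch (illustrative only): look up the PTE for a user virtual
 * address without allocating a missing page table.
 *
 *   pte_t* pte = walkpgdir(pgdir, (void*) uva, 0);
 *   if (pte == 0 || !(*pte & PE_TYPES)) {
 *       // uva is not mapped
 *   }
 */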

__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    pte_t* pte = vm_impl->pte;
    pde_t* pgdir = vm_impl->pgdir;
    uint i = vm_impl->i;
    char* addr = vm_impl->addr;
    uint pa = vm_impl->pa;

    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        // goto panic
    }
    pa = PTE_ADDR(*pte);

    vm_impl->pte = pte;
    vm_impl->pgdir = pgdir;
    vm_impl->addr = addr;
    vm_impl->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
}

__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    uint sz = vm_impl->sz;
    uint i = vm_impl->i;
    uint n = vm_impl->n;
    struct inode* ip = vm_impl->ip;
    uint pa = vm_impl->pa;
    uint offset = vm_impl->offset;

    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        ret = -1;
        goto next(ret, ...);
    }

    vm_impl->n = n;
    vm_impl->i = i + PTE_SZ;  // advance to the next page before looping

    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
    ret = 0;
    goto next(ret, ...);
}
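
/*
 * A minimal sketch of how the loaduvm chain might be entered; the entry
 * gear name is hypothetical, and it assumes the caller has stored pgdir,
 * addr, ip, offset and sz into vm_impl beforehand:
 *
 *   __code loaduvmvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
 *       vm_impl->i = 0;
 *       goto loaduvm_ptesize_checkvm_impl(vm_impl, next(ret, ...));
 *   }
 */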

struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;
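
/*
 * A sketch of the matching free routine, assuming the same freelist
 * discipline as kpt_free in the xv6/ARM port this code derives from:
 *
 *   void kpt_free(char* v) {
 *       acquire(&kpt_mem.lock);
 *       struct run* r = (struct run*) v;
 *       r->next = kpt_mem.freelist;
 *       kpt_mem.freelist = r;
 *       release(&kpt_mem.lock);
 *   }
 */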

static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}
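
/*
 * Usage sketch, mirroring the call in allocuvm_loopvm_impl below: map one
 * freshly allocated page at user virtual address a with user permission.
 *
 *   if (mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU) < 0) {
 *       // out of page tables: walkpgdir could not allocate one
 *   }
 */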

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;

    // kpt_mem.lock is assumed to be held on entry; it is released below
    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc(PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}
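
/*
 * A sketch of the expected entry gear (name hypothetical), taking the
 * lock that kpt_alloc_check_impl releases:
 *
 *   __code kpt_allocvm_impl(struct vm_impl* vm_impl, __code next(...)) {
 *       acquire(&kpt_mem.lock);
 *       goto kpt_alloc_check_impl(vm_impl, next(...));
 *   }
 */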

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)) {
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)) {
    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }
    ret = newsz;
    goto next(ret, ...);
}

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        // panic("clearpteu");
        // goto panic;
    }

    // in ARM, we change the AP field ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}
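
/*
 * In xv6, clearpteu marks the guard page below the user stack as
 * kernel-only; a sketch of that call from exec (caller hypothetical):
 *
 *   goto clearpteu_check_ptevm_impl(vm_impl, pgdir, (char*) (sz - 2 * PTE_SZ), next(ret, ...));
 */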

__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL) {
        ret = NULL;
        goto next(ret, ...);
    }
    i = 0;

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
    }
    ret = d;
    goto next(ret, ...);
}

__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        // panic("copyuvm: pte should exist");
        // goto panic();
    }
    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if (!(*pte & PE_TYPES)) {
        // panic("copyuvm: page not present");
        // goto panic();
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR(*pte);
    ap = PTE_AP(*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }
    // advance to the next page before re-entering the loop
    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i + PTE_SZ, ap, mem, next(ret, ...));
}

__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}
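
/*
 * A minimal sketch of entering the copyuvm chain from fork (caller and
 * proc fields hypothetical); copyuvm_check_nullvm_impl allocates the new
 * page directory and starts the per-page loop at i = 0:
 *
 *   goto copyuvm_check_nullvm_impl(vm_impl, proc->pgdir, proc->sz, next(ret, ...));
 */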

__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }
    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}

__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }
    ret = (char*) p2v(PTE_ADDR(*pte));
    goto next(ret, ...);
}

// flush all TLB
static void flush_tlb (void)
{
    uint val = 0;
    // invalidate the entire unified TLB
    asm("MCR p15, 0, %[r], c8, c7, 0" : : [r]"r" (val) :);

    // invalidate entire data and instruction cache
    asm("MCR p15, 0, %[r], c7, c10, 0" : : [r]"r" (val) :);
    asm("MCR p15, 0, %[r], c7, c11, 0" : : [r]"r" (val) :);
}
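
/*
 * Note: on ARMv6/v7 a data synchronization barrier is normally required
 * after TLB and cache maintenance before relying on the new mappings; a
 * sketch of the additional CP15 barrier (assumption, not in the original):
 *
 *   asm("MCR p15, 0, %[r], c7, c10, 4" : : [r]"r" (val) :);  // DSB
 */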

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    mappages(P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb();

    goto next(...);
}