193
|
1 #include "../../context.h"
|
198
|
2 #include "mmu.h"
|
|
3 #include "memlayout.h"
|
193
|
4 #interface "vm_impl.h"
|
|
5
|
198
|
6 /*
|
|
7 vm_impl* createvm_impl2();
|
|
8 */
|
|
9
|
210
|
10 __code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
|
200
|
11 char* addr = vm_impl->addr;
|
198
|
12
|
200
|
13 if ((uint) addr %PTE_SZ != 0) {
|
198
|
14 // goto panic
|
|
15 }
|
|
16
|
210
|
17 goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
|
193
|
18 }
|
|
19
|
210
|
20 __code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
|
200
|
21 uint i = vm_impl->i;
|
|
22 uint sz = vm_impl->sz;
|
198
|
23
|
200
|
24 if (i < sz) {
|
210
|
25 goto loaduvm_check_pgdir(vm_impl, next(ret, ...));
|
198
|
26 }
|
200
|
27
|
210
|
28 goto loaduvm_exit(vm_impl, next(ret, ...));
|
193
|
29 }
|
200
|
30
|
|
31
|
198
|
32 static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
|
|
33 {
|
|
34 pde_t *pde;
|
|
35 pte_t *pgtab;
|
|
36
|
|
37 // pgdir points to the page directory, get the page direcotry entry (pde)
|
|
38 pde = &pgdir[PDE_IDX(va)];
|
|
39
|
|
40 if (*pde & PE_TYPES) {
|
|
41 pgtab = (pte_t*) p2v(PT_ADDR(*pde));
|
|
42
|
|
43 } else {
|
|
44 if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
|
|
45 return 0;
|
|
46 }
|
|
47
|
|
48 // Make sure all those PTE_P bits are zero.
|
|
49 memset(pgtab, 0, PT_SZ);
|
|
50
|
|
51 // The permissions here are overly generous, but they can
|
|
52 // be further restricted by the permissions in the page table
|
|
53 // entries, if necessary.
|
|
54 *pde = v2p(pgtab) | UPDE_TYPE;
|
|
55 }
|
|
56
|
|
57 return &pgtab[PTE_IDX(va)];
|
|
58 }
|
|
59
|
|
60
|
210
|
61 __code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(int ret, ...)) {
|
200
|
62 pte_t* pte = vm_impl->pte;
|
|
63 pde_t* pgdir = vm_impl->pgdir;
|
|
64 uint i = vm_impl->i;
|
|
65 char* addr = vm_impl->addr;
|
|
66 uint pa = vm_impl->pa;
|
|
67
|
|
68 if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
|
198
|
69 // goto panic
|
|
70 }
|
200
|
71 pa = PTE_ADDR(*pte);
|
|
72
|
|
73 vm_impl->pte = pte;
|
|
74 vm_impl->pgdir = pgdir;
|
|
75 vm_impl->addr = addr;
|
|
76 vm_impl->pa = pa;
|
|
77
|
210
|
78 goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
|
198
|
79 }
|
|
80
|
210
|
81 __code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(int ret, ...)) {
|
200
|
82 uint sz = vm_impl->sz;
|
|
83 uint i = vm_impl->i;
|
|
84 uint n = vm_impl->n;
|
|
85 struct inode* ip = vm_impl->ip;
|
|
86 uint pa = vm_impl->pa;
|
|
87 uint offset = vm_impl->offset;
|
|
88
|
|
89 if (sz - i < PTE_SZ) {
|
|
90 n = sz - i;
|
199
|
91 } else {
|
200
|
92 n = PTE_SZ;
|
199
|
93 }
|
|
94
|
200
|
95 if (readi(ip, p2v(pa), offset + i, n) != n) {
|
210
|
96 ret = -1;
|
|
97 goto next(ret, ...);
|
199
|
98 }
|
|
99
|
200
|
100 vm_impl->n = n;
|
|
101
|
210
|
102 goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
|
198
|
103 }
|
|
104
|
210
|
105 __code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
|
|
106 ret = 0;
|
|
107 goto next(ret, ...);
|
198
|
108 }
|
202
|
109
|
203
|
// Free-list node: each free page-table page doubles as one of these.
struct run {
    struct run *next;
};

// Allocator state for page-table pages: a spinlock-protected free list.
// NOTE(review): the lock appears to be taken by callers before gears such
// as kpt_alloc_check_impl release it — confirm the acquire site.
struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;
|
|
118
|
206
|
119
|
|
120 static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
|
|
121 {
|
|
122 char *a, *last;
|
|
123 pte_t *pte;
|
|
124
|
|
125 a = (char*) align_dn(va, PTE_SZ);
|
|
126 last = (char*) align_dn((uint)va + size - 1, PTE_SZ);
|
|
127
|
|
128 for (;;) {
|
|
129 if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
|
|
130 return -1;
|
|
131 }
|
|
132
|
|
133 if (*pte & PE_TYPES) {
|
|
134 panic("remap");
|
|
135 }
|
|
136
|
|
137 *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;
|
|
138
|
|
139 if (a == last) {
|
|
140 break;
|
|
141 }
|
|
142
|
|
143 a += PTE_SZ;
|
|
144 pa += PTE_SZ;
|
|
145 }
|
|
146
|
|
147 return 0;
|
|
148 }
|
|
149
|
202
|
// Pop a page-table page off kpt_mem.freelist (falling back to kmalloc when
// the list is empty), zero it, and continue to next with the page address.
// NOTE(review): this gear releases kpt_mem.lock without acquiring it —
// presumably the caller holds it on entry; confirm the acquire site.
// NOTE(review): if both the freelist and kmalloc come up empty, r is NULL
// and the memset below faults — the intended panic is still commented out.
__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;
    if ((r = kpt_mem.freelist) != NULL ) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    // Page-table pages must start out zeroed (no valid entries).
    memset(r, 0, PT_SZ);
    goto next((char*)r);
}
|
205
|
165
|
|
166 __code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){
|
|
167 if (newsz >= UADDR_SZ) {
|
|
168 goto next(0, ...);
|
|
169 }
|
|
170
|
|
171 if (newsz < oldsz) {
|
|
172 ret = newsz;
|
206
|
173 goto next(ret, ...);
|
205
|
174 }
|
|
175
|
206
|
176 char* mem;
|
205
|
177 uint a = align_up(oldsz, PTE_SZ);
|
|
178
|
206
|
179 goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
|
205
|
180 }
|
|
181
|
206
|
182 __code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){
|
205
|
183
|
|
184 if (a < newsz) {
|
206
|
185 mem = alloc_page();
|
|
186
|
|
187 if (mem == 0) {
|
|
188 cprintf("allocuvm out of memory\n");
|
|
189 deallocuvm(pgdir, newsz, oldsz);
|
207
|
190 goto next(0, ...);
|
206
|
191 }
|
|
192
|
|
193 memset(mem, 0, PTE_SZ);
|
|
194 mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
|
|
195
|
|
196 goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, a + PTE_SZ, next(ret, ...));
|
205
|
197 }
|
206
|
198 ret = newsz;
|
|
199 goto next(ret, ...);
|
205
|
200 }
|
207
|
201
|
|
202 __code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
|
|
203 pte_t *pte;
|
|
204
|
|
205 pte = walkpgdir(pgdir, uva, 0);
|
|
206 if (pte == 0) {
|
|
207 // panic("clearpteu");
|
|
208 // goto panic;
|
|
209 }
|
|
210
|
|
211 // in ARM, we change the AP field (ap & 0x3) << 4)
|
|
212 *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;
|
|
213
|
|
214 goto next(ret, ...);
|
|
215 }
|
208
|
216
|
|
217 __code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
|
|
218 pde_t *d;
|
|
219 pte_t *pte;
|
|
220 uint pa, i, ap;
|
|
221 char *mem;
|
|
222
|
|
223 // allocate a new first level page directory
|
|
224 d = kpt_alloc();
|
|
225 if (d == NULL ) {
|
|
226 ret = NULL;
|
|
227 goto next(ret, ...);
|
|
228 }
|
|
229 i = 0;
|
|
230
|
|
231 goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, *d, *pte, pa, i, ap, *mem, next(ret, ...));
|
|
232 }
|
|
233
|
|
234 __code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
|
|
235
|
|
236 if (i < sz) {
|
209
|
237 goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...));
|
208
|
238
|
|
239 }
|
|
240 ret = d;
|
|
241 goto next(ret, ...);
|
|
242 }
|
|
243
|
209
|
244 __code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
|
|
245 if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
|
|
246 // panic("copyuvm: pte should exist");
|
|
247 // goto panic();
|
|
248 }
|
|
249 goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...));
|
|
250 }
|
|
251
|
|
252 __code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
|
|
253
|
|
254 if (!(*pte & PE_TYPES)) {
|
|
255 // panic("copyuvm: page not present");
|
|
256 // goto panic();
|
|
257 }
|
|
258
|
|
259 goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...));
|
|
260 }
|
208
|
261
|
209
|
262 __code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
|
|
263 pa = PTE_ADDR (*pte);
|
|
264 ap = PTE_AP (*pte);
|
|
265
|
|
266 if ((mem = alloc_page()) == 0) {
|
|
267 goto copyuvm_loop_bad(vm_impl, d, next(...));
|
|
268 }
|
|
269 goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...));
|
|
270
|
|
271 }
|
|
272
|
|
273 __code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
|
|
274
|
|
275 memmove(mem, (char*) p2v(pa), PTE_SZ);
|
|
276
|
|
277 if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
|
|
278 goto copyuvm_loop_bad(vm_impl, d, next(...));
|
|
279 }
|
|
280 goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, __code next(int ret, ...));
|
|
281
|
208
|
282 }
|
|
283
|
|
284 __code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
|
|
285 freevm(d);
|
|
286 ret = 0;
|
|
287 goto next(ret, ...);
|
|
288 }
|