changeset 228:e2520f609094

merge
author menikon <e165723@ie.u-ryukyu.ac.jp>
date Fri, 24 Jan 2020 21:19:48 +0900
parents ab5a80021afe (current diff) 80398e02ae72 (diff)
children 696c6bdc1074
diffstat 5 files changed, 383 insertions(+), 36 deletions(-)
--- a/src/gearsTools/lib/Gears/Context/Template/XV6.pm	Thu Jan 23 20:33:39 2020 +0900
+++ b/src/gearsTools/lib/Gears/Context/Template/XV6.pm	Fri Jan 24 21:19:48 2020 +0900
@@ -161,6 +161,7 @@
 #ifndef USE_CUDAWorker
 typedef unsigned long long CUdeviceptr;
 #endif
+typedef struct proc proc_struct;
 typedef uint32  pte_t;
 typedef uint32  pde_t;
 EOFEOF
--- a/src/impl/vm_impl.cbc	Thu Jan 23 20:33:39 2020 +0900
+++ b/src/impl/vm_impl.cbc	Fri Jan 24 21:19:48 2020 +0900
@@ -26,6 +26,15 @@
     vm_impl->sz  = 0;
     vm_impl->loaduvm_ptesize_check = C_loaduvm_ptesize_checkvm_impl;
     vm_impl->loaduvm_loop = C_loaduvm_loopvm_impl;
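+    // entries for the private code gears added in vm_impl_private.cbc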
+    vm_impl->allocuvm_check_newsz = C_allocuvm_check_newszvm_impl;
+    vm_impl->allocuvm_loop = C_allocuvm_loopvm_impl;
+    vm_impl->copyuvm_check_null = C_copyuvm_check_nullvm_impl;
+    vm_impl->copyuvm_loop = C_copyuvm_loopvm_impl;
+    vm_impl->uva2ka_check_pe_types = C_uva2ka_check_pe_types;
+    vm_impl->paging_intvm_impl = C_paging_intvmvm_impl;
+    vm_impl->copyout_loopvm_impl = C_copyout_loopvm_impl;
+    vm_impl->switchuvm_check_pgdirvm_impl = C_switchuvm_check_pgdirvm_impl;
+    vm_impl->init_inituvm_check_sz = C_init_inituvm_check_sz;
     vm->init_vmm = C_init_vmmvm_impl;
     vm->kpt_freerange = C_kpt_freerangevm_impl;
     vm->kpt_alloc = C_kpt_allocvm_impl;
@@ -37,7 +46,7 @@
     vm->copyuvm = C_copyuvmvm_impl;
     vm->uva2ka = C_uva2kavm_impl;
     vm->copyout = C_copyoutvm_impl;
-    vm->pagind_int = C_pagind_intvm_impl;
+    vm->paging_int = C_paging_intvm_impl;
     return vm;
 }
 
@@ -53,25 +62,43 @@
     goto next(...);
 }
 
-__code kpt_freerangevm_impl(struct vm_impl* vm, uint low, uint hi, __code next(...)) {
+struct run {
+    struct run *next;
+};
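+// kpt_mem (spinlock + freelist of page-table pages) is defined in vm_impl_private.cbc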
 
-    goto next(...);
+static void _kpt_free (char *v)
+{
+    struct run *r;
+
+    r = (struct run*) v;
+    r->next = kpt_mem.freelist;
+    kpt_mem.freelist = r;
 }
 
-__code kpt_allocvm_impl(struct vm_impl* vm ,__code next(...)) {
+__code kpt_freerangevm_impl(struct vm_impl* vm, uint low, uint hi, __code next(...)) {
+
+    if (low < hi) {
+        _kpt_free((char*)low);
+        goto kpt_freerangevm_impl(vm, low + PT_SZ, hi, next(...));
+    }
+    goto next(...);
+}
+
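+// take the kpt_mem lock, then continue to kpt_alloc_check_impl to pop a page off the freelist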
+__code kpt_allocvm_impl(struct vm_impl* vm, __code next(...)) {
+    acquire(&kpt_mem.lock);
 
-    goto next(...);
+    goto kpt_alloc_check_impl(vm, next(...));
 }
 
 typedef struct proc proc;
-__code switchuvmvm_impl(struct vm_impl* vm ,struct proc* p, __code next(...)) { //:skip
+__code switchuvmvm_impl(struct vm_impl* vm , struct proc* p, __code next(...)) { //:skip
 
-    goto next(...);
+    goto switchuvm_check_pgdirvm_impl(vm, p, next(...));
 }
 
-__code init_inituvmvm_impl(struct vm_impl* vm, pde_t* pgdir, char* init, uint sz, __code next(...)) { //:skip
+__code init_inituvmvm_impl(struct vm_impl* vm, pde_t* pgdir, char* init, uint sz, __code next(...)) { 
 
-    goto next(...);
+    goto init_inituvm_check_sz(vm, pgdir, init, sz, next(...));
 }
 
 __code loaduvmvm_impl(struct vm_impl* vm, pde_t* pgdir, char* addr, struct inode* ip, uint offset, uint sz,  __code next(...)) {
@@ -81,36 +108,38 @@
     vm->offset = offset;
     vm->sz = sz;
     
-    goto loaduvm_ptesize_checkvm_impl(vm, next);
+    goto loaduvm_ptesize_checkvm_impl(vm, next(...));
 }
 
 __code allocuvmvm_impl(struct vm_impl* vm, pde_t* pgdir, uint oldsz, uint newsz, __code next(...)) {
 
-    goto next(...);
+    goto allocuvm_check_newszvm_impl(vm, pgdir, oldsz, newsz, next(...));
 }
 
 __code clearpteuvm_impl(struct vm_impl* vm, pde_t* pgdir, char* uva,  __code next(...)) {
 
-    goto next(...);
+    goto clearpteu_check_ptevm_impl(vm, pgdir, uva, next(...));
 }
 
 __code copyuvmvm_impl(struct vm_impl* vm, pde_t* pgdir, uint sz, __code next(...)) {
 
-    goto next(...);
+    goto copyuvm_check_nullvm_impl(vm, pgdir, sz, next(...));
 }
 
 __code uva2kavm_impl(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(...)) {
 
-    goto next(...);
+    goto uva2ka_check_pe_types(vm, pgdir, uva, next(...));
 }
 
 __code copyoutvm_impl(struct vm_impl* vm, pde_t* pgdir, uint va, void* pp, uint len, __code next(...)) {
 
-    goto next(...);
+    vm->buf = (char*) pp;
+
+    goto copyout_loopvm_impl(vm, pgdir, va, pp, len, vm->va0, vm->pa0, next(...));
 }
 
-__code pagind_intvm_impl(struct vm_impl* vm, uint phy_low, uint phy_hi, __code next(...)) {
+__code paging_intvm_impl(struct vm_impl* vm, uint phy_low, uint phy_hi, __code next(...)) {
 
-    goto next(...);
+    goto paging_intvmvm_impl(vm, phy_low, phy_hi, next(...));
 }
 
--- a/src/impl/vm_impl.h	Thu Jan 23 20:33:39 2020 +0900
+++ b/src/impl/vm_impl.h	Fri Jan 24 21:19:48 2020 +0900
@@ -9,13 +9,39 @@
     uint offset;
     uint pa;
     uint n;
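+    // state carried between the allocuvm / copyuvm / uva2ka / copyout /
+    // switchuvm / inituvm code gears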
+    uint oldsz;
+    uint newsz;
+    uint a;
+    int ret;
+    char* mem;
+    char* uva;
+    pde_t* d;
+    uint ap;
+    uint phy_low;
+    uint phy_hi;
+    uint va;
+    void* pp;
+    uint len;
+    char* buf;
+    char* pa0;
+    uint va0;
+    proc_struct* p;
+    char* init;
 
-    __code init_vmm(Type* vm_impl, __code next(...));
-    __code loaduvm_ptesize_check(Type* vm_impl, __code next(...));
-    __code loaduvm_loop(Type* vm_impl, uint i, pte_t* pte, uint sz, __code next(...));
+    __code kpt_alloc_check_impl(Type* vm_impl, __code next(...));
+    __code loaduvm_ptesize_check(Type* vm_impl, __code next(int ret, ...));
+    __code loaduvm_loop(Type* vm_impl, uint i, pte_t* pte, uint sz, __code next(int ret, ...));
+    __code allocuvm_check_newsz(Type* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(...));
+    __code allocuvm_loop(Type* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, uint a, __code next(...));
+    __code copyuvm_check_null(Type* vm_impl, pde_t* pgdir, uint sz, __code next(...));
+    __code copyuvm_loop(Type* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...));
+    __code clearpteu_check_ptevm_impl(Type* vm_impl, pde_t* pgdir, char* uva,  __code next(...));
+    __code uva2ka_check_pe_types(Type* vm_impl, pde_t* pgdir, char* uva, __code next(...));
+    __code paging_intvm_impl(Type* vm_impl, uint phy_low, uint phy_hi, __code next(...));
+    __code copyout_loopvm_impl(Type* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, __code next(...));
+    __code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, struct proc* p, __code next(...));
+    __code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...));
     __code next(...);
- 
-
 } vm_impl;
 
 
--- a/src/impl/vm_impl_private.cbc	Thu Jan 23 20:33:39 2020 +0900
+++ b/src/impl/vm_impl_private.cbc	Fri Jan 24 21:19:48 2020 +0900
@@ -1,5 +1,7 @@
-#include "../../context.h"
+#include "param.h"
+#include "proc.h"
 #include "mmu.h"
+#include "defs.h"
 #include "memlayout.h"
 #interface "vm_impl.h"
 
@@ -7,25 +9,25 @@
 vm_impl* createvm_impl2();
 */
 
-__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(...)) {
+__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
     char* addr = vm_impl->addr;
 
     if ((uint) addr %PTE_SZ != 0) {
        // goto panic 
     }
 
-    goto loaduvm_loopvm_impl(vm_impl, next(...));
+    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
 }
 
-__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(...)) {
+__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
     uint i = vm_impl->i;
     uint sz = vm_impl->sz;
 
     if (i < sz) {
-        goto loaduvm_check_pgdir(vm_impl, next(...));  
+        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));  
     } 
 
-    goto loaduvm_exit(vm_impl, next(...));
+    goto loaduvm_exit(vm_impl, next(ret, ...));
 }
 
 
@@ -58,7 +60,7 @@
 }
 
 
-__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(...)) {
+__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(int ret, ...)) {
     pte_t* pte = vm_impl->pte;
     pde_t* pgdir = vm_impl->pgdir;
     uint i = vm_impl->i;
@@ -75,10 +77,10 @@
     vm_impl->addr = addr; 
     vm_impl->pa = pa; 
 
-    goto loaduvm_check_PTE_SZ(vm_impl, next(...));
+    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
 }
 
-__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(...)) {
+__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(int ret, ...)) {
     uint sz = vm_impl->sz;
     uint i = vm_impl->i;
     uint n = vm_impl->n;
@@ -93,16 +95,305 @@
     }
 
     if (readi(ip, p2v(pa), offset + i, n) != n) {
-        // panic 
-        // return -1;
+        ret = -1;
+        goto next(ret, ...);
     }
 
     vm_impl->n = n;
  
-    goto loaduvm_exit(vm_impl, next(...));
+    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
+}
+
+__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    ret = 0;
+    goto next(ret, ...);
+}
+
+struct run {
+    struct run *next;
+};
+
+struct {
+    struct spinlock lock;
+    struct run* freelist;
+} kpt_mem;
+
+
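+// install page table entries in pgdir mapping [va, va + size) to physical
+// address pa, one PTE_SZ page at a time, with access permission ap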
+static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
+{
+    char *a, *last;
+    pte_t *pte;
+
+    a = (char*) align_dn(va, PTE_SZ);
+    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);
+
+    for (;;) {
+        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
+            return -1;
+        }
+
+        if (*pte & PE_TYPES) {
+            panic("remap");
+        }
+
+        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;
+
+        if (a == last) {
+            break;
+        }
+
+        a += PTE_SZ;
+        pa += PTE_SZ;
+    }
+
+    return 0;
+}
+
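+// pop a page-table page off kpt_mem.freelist (falling back to kmalloc),
+// zero it and pass it to the continuation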
+__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) { 
+    struct run* r;    
+    if ((r = kpt_mem.freelist) != NULL ) {
+        kpt_mem.freelist = r->next;
+    }
+    release(&kpt_mem.lock);
+
+    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
+        // panic("oom: kpt_alloc");
+        // goto panic
+    }
+
+    memset(r, 0, PT_SZ);
+    goto next((char*)r, ...);
+}
+
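+// allocuvm, split into code gears: validate newsz here, then grow the user
+// address space one page at a time in allocuvm_loopvm_impl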
+__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){
+    if (newsz >= UADDR_SZ) {
+       goto next(0, ...);
+    }
+
+    if (newsz < oldsz) {
+        ret = oldsz;
+        goto next(ret, ...);
+    }
+
+    char* mem;
+    uint a = align_up(oldsz, PTE_SZ);
+
+    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
+}
+
+__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){
+
+    if (a < newsz) {
+        mem = alloc_page();
+
+        if (mem == 0) {
+            cprintf("allocuvm out of memory\n");
+            deallocuvm(pgdir, newsz, oldsz);
+            goto next(0, ...);
+        }
+
+        memset(mem, 0, PTE_SZ);
+        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
+
+        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
+    }
+    ret = newsz; 
+    goto next(ret, ...);
 }
 
-__code loaduvm_exit(struct vm_impl* vm_impl, __code next(...)) {
+__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
+    pte_t *pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+    if (pte == 0) {
+        // panic("clearpteu");
+        // goto panic;
+    }
+
+    // in ARM, we change the AP field (ap & 0x3) << 4)
+    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;
+
+    goto next(ret, ...);
+}
+
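+// copyuvm: allocate a fresh page directory, then duplicate each mapped page
+// of pgdir through the copyuvm_loop_* code gears below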
+__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
+    pde_t *d;
+    pte_t *pte;
+    uint pa, i, ap;
+    char *mem;
+
+    // allocate a new first level page directory
+    d = kpt_alloc();
+    if (d == NULL ) {
+        ret = NULL;
+        goto next(ret, ...);
+    }
+    i = 0;
+
+    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+    if (i < sz) { 
+        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+
+    }
+    ret = d;
+    goto next(ret, ...);
+}
+
+__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
+        // panic("copyuvm: pte should exist");
+        // goto panic();
+    }
+    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+    if (!(*pte & PE_TYPES)) {
+        // panic("copyuvm: page not present");
+        // goto panic();
+    }
+
+    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+    pa = PTE_ADDR (*pte);
+    ap = PTE_AP (*pte);
+
+    if ((mem = alloc_page()) == 0) {
+        goto copyuvm_loop_bad(vm_impl, d, next(...));
+    }
+    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+    
+}
+
+__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+    memmove(mem, (char*) p2v(pa), PTE_SZ);
+
+    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
+        goto copyuvm_loop_bad(vm_impl, d, next(...));
+    }
+    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i + PTE_SZ, ap, mem, next(ret, ...));
+ 
+}
+
+__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
+    freevm(d);
+    ret = 0;
+    goto next(ret, ...);
+}
+
+
+__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
+    pte_t* pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+
+    // make sure it exists
+    if ((*pte & PE_TYPES) == 0) {
+        ret = 0;
+        goto next(ret, ...);
+    }
+    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
+}
+
+__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
+    // make sure it is a user page
+    if (PTE_AP(*pte) != AP_KU) {
+        ret = 0;
+        goto next(ret, ...);
+    }
+    ret = (char*) p2v(PTE_ADDR(*pte));
+    goto next(ret, ...);
+}
+
+// flush all TLB
+static void flush_tlb (void)
+{
+    uint val = 0;
+    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);
+
+    // invalid entire data and instruction cache
+    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
+    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
+}
+
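+// map the [phy_low, phy_hi) physical range into the kernel page table and flush the TLB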
+__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
+    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
+    flush_tlb ();
 
     goto next(...);
 }
+
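+// copyout: copy len bytes from the kernel buffer (vm_impl->buf) to user
+// virtual address va, one page at a time, translating with uva2ka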
+__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0,  __code next(int ret, ...)) {
+    if (len > 0) {
+        va0 = align_dn(va, PTE_SZ);
+        pa0 = uva2ka(pgdir, (char*) va0);
+        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, vm_impl->n, next(...));
+    }
+    ret = 0;
+    goto next(ret, ...);
+
+}
+
+__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
+    if (pa0 == 0) {
+        ret = -1;
+        goto next(ret, ...);
+    }
+    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, vm_impl->buf, next(...));
+}
+__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
+    n = PTE_SZ - (va - va0);
+
+    if (n > len) {
+        n = len;
+    }
+
+    // copy this chunk into the user page, then advance the source buffer
+    memmove(pa0 + (va - va0), buf, n);
+
+    len -= n;
+    vm_impl->buf = buf + n;
+    va = va0 + PTE_SZ;
+    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
+}
+
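+// load p->pgdir into the ARM translation table base register (CP15 c2) and flush the TLB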
+typedef struct proc proc_struct;
+__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
+    uint val;
+
+    pushcli();
+
+    if (p->pgdir == 0) {
+        panic("switchuvm: no pgdir");
+    }
+
+    val = (uint) V2P(p->pgdir) | 0x00;
+
+    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
+    flush_tlb();
+
+    popcli();
+
+    goto next(...);
+}
+
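+// inituvm: copy initcode (at most one page) into a freshly allocated page mapped at address 0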
+__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
+    char* mem;
+
+    if (sz >= PTE_SZ) {
+        // goto panic;
+        // panic("inituvm: more than a page");
+    }
+
+    mem = alloc_page();
+    memset(mem, 0, PTE_SZ);
+    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
+    memmove(mem, init, sz);
+
+    goto next(...);
+}
--- a/src/interface/vm.h	Thu Jan 23 20:33:39 2020 +0900
+++ b/src/interface/vm.h	Fri Jan 24 21:19:48 2020 +0900
@@ -28,6 +28,6 @@
     __code copyuvm(Impl* vm, pde_t* pgdir, uint sz, __code next(...));
     __code uva2ka(Impl* vm, pde_t* pgdir, char* uva, __code next(...));
     __code copyout(Impl* vm, pde_t* pgdir, uint va, void* pp, uint len, __code next(...));
-    __code pagind_int(Impl* vm, uint phy_low, uint phy_hi, __code next(...));
+    __code paging_int(Impl* vm, uint phy_low, uint phy_hi, __code next(...));
     __code next(...);
 } vm;