view src/impl/vm_impl_private.cbc @ 213:f4effd36aefc

kpt_alloc
author tobaru
date Thu, 23 Jan 2020 21:24:30 +0900
parents 6e03cee9733e
children 2ecf1e09e981

#include "../../context.h"
#include "mmu.h"
#include "memlayout.h"
#interface "vm_impl.h"

/*
vm_impl* createvm_impl2();
*/
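
/*
 * Sketch (an assumption, not part of this changeset): a GearsOS interface
 * header such as vm_impl.h typically declares the fields the codegears
 * share and the continuations they may take. Based only on the fields
 * read in this file, it might look like:
 *
 *   typedef struct vm_impl {
 *       pde_t* pgdir;
 *       pte_t* pte;
 *       char* addr;
 *       struct inode* ip;
 *       uint i;
 *       uint sz;
 *       uint n;
 *       uint pa;
 *       uint offset;
 *       __code next(...);
 *   } vm_impl;
 */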

__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(...)) {
    char* addr = vm_impl->addr;

    if ((uint) addr % PTE_SZ != 0) {
        // goto panic("loaduvm: addr must be page aligned");
    }

    goto loaduvm_loopvm_impl(vm_impl, next(...));
}

__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(...)) {
    uint i = vm_impl->i;
    uint sz = vm_impl->sz;

    // loop head: one page per iteration until i reaches sz
    if (i < sz) {
        goto loaduvm_check_pgdir(vm_impl, next(...));
    }

    goto loaduvm_exit(vm_impl, next(...));
}


static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
{
    pde_t *pde;
    pte_t *pgtab;

    // pgdir points to the page directory, get the page directory entry (pde)
    pde = &pgdir[PDE_IDX(va)];

    if (*pde & PE_TYPES) {
        pgtab = (pte_t*) p2v(PT_ADDR(*pde));

    } else {
        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
            return 0;
        }

        // Make sure all those PTE_P bits are zero.
        memset(pgtab, 0, PT_SZ);

        // The permissions here are overly generous, but they can
        // be further restricted by the permissions in the page table
        // entries, if necessary.
        *pde = v2p(pgtab) | UPDE_TYPE;
    }

    return &pgtab[PTE_IDX(va)];
}
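
/*
 * Usage sketch (not part of this file): translating a virtual address to a
 * physical one with walkpgdir. PE_TYPES, PTE_ADDR and PTE_SZ are the same
 * macros used above; the variable names are illustrative only.
 *
 *   pte_t* pte = walkpgdir(pgdir, (void*) va, 0);   // 0: do not allocate
 *   if (pte != 0 && (*pte & PE_TYPES)) {
 *       uint pa = PTE_ADDR(*pte) | ((uint) va & (PTE_SZ - 1));
 *   }
 */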


__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(...)) {
    pte_t* pte = vm_impl->pte;
    pde_t* pgdir = vm_impl->pgdir;
    uint i = vm_impl->i;
    char* addr = vm_impl->addr;
    uint pa = vm_impl->pa;

    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
        // goto panic("loaduvm: address should exist");
    }
    pa = PTE_ADDR(*pte);

    vm_impl->pte = pte;
    vm_impl->pgdir = pgdir;
    vm_impl->addr = addr;
    vm_impl->pa = pa;

    goto loaduvm_check_PTE_SZ(vm_impl, next(...));
}

__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(...)) {
    uint sz = vm_impl->sz;
    uint i = vm_impl->i;
    uint n = vm_impl->n;
    struct inode* ip = vm_impl->ip;
    uint pa = vm_impl->pa;
    uint offset = vm_impl->offset;

    // read at most one page's worth of the file image
    if (sz - i < PTE_SZ) {
        n = sz - i;
    } else {
        n = PTE_SZ;
    }

    if (readi(ip, p2v(pa), offset + i, n) != n) {
        // goto panic (xv6 returns -1 here)
    }

    vm_impl->n = n;
    vm_impl->i = i + PTE_SZ;  // advance to the next page

    goto loaduvm_loopvm_impl(vm_impl, next(...));
}

__code loaduvm_exit(struct vm_impl* vm_impl, __code next(...)) {

    goto next(...);
}
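
/*
 * For orientation: the codegears above decompose xv6's loaduvm into
 * continuations, with vm_impl carrying what were local variables. The
 * plain-C routine they correspond to (xv6 ARM port, roughly) is:
 *
 *   int loaduvm(pde_t* pgdir, char* addr, struct inode* ip, uint offset, uint sz)
 *   {
 *       uint i, pa, n;
 *       pte_t* pte;
 *
 *       if ((uint) addr % PTE_SZ != 0) {
 *           panic("loaduvm: addr must be page aligned");
 *       }
 *
 *       for (i = 0; i < sz; i += PTE_SZ) {
 *           if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
 *               panic("loaduvm: address should exist");
 *           }
 *           pa = PTE_ADDR(*pte);
 *           n = (sz - i < PTE_SZ) ? sz - i : PTE_SZ;
 *           if (readi(ip, p2v(pa), offset + i, n) != n) {
 *               return -1;
 *           }
 *       }
 *
 *       return 0;
 *   }
 */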

// Free page-table pages are chained through their own first words,
// as in xv6's kalloc free list.
struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;
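
/*
 * The matching free path is not in this file; in the xv6 ARM port it is
 * kpt_free, which pushes a page-table page back on the free list. A
 * sketch for reference:
 *
 *   void kpt_free(char* v)
 *   {
 *       struct run* r = (struct run*) v;
 *
 *       acquire(&kpt_mem.lock);
 *       r->next = kpt_mem.freelist;
 *       kpt_mem.freelist = r;
 *       release(&kpt_mem.lock);
 *   }
 */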

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;

    // kpt_mem.lock is presumably acquired by the codegear that transfers
    // control here; this codegear only releases it.
    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }
    release(&kpt_mem.lock);

    // fall back to the general allocator when the free list is empty
    if ((r == NULL) && ((r = kmalloc(PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    memset(r, 0, PT_SZ);
    goto next((char*)r);
}
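
/*
 * Hypothetical caller sketch (the real entry point is declared through the
 * vm_impl interface, not in this file): the lock is taken before control
 * is transferred here, and the continuation receives the zeroed page.
 *
 *   acquire(&kpt_mem.lock);
 *   goto kpt_alloc_check_impl(vm_impl, next(...));
 */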