view x86_64/ready_kernel.c @ 10:28550dbb2579

add new files
author taiki
date Tue, 26 Mar 2013 18:15:35 +0900

#include <efi.h>
#include <efilib.h>

#include "elilo.h"
#include "pgtable_flags.h"
#include "sysdeps.h"
#include "registers.h"

#define ALIGN_4K 12 /* shift count for 4 KiB alignment */

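/*
 * Byte-wise copy/fill macros used in place of memcpy()/memset().
 * MEMCPY copies backwards when the destination lies above the source,
 * so overlapping regions are handled like memmove().
 */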
#define MEMCPY(to, from, cnt) { \
        UINT8 *t = (UINT8 *)(to); \
        UINT8 *f = (UINT8 *)(from); \
        UINTN n = (UINTN)(cnt); \
        if (t && f && n && (t < f)) { \
                /* destination below source: copy forwards */ \
                while (n--) { \
                        *t++ = *f++; \
                } \
        } else if (t && f && n && (t > f)) { \
                /* destination above source: copy backwards from the last byte */ \
                t += n - 1; \
                f += n - 1; \
                while (n--) { \
                        *t-- = *f--; \
                } \
        } \
}

#define MEMSET(ptr, size, val) { \
        UINT8 *p = (UINT8 *)(ptr); \
        UINTN n = (UINTN)(size); \
        UINT8 v = (UINT8)(val); \
        if (p && n) { \
                while (n--) { \
                        *p++ = v; \
                } \
        } \
}

/*
VOID
enable_efer_flags()
{
    asm volatile ("movl %0, %%ecx \n\t rdmsr \n\t btsl %1, %%eax\n\t wrmsr " :: "r"(msr_efer), "r"(efer_flg));
}

VOID
disable_efer_flags()
{
    asm volatile ("movl %0, %%ecx \n\t rdmsr \n\t btrl %1, %%eax\n\t wrmsr " :: "r"(msr_efer), "r"(efer_flg));
}
*/


extern pml4_t *pml4;
extern pdpte_t *pdpte;

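/* cpuid()
 * Execute CPUID with the leaf taken from *eax (and the sub-leaf from *ecx)
 * and return the four result registers through the pointer arguments.
 */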
VOID 
cpuid(UINTN *eax, UINTN *ebx, UINTN *ecx, UINTN *edx)
{
    asm volatile ("cpuid" : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
            : "0" (*eax), "2" (*ecx) : "memory");
}

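/* enable_cr4_pae()
 * Read CR4, set the PAE bit and clear PCIDE, then write the value back.
 */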
INTN
enable_cr4_pae()
{   
    cr4_t cr4;
    MEMSET(&cr4, sizeof(UINT64), 0);

    asm volatile("mov %%cr4,%0\n\t" : "=r" (cr4));

    cr4.pae = ENABLE;
    cr4.pcide = DISABLE;

    asm volatile("mov %0,%%cr4": : "r" (cr4));
    //asm volatile("movq %%rax, %%cr4"::"a"(cr4_flag));
    return 0;
}

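/* insert_addr_to_cr3()
 * Write the physical address of the top-level page table (PML4) into CR3.
 */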
UINTN
insert_addr_to_cr3(UINT64 addr)
{
    cr3_t cr3;
    MEMSET(&cr3, sizeof(UINT64), 0);

    /* read the current cr3 */
    Print(L"Read cr3.\n");
    asm volatile("mov %%cr3,%0\n\t" : "=r" (cr3));
    Print(L"Current cr3 %lx pwt:%d pcd:%d pdb:0x%lx addr:%lx\n", cr3, cr3.pwt, cr3.pcd, cr3.pdb, addr);

    /* store the 4 KiB aligned page table base address in the pdb field */
    addr = addr >> ALIGN_4K;
    cr3.pdb = addr;
    Print(L"Write addr:%lx to cr3 / cr3.pdb: %lx.\n", addr, cr3.pdb);
    asm volatile("mov %0,%%cr3": : "r" (cr3));
    Print(L"Written cr3.\n");
    return 0;
}

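/* disable_paging_cr0()
 * Clear the PG bit in CR0 so the paging structures can be replaced.
 */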
INTN
disable_paging_cr0()
{   
    cr0_t cr0;
    MEMSET(&cr0, sizeof(UINT64), 0);

    asm volatile("mov %%cr0,%0\n\t" : "=r" (cr0));

    cr0.pg = DISABLE;

    asm volatile("mov %0,%%cr0": : "r" (cr0));
    // asm volatile("movl %0, %%eax \n\t movq %%rax, %%cr0"::"m"(cr0_flag));
    return 0;
}

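/* enable_paging_cr0()
 * Set the PG bit in CR0 to turn paging back on with the new tables.
 */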
INTN
enable_paging_cr0()
{   
    cr0_t cr0;
    MEMSET(&cr0, sizeof(UINT64), 0);

    asm volatile("mov %%cr0,%0\n\t" : "=r" (cr0));
    Print(L"Register cr0 : %lx \n", cr0);
    cr0.pg = ENABLE;
    asm volatile("mov %0,%%cr0": : "r" (cr0));
    // asm volatile("movl %0, %%eax \n\t movq %%rax, %%cr0"::"m"(cr0_flag));
    return 0;
}

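/* init_pgtable_value()
 * Fill a page table region with a fixed 64-bit value; size is given in bytes.
 */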
VOID
init_pgtable_value(VOID *addr, UINT32 size, UINT64 value)
{
    if (addr == NULL) {
        Print(L"addr is NULL.\n");
        return;
    }

    UINT64 *tmp = (UINT64 *)addr;
    UINT32 count = size / sizeof(UINT64);

    while (count--) {
        *tmp++ = value;
    }
}

#define PML4_START 0x00101000
#define PDPTE_START (PML4_START + PML4_SIZE)

/* bytes per page, used to convert table sizes into 4 KiB page counts */
#define PGCNT_BYTE 4096

/* a PDPTE entry with PS set maps a 1 GiB page */
#define ORIG_PAGE_SIZE 1048576


/* init_pgtable()
 * Initialize the IA-32e paging structures used by the loader.
 * This function initializes the PML4 and the PDPTE entries.
 */
UINT64
init_pgtable()
{
    pml4 = (pml4_t *)PML4_START;
    UINTN pml4_size = PML4_SIZE * sizeof(pml4_t) / PGCNT_BYTE;
    Print(L"allocate pml4 ::%lx", pml4);
    pml4 = (pml4_t *)alloc_pages(pml4_size, EfiLoaderData, AllocateAddress, pml4);
    if (pml4 == NULL) {
        Print(L"can not allocate pml4.\n");
        return -1;
    }

    pdpte = (pdpte_t *)PDPTE_START;
    Print(L"allocate pdpte ::%lx", pdpte);
    UINTN pdpte_size = PDPTE_SIZE * PML4_SIZE * sizeof(pdpte_t) / PGCNT_BYTE;
    pdpte = (pdpte_t *)alloc_pages(pdpte_size , EfiLoaderData, AllocateAddress, pdpte);
    Print(L"pdpte :%lx", pdpte);

    if (pdpte == NULL) {
        Print(L"can not allocate pdpte.\n");
        return -1;
    }

    init_pgtable_value((VOID *)pml4, PML4_SIZE * sizeof(pml4_t), 0);
    init_pgtable_value((VOID *)pdpte, PDPTE_SIZE * PML4_SIZE * sizeof(pdpte_t), 0);

    UINT64 orig_addr_start = PDPTE_START + (PDPTE_SIZE * PML4_SIZE * sizeof(UINT64));
    UINTN i = 0;
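    /* point each PML4 entry at its block of PDPTE entries (4 KiB aligned),
     * then mark every PDPTE entry present with PS set */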
    for (; i<PML4_SIZE ;i++) {
        UINT64 tmp_pdpte_addr = (UINT64)&pdpte[PDPTE_SIZE * i];
        tmp_pdpte_addr = tmp_pdpte_addr >> ALIGN_4K;
        pml4[i].paddr = tmp_pdpte_addr;
        pml4[i].p = ENABLE;
        UINTN j = 0;
        for (;j < PDPTE_SIZE; j++) {
            pdpte[(PDPTE_SIZE * i) + j].p = ENABLE;
            pdpte[(PDPTE_SIZE * i) + j].ps = ENABLE;
            pdpte[(PDPTE_SIZE * i) + j].paddr = orig_addr_start + (PDPTE_SIZE * i + j * ORIG_PAGE_SIZE) * sizeof(UINT64);
        }
    }

    return (UINT64)pml4;
}

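/* stop_kernel()
 * Print a message and spin forever.
 */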
VOID 
stop_kernel()
{
    Print(L"stop\n");
    while(1) {
    }
}

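/* bookkeeping for regions merged into a previous e820 entry or
 * accumulated once the map is full */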
typedef struct _estatus {
    unsigned long long estart;
    unsigned long esize;
    unsigned int etype;
    int merge;
} estatus_t;

enum MAP_DATA {
    MAP_MAX = 128
};


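/* fill_memory_map()
 * Add one region to the e820-style map: contiguous regions of the same type
 * are merged into the previous entry, and *est tracks regions once MAP_MAX
 * entries are in use.
 */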
static VOID
fill_memory_map(struct e820entry *map, 
        UINTN *nr_map,
        UINT64 start,
        UINT64 size,
        UINT64 type,
        estatus_t *est)
{
    UINTN x = *nr_map;

    /* the new region extends the previous entry: merge them */
    if ((x > 0) &&
            (map[x-1].addr + map[x-1].size == start) &&
            (map[x-1].type == type)) {
        map[x-1].size += size;
        est->estart = map[x-1].addr;
        est->esize = map[x-1].size;
        est->etype = map[x-1].type;
        est->merge++;
        return;
    }

    if (x<MAP_MAX) {
        map[x].addr = start;
        map[x].size = size;
        map[x].type = type;
        (*nr_map)++;
        est->merge=0;
        return;
    }

    if ((est->etype != type) || (est->estart + est->esize) != start) {
        est->merge = 0;
        est->estart = start;
        est->esize = size;
        est->etype = type;
    }

    est->estart += est->esize;
    est->esize += size;
    est->merge++;
    
    return;
}

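/* set_memory_map()
 * Walk the UEFI memory map descriptors and print the type of each region.
 */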
VOID
set_memory_map(mmap_desc_t *mmapd)
{
    UINTN nr_map = mmapd->map_size / mmapd->desc_size;

    UINTN i;
    for (i=0; i <nr_map ;i++) {
        /* descriptors are desc_size bytes apart, which may be larger than
         * sizeof(EFI_MEMORY_DESCRIPTOR) */
        EFI_MEMORY_DESCRIPTOR *p = (EFI_MEMORY_DESCRIPTOR *)
                ((UINT8 *)mmapd->md + i * mmapd->desc_size);
        switch (p->Type) {
            case EfiReservedMemoryType:
                Print(L"| reserved memory.");
                break;
            case EfiLoaderCode:
                Print(L"| loader code.");
                break;
            case EfiLoaderData:
                Print(L"| loader data.");
                break;
            case EfiBootServicesCode:
                Print(L"| boot services code.");
                break;
            case EfiBootServicesData:
                Print(L"| boot services data.");
                break;
            case EfiConventionalMemory:
                Print(L"| conventional memory.");
                break;
            case EfiUnusableMemory:
                Print(L"| unusable memory.");
                break;
            case EfiACPIReclaimMemory:
                Print(L"| ACPI reclaim memory.");
                break;
            case EfiACPIMemoryNVS:
                Print(L"| ACPI memory NVS.");
                break;
            case EfiMemoryMappedIO:
                Print(L"| memory mapped IO.");
                break;
            case EfiMemoryMappedIOPortSpace:
                Print(L"| memory mapped IO port space.");
                break;
            case EfiPalCode:
                Print(L"| pal code. ");
                break;
            case EfiMaxMemoryType:
                Print(L"| max memory type.");
                break;
        }
    }
    return;
}


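/* start_elilo_kernel()
 * Exit boot services, install the loader's GDT/IDT, build the new page
 * tables and switch CR3/CR4/CR0 over to them.
 */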
EFI_STATUS
start_elilo_kernel(EFI_HANDLE image)
{
    Print(L"Start original ELILO kernel.\n");
    close_devices();

    /*
    mmap_desc_t mmapd;
    get_memmap(&mmapd);
    set_memory_map(&mmapd);
    */

    UINTN map_size, cookie, size;
    UINT32 version;
    map_size = EFI_PAGE_SIZE * 2;
    EFI_MEMORY_DESCRIPTOR *md;

    EFI_STATUS status = uefi_call_wrapper(BS->AllocatePool, 3, EfiLoaderData, map_size, (VOID **)&md);
    if (EFI_ERROR(status)) {
        Print(L"error 'allocate pool' %r \n", status);
    }

    status = uefi_call_wrapper(BS->GetMemoryMap, 5, &map_size, md, &cookie, &size, &version);
    if (EFI_ERROR(status)) {
        Print(L"error 'get memory map' %r \n", status);
    }

    status = uefi_call_wrapper(BS->ExitBootServices, 2, image, cookie);
    if (EFI_ERROR(status)) {
        Print(L"error 'exit boot services' %r \n", status);
    }

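    /* interrupts stay off while the descriptor tables and page tables are replaced */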
    asm volatile ("cli"::);

    MEMSET(gdt_addr.base, gdt_addr.limit, 0);
    MEMCPY(gdt_addr.base, init_gdt, sizeof_init_gdt);

    asm volatile ( "lidt %0" : : "m" (idt_addr) );
    asm volatile ( "lgdt %0" : : "m" (gdt_addr) );

    /* CPUID leaf 0x80000008: EAX[7:0] reports the physical address width */
    UINTN eax = 0x80000008, ebx = 0, ecx = 0, edx = 0;
    cpuid(&eax, &ebx, &ecx, &edx);
    Print(L"eax %x\n", eax);
    eax &= PHYADDR_WIDTH;
    Print(L"physical address width %d\n", eax);

    Print(L"disable cr0...\n");
    disable_paging_cr0();

    Print(L"init pagetable...\n");
    init_pgtable();

    UINT64 addr = PML4_START;
    Print(L"insert addr %lx to cr3...\n", addr);
    insert_addr_to_cr3(addr);

    Print(L"enable paging cr0...\n");
    enable_cr4_pae(); 
    enable_paging_cr0();

    Print(L"finish to initialize...\n");

    asm volatile ("hlt" : : );

    Print(L"finish internal kernel\n");
    return EFI_SUCCESS;
}