First steps of new VMM.

master
q3k 2013-09-01 23:06:00 +02:00
parent c45c4a5f6b
commit 4958d0b6f1
15 changed files with 293 additions and 42 deletions

View File

@ -0,0 +1,65 @@
#ifndef __ELF_H__
#define __ELF_H__
#include "types.h"
// "\x7FELF" read as a little-endian u32 - the first four bytes of every ELF file.
#define ELF_IDENT_MAGIC 0x464c457f // \x7FELF
// The e_ident block: the first 16 bytes of an ELF image.
typedef struct elf_ident {
u32 Magic; // \x7FELF
u8 Class; // ELFCLASS*: 1 = 32-bit, 2 = 64-bit
u8 Data; // ELFDATA*: 1 = little-endian, 2 = big-endian
u8 Version; // EV_CURRENT (1) expected
u8 Padding[9]; // pads e_ident out to its fixed 16-byte size
} __attribute__((packed)) _elf_ident;
// ELF64 file header (layout matches Elf64_Ehdr).
typedef struct {
_elf_ident Identification;
u16 Type; // e_type: relocatable / executable / shared / core
u16 Machine; // e_machine: target architecture
u32 Version; // e_version
u64 Entry; // e_entry: entry point virtual address
u64 ProgramHeaderOffset; // e_phoff: file offset of the program header table
u64 SectionHeaderOffset; // e_shoff: file offset of the section header table
u32 Flags; // e_flags
u16 HeaderSize; // e_ehsize
u16 ProgramHeaderEntrySize; // e_phentsize
u16 NumProgramHeaderEntries; // e_phnum
u16 SectionHeaderEntrySize; // e_shentsize
u16 NumSectionHeaderEntries; // e_shnum
u16 SectionEntryStrings; // e_shstrndx: index of the section-name string table
} __attribute__((packed)) TELFHeader;
// Section types we care about (sh_type values; this is an enumeration, not a bitmask).
#define SHT_PROGBITS 1
#define SHT_NOBITS 8
// ELF64 section header (layout matches Elf64_Shdr).
typedef struct {
u32 Name; // sh_name: offset into the section-name string table
u32 Type; // sh_type (SHT_*)
u64 Flags; // sh_flags
u64 Address; // sh_addr: virtual address the section loads at (0 if not loaded)
u64 Offset; // sh_offset: offset of the section's data within the file
u64 Size; // sh_size: section size in bytes
u32 Link; // sh_link
u32 Info; // sh_info
u64 Alignment; // sh_addralign
u64 FixedSize; // sh_entsize: entry size for table-like sections
} __attribute__((packed)) TELFSectionHeader;
// Parsed handle to an in-memory ELF image; filled in by elf_open().
typedef struct {
void *Address; // start of the raw image in memory
u64 Size; // size of the raw image in bytes
TELFHeader *Header; // convenience pointer (same as Address)
u64 SectionCount; // number of section headers
TELFSectionHeader *Sections; // section header table, inside the image
} TELF;
// Parse the image at Address/Size into *elf. Returns 0 on success, 1 on error.
u8 elf_open(TELF *elf, void *Address, u64 Size);
// Name of a section, resolved through the section-name string table.
s8* elf_section_get_name(TELF *elf, u64 Section);
// Address of the section's bytes within the loaded image (image base + sh_offset);
// NOTE(review): only a true physical address while the image is identity mapped.
u64 elf_section_get_physical_address(TELF *elf, u64 Section);
// Virtual address the section wants to be mapped at (sh_addr).
u64 elf_section_get_virtual_address(TELF *elf, u64 Section);
// Section size in bytes (sh_size).
u64 elf_section_get_size(TELF *elf, u64 Section);
// Returns whether this section has actual bits to load (is data or text)
u64 elf_section_has_bits(TELF *elf, u64 SectionID);
#endif

View File

@ -5,8 +5,8 @@
#include "load_context.h"
// Some helpful macros
#define PAGING_GET_ML4_INDEX(x) (((u64)x >> 39) & 0x1FF)
#define PAGING_GET_DPT_INDEX(x) (((u64)x >> 30) & 0x1FF)
#define PAGING_GET_PML4_INDEX(x) (((u64)x >> 39) & 0x1FF)
#define PAGING_GET_PDP_INDEX(x) (((u64)x >> 30) & 0x1FF)
#define PAGING_GET_DIR_INDEX(x) (((u64)x >> 21) & 0x1FF)
#define PAGING_GET_TAB_INDEX(x) (((u64)x >> 12) & 0x1FF)
#define PAGING_GET_PAGE_OFFSET(x) (x & 0xFFF)
@ -34,7 +34,7 @@ struct S_PAGING_DIR_ENTRY {
} __attribute__((packed));
typedef struct S_PAGING_DIR_ENTRY T_PAGING_DIR_ENTRY;
struct S_PAGING_DPT_ENTRY {
struct S_PAGING_PDP_ENTRY {
u8 Present : 1;
u8 RW : 1;
u8 User : 1;
@ -43,7 +43,7 @@ struct S_PAGING_DPT_ENTRY {
u64 Physical : 40; // The physical address is limited by MAXPHYADDR
u64 Zero : 12;
} __attribute__((packed));
typedef struct S_PAGING_DPT_ENTRY T_PAGING_DPT_ENTRY;
typedef struct S_PAGING_PDP_ENTRY T_PAGING_PDP_ENTRY;
struct S_PAGING_ML4_ENTRY {
u8 Present : 1;
@ -67,14 +67,22 @@ typedef struct {
} __attribute__((packed)) T_PAGING_DIR;
typedef struct {
T_PAGING_DPT_ENTRY Entries[512]; // For use by the CPU
} __attribute__((packed)) T_PAGING_DPT;
T_PAGING_PDP_ENTRY Entries[512]; // For use by the CPU
} __attribute__((packed)) T_PAGING_PDP;
typedef struct {
T_PAGING_ML4_ENTRY Entries[512]; // For use by the CPU
} __attribute__((packed)) T_PAGING_ML4;
// Generic functions
T_PAGING_ML4 * paging_get_ml4(void);
void paging_set_ml4(u64 ML4Physical);
// Management of kernel paging structures
void paging_kernel_init(void);
// Map a 4k page from Physical to Virtual. AccessBits is undefined right now.
void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits);
// Map an arbitrary Size range from Physical to Virtual. Start addresses must still be page-aligned; Size is rounded up to a page multiple.
void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits);
#endif

View File

@ -7,7 +7,11 @@
// Page frame allocation
void physmem_init(void);
// This returns a frame number, not an address!
u64 physmem_allocate_page(void);
// But this returns an address:
u64 physmem_allocate_physical(void);
u64 physmem_page_to_physical(u64 Page);
u64 physmem_physical_to_page(u64 Physical);

View File

@ -2,6 +2,7 @@
#define __SYSTEM_H__
#include "load_context.h"
#include "Tier0/elf.h"
#define SYSTEM_KERNEL_VIRTUAL 0xFFFFFFFF80000000
@ -108,10 +109,7 @@ typedef struct {
T_SYSTEM_INVALID_RAM InvalidMemoryAreas[256];
u8 NumInvalidAreas;
// kernel code size and location
u64 KernelSize;
u64 KernelPhysicalStart;
u64 KernelVirtualStart;
TELF KernelELF;
} T_SYSTEM_INFO;
u64 system_cpuid(u32 Code);

View File

@ -25,6 +25,7 @@ struct S_LOAD_CONTEXT {
// Kernel ELF
void *KernelELF;
u64 KernelELFSize;
} __attribute__((packed));
typedef struct S_LOAD_CONTEXT T_LOAD_CONTEXT;

View File

@ -13,7 +13,8 @@ void cpp_call_ctors(void)
kprintf("[i] Calling %i constructors before jumping to Tier1..\n", Number);
for(u64 *C = (u64*)&_start_ctors; C < (u64*)&_end_ctors; ++C)
{
((void (*) (void)) (*C)) ();
kprintf("should've called %x\n", *C);
// ((void (*) (void)) (*C)) ();
}
}

57
Kernel/src/Tier0/elf.c Normal file
View File

@ -0,0 +1,57 @@
#include "Tier0/elf.h"
#include "Tier0/kstdio.h"
// Parse the ELF image at Address (Size bytes) into *elf.
// Returns 0 on success, 1 on a malformed or truncated image.
u8 elf_open(TELF *elf, void *Address, u64 Size)
{
    // Refuse images too small to hold the file header: without this we
    // would read header fields from beyond the end of the buffer.
    if (Size < sizeof(TELFHeader))
    {
        kprintf("ELF: image too small.\n");
        return 1;
    }
    TELFHeader *Header = (TELFHeader *)Address;
    if (Header->Identification.Magic != ELF_IDENT_MAGIC)
    {
        kprintf("ELF: bad magic (0x%x)\n", Header->Identification.Magic);
        return 1;
    }
    // Validate that the whole section header table fits inside the image
    // before publishing any pointers into it.
    u64 SectionCount = Header->NumSectionHeaderEntries;
    if (Header->SectionHeaderOffset + SectionCount * sizeof(TELFSectionHeader) > Size)
    {
        kprintf("ELF: invalid section count/offset.\n");
        return 1;
    }
    elf->Address = Address;
    elf->Size = Size;
    elf->Header = Header;
    elf->Sections = (TELFSectionHeader *)((u64)Header + (u64)Header->SectionHeaderOffset);
    elf->SectionCount = SectionCount;
    return 0;
}
// Resolve a section's name: index the section-name string table
// (section Header->SectionEntryStrings) with the section's sh_name offset.
s8* elf_section_get_name(TELF *elf, u64 SectionID)
{
    TELFSectionHeader *Strings = elf->Sections + elf->Header->SectionEntryStrings;
    u64 NameOffset = Strings->Offset + elf->Sections[SectionID].Name;
    return (s8*)elf->Header + NameOffset;
}
// Where the section's bytes currently sit in memory: the base of the
// loaded image plus the section's file offset.
u64 elf_section_get_physical_address(TELF *elf, u64 SectionID)
{
    return (u64)elf->Header + elf->Sections[SectionID].Offset;
}
// The virtual address (sh_addr) this section expects to be mapped at.
u64 elf_section_get_virtual_address(TELF *elf, u64 SectionID)
{
    return (u64)elf->Sections[SectionID].Address;
}
// Section size in bytes (sh_size).
u64 elf_section_get_size(TELF *elf, u64 SectionID)
{
    return (u64)elf->Sections[SectionID].Size;
}
// Returns nonzero if the section occupies bytes in the file (text/data),
// zero for sections with no file backing such as .bss.
u64 elf_section_has_bits(TELF *elf, u64 SectionID)
{
    // sh_type is an enumeration, not a bitmask: the old test
    // `!(Type & SHT_NOBITS)` misclassified any type with bit 3 set
    // (e.g. SHT_REL == 9) as having no bits. Compare for equality.
    return elf->Sections[SectionID].Type != SHT_NOBITS;
}

View File

@ -55,10 +55,16 @@ void kmain(u32 LoadContextAddress)
if (!CPUID_HAS(APIC))
PANIC("CPU doesn't support APIC!");
interrupts_init_simple();
exceptions_init_simple();
system_parse_load_context(LoadContext);
kprintf("[i] Booting via %s.\n", LoadContext->LoaderName);
kprintf("[i] Memory available: %uk.\n", system_get_memory_upper());
physmem_init();
paging_kernel_init();
for(;;){}
// // Let's create a new kernel stack
// u64 StackVirtual = (u64)paging_scratch_allocate();
@ -80,20 +86,18 @@ void kmain(u32 LoadContextAddress)
if (RSDPAddress == 0)
PANIC("ACPI not supported! What is this, 1999?");
//smp_initialize();
interrupts_init_simple();
exceptions_init_simple();
apic_enable_lapic();
smp_initialize();
// apic_enable_lapic();
heap_init_simple();
// enable FPU/SSE...
__asm__ volatile(
"movq %cr0, %rax;"
"and $0xfffb, %ax;"
"or $0x2, %rax;"
"movq %rax, %cr0;"
"movq %cr4, %rax;"
"orq $0x600, %rax;"
"movq %rax, %cr4;");
// // enable FPU/SSE...
// __asm__ volatile(
// "movq %cr0, %rax;"
// "and $0xfffb, %ax;"
// "or $0x2, %rax;"
// "movq %rax, %cr0;"
// "movq %cr4, %rax;"
// "orq $0x600, %rax;"
// "movq %rax, %cr4;");
cpp_call_ctors();
cpp_start_ckernel();

View File

@ -4,13 +4,9 @@
#include "Tier0/panic.h"
#include "Tier0/system.h"
#include "Tier0/physmem.h"
#include "Tier0/elf.h"
#include "types.h"
struct {
T_PAGING_TAB_ENTRY *TempPage; // For temp page mapping.
u64 TempPageVirtual;
} g_KernelPaging;
T_PAGING_ML4 *paging_get_ml4(void)
{
u64 Address;
@ -21,4 +17,94 @@ T_PAGING_ML4 *paging_get_ml4(void)
// Load CR3 with the physical address of a PML4, switching the active
// address space (flushing non-global TLB entries as a side effect).
void paging_set_ml4(u64 ML4Physical)
{
__asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical));
}
void _zero_paging_structure(void *Structure)
{
for (unsigned i = 0; i < 512; i++)
((u64 *)Structure)[i] = 0;
}
// Kernel paging state: the root PML4 that all kernel mappings hang off.
struct {
T_PAGING_ML4 *ML4;
} g_KernelPaging;
// Grab a page frame that is guaranteed to be usable before our own paging
// is live: the bootloader identity-mapped physical memory from 0x0 up to
// 0xEFFFFF (the extended memory area), so any frame below that limit is
// already accessible.
void *_early_alloc(void)
{
    u64 Frame = physmem_allocate_physical();
    ASSERT(Frame < 0x00EFFFFF - 0x1000);
    return (void*)Frame;
}
// Map one 4 KiB page at Virtual to the frame at Physical in the kernel
// address space. Both addresses must be page aligned. Intermediate
// structures (PDP, directory, table) are allocated on demand via
// _early_alloc. AccessBits is reserved for future use.
// NOTE(review): an existing mapping is silently overwritten and no invlpg
// is issued - fine before these tables are live in CR3, but verify callers
// once they are.
void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits)
{
    if (Virtual % 0x1000 || Physical % 0x1000)
        PANIC("BUG: Requested mapping of unaligned address.\n");

    // Split the virtual address into its four table indices.
    u64 PML4I = PAGING_GET_PML4_INDEX(Virtual);
    u64 PDPI = PAGING_GET_PDP_INDEX(Virtual);
    u64 DIRI = PAGING_GET_DIR_INDEX(Virtual);
    u64 TABI = PAGING_GET_TAB_INDEX(Virtual);

    // Walk PML4 -> PDP, creating the PDP if it is not present yet.
    T_PAGING_ML4_ENTRY *ML4E = &g_KernelPaging.ML4->Entries[PML4I];
    T_PAGING_PDP *PDP = (T_PAGING_PDP *)(ML4E->Physical << 12);
    if (!ML4E->Present)
    {
        PDP = (T_PAGING_PDP *)_early_alloc();
        _zero_paging_structure(PDP);
        ML4E->Present = 1;
        ML4E->Physical = (u64)PDP >> 12;
    }

    // PDP -> page directory.
    T_PAGING_PDP_ENTRY *PDPE = &PDP->Entries[PDPI];
    T_PAGING_DIR *Dir = (T_PAGING_DIR *)(PDPE->Physical << 12);
    if (!PDPE->Present)
    {
        Dir = (T_PAGING_DIR *)_early_alloc();
        _zero_paging_structure(Dir);
        PDPE->Present = 1;
        PDPE->Physical = (u64)Dir >> 12;
    }

    // Directory -> page table.
    T_PAGING_DIR_ENTRY *DIRE = &Dir->Entries[DIRI];
    T_PAGING_TAB *Tab = (T_PAGING_TAB *)(DIRE->Physical << 12);
    if (!DIRE->Present)
    {
        Tab = (T_PAGING_TAB *)_early_alloc();
        _zero_paging_structure(Tab);
        DIRE->Present = 1;
        DIRE->Physical = (u64)Tab >> 12;
    }

    // Finally point the table entry at the target frame.
    T_PAGING_TAB_ENTRY *TABE = &Tab->Entries[TABI];
    TABE->Physical = Physical >> 12;
    TABE->Present = 1;
}
// Map Size bytes from PhysicalStart to VirtualStart, one 4 KiB page at a
// time. Both start addresses must be page aligned; Size is rounded up to a
// whole number of pages. AccessBits is forwarded to paging_map_page
// (reserved for future use).
void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits)
{
    if (VirtualStart % 0x1000 || PhysicalStart % 0x1000)
        PANIC("BUG: Requested mapping of unaligned address.\n");

    // Round Size up to a page multiple. The mask must be a full 64-bit
    // ~0xFFF: the previous literal 0xFFFFF000 cleared bits 32-63 and so
    // silently truncated any size of 4 GiB or more.
    u64 AlignedSize = (Size + 0xFFF) & ~(u64)0xFFF;

    for (u64 i = 0; i < AlignedSize; i += 0x1000)
        paging_map_page(VirtualStart + i, PhysicalStart + i, AccessBits);
}
// Build the kernel's own paging structures: allocate a fresh PML4 and
// identity map low physical memory. This only constructs the tables; it
// does not load them into CR3.
void paging_kernel_init(void)
{
    g_KernelPaging.ML4 = _early_alloc();
    // The freshly allocated frame still contains whatever was in RAM:
    // without zeroing it, paging_map_page would read garbage Present bits
    // and follow bogus physical pointers.
    _zero_paging_structure(g_KernelPaging.ML4);
    // Identity map all the BIOS EMM (extended memory). This covers a lot of
    // classic PC I/O mapped stuff (eg. video RAM) and probably our kernel and
    // loader artifacts.
    paging_map_area(0x0, 0x0, 0x00EFFFFF, 0);
}

View File

@ -47,7 +47,6 @@ u64 __physmem_allocate_first_page(void)
if (NextPageStart > g_PhysicalMemory.MemorySize)
PANIC("Out of memory!");
}
kprintf("%x\n", NextPageStart);
return NextPageStart;
}
@ -116,6 +115,11 @@ u64 physmem_allocate_page(void)
return 0;
}
// Like physmem_allocate_page(), but returns a physical byte address
// instead of a page frame number.
u64 physmem_allocate_physical(void)
{
return (physmem_allocate_page() * PHYSMEM_PAGE_SIZE);
}
void physmem_free_page(u64 Page)
{
if (Page > PHYSMEM_METADATA_COVERS_BITS)

View File

@ -111,7 +111,7 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext)
BIOSArea->Size = 1024 *1024;
g_SystemInfo.NumInvalidAreas++;
// Mark the loader physical location ad unavailable.
// Mark the loader physical location as unavailable.
T_SYSTEM_INVALID_RAM *LoaderArea = &g_SystemInfo.InvalidMemoryAreas[g_SystemInfo.NumInvalidAreas];
LoaderArea->Base = LoadContext->ReservedPhysicalStart;
LoaderArea->Size = LoadContext->ReservedPhysicalEnd - LoadContext->ReservedPhysicalStart;
@ -128,6 +128,31 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext)
LAPICArea->Base = 0xFEE00000;
LAPICArea->Size = 0xFEEFFFFF - 0xFEE00000;
g_SystemInfo.NumInvalidAreas++;
// Parse the kernel ELF
TELF *ELF = &g_SystemInfo.KernelELF;
if (elf_open(ELF, LoadContext->KernelELF, LoadContext->KernelELFSize))
PANIC("Error parsing kernel ELF file.\n");
else
{
kprintf("[i] Kernel has the following sections:\n");
for (u32 i = 0; i < ELF->SectionCount; i++)
{
u64 Virtual = elf_section_get_virtual_address(ELF, i);
u64 HasBits = elf_section_has_bits(ELF, i);
if (Virtual && HasBits)
kprintf(" - %s (virt: 0x%X, phys: 0x%x, %i bytes)\n",
elf_section_get_name(ELF, i),
Virtual,
elf_section_get_physical_address(ELF, i),
elf_section_get_size(ELF, i));
else if (Virtual)
kprintf(" - %s (virt: 0x%X, (zero'd bits), %i bytes)\n",
elf_section_get_name(ELF, i),
Virtual,
elf_section_get_size(ELF, i));
}
}
kprintf("[i] Highest unavailable address is %x.\n", HighestUnavailable);
g_SystemInfo.MemoryTop = HighestUnavailable;
@ -197,16 +222,6 @@ void system_msr_set(u32 MSR, u64 Data)
__asm__ volatile("wrmsr" :: "a"((u32)(Data & 0xFFFFFFFF)), "d"((u32)(Data >> 32)), "c"(MSR));
}
u64 system_get_kernel_size(void)
{
return g_SystemInfo.KernelSize;
}
u64 system_get_kernel_virtual_start(void)
{
return g_SystemInfo.KernelVirtualStart;
}
u64 system_get_memory_top(void)
{
return g_SystemInfo.MemoryTop;

View File

@ -36,7 +36,7 @@ CLogger &CKernel::Logger(void)
void CKernel::Start(void)
{
kprintf("[i] Hello from C++ land!\n");
for (;;) {}
if (m_dwMagic != CKERNEL_MAGIC)
{
kprintf("[e] Error! My constructor wasn't called properly.\n");

View File

@ -1,26 +1,31 @@
#include "types.h"
extern "C" {
#include "Tier0/heap.h"
#include "Tier0/panic.h"
};
typedef long unsigned int size_t;
void *operator new(size_t size)
{
PANIC("nonew4u");
return kmalloc(size);
}
void *operator new[](size_t size)
{
PANIC("nonew4u");
return kmalloc(size);
}
void operator delete(void *p)
{
PANIC("nodel4u");
kfree(p);
}
void operator delete[](void *p)
{
PANIC("nodel4u");
kfree(p);
}

View File

@ -19,6 +19,7 @@ struct S_LOAD_CONTEXT {
// Kernel ELF
u64 KernelELF;
u64 KernelELFSize;
} __attribute__((packed));
typedef struct S_LOAD_CONTEXT T_LOAD_CONTEXT;

View File

@ -181,6 +181,8 @@ u32 load(void *Multiboot, unsigned int Magic)
g_Context.VGATextModeUsed = 1;
g_Context.MultibootUsed = 1;
g_Context.MultibootHeader = (u32)Multiboot;
g_Context.KernelELF = (u64)KernelStart;
g_Context.KernelELFSize = (u64)(KernelEnd - KernelStart);
printf("Load context at 0x%x\n", (u64)&g_Context);