Initial commit

This commit is contained in:
2021-10-10 14:39:17 +08:00
commit d25da95e1e
135 changed files with 19184 additions and 0 deletions

41
memory/liballoc_impl.c Normal file
View File

@ -0,0 +1,41 @@
#include "memory.h"
#include "paging_internal.h"
#include "../runtime/stdio.h"
#include "../interrupt/interrupt.h"
#include <stdint.h>
#include "../extlib/liballoc/liballoc_1_1.h"
// liballoc_lock is called by liballoc before it touches its internal
// structures.  Masks maskable interrupts (CLI) so an ISR cannot re-enter
// the allocator.  Always reports success (returns 0).
//
// NOTE(review): the pre-lock interrupt state is not saved anywhere;
// liballoc_unlock re-enables interrupts whenever interrupt_Enabled is still
// set — confirm lock/unlock pairs cannot nest.
int liballoc_lock() {
	if (interrupt_Enabled)
		asm volatile("cli");
	return 0;
}
// liballoc_unlock is called by liballoc after it is done with its internal
// structures.  Re-enables interrupts (STI) if the kernel considers them
// enabled.  Always reports success (returns 0).
int liballoc_unlock() {
	if (interrupt_Enabled)
		asm volatile("sti");
	return 0;
}
// Current kernel heap break; grows upward from KERNEL_HEAP_VIRTUAL and is
// never lowered (liballoc_free releases frames but not address space).
static uint64_t heapBreak = KERNEL_HEAP_VIRTUAL;

// liballoc_alloc hands liballoc `pages` fresh virtual pages by bumping the
// heap break and backing the range with newly allocated physical frames.
//
// Fix: the log call passed a size_t to %u and void* expressions to %llx;
// use %llu/%llx with explicit 64-bit casts so the conversion specifiers
// match the vararg types.
void *liballoc_alloc(size_t pages) {
	void *ret = (void *)heapBreak;
	heapBreak += SYSTEM_PAGE_SIZE * pages;
	paging_map_PageAllocated((uint64_t)ret, pages, MAP_PROT_READ | MAP_PROT_WRITE);
	io_Printf("liballoc_alloc: allocated %llu pages at HEAP+%llx (%llx)\n",
	          (unsigned long long)pages,
	          (unsigned long long)((uint64_t)ret - KERNEL_HEAP_VIRTUAL),
	          (unsigned long long)(uint64_t)ret);
	return ret;
}
// liballoc_free returns `pages` pages starting at ptr to the physical frame
// allocator and unmaps them.  Always reports success (returns 0).
//
// Fix: format/argument mismatch — %u for a size_t and %llx for void*
// expressions; cast explicitly to the 64-bit types the specifiers expect.
int liballoc_free(void *ptr, size_t pages) {
	paging_map_FreeAllocated((uint64_t)ptr, (uint64_t)ptr + SYSTEM_PAGE_SIZE * pages);
	io_Printf("liballoc_free: freed %llu pages at HEAP+%llx (%llx)\n",
	          (unsigned long long)pages,
	          (unsigned long long)((uint64_t)ptr - KERNEL_HEAP_VIRTUAL),
	          (unsigned long long)(uint64_t)ptr);
	return 0;
}

40
memory/memory.c Normal file
View File

@ -0,0 +1,40 @@
#include "../main.h"
#include "memory.h"
#include "../runtime/stdio.h"
#include "../extlib/liballoc/liballoc_1_1.h"
#include <string.h>
// efiMallocTyped allocates `size` zero-initialized bytes from
// EFI_BOOT_SERVICES.AllocatePool with the given memory type.
// Returns NULL on allocation failure.
//
// Fix: the AllocatePool status was ignored; on failure `data` stayed
// uninitialized and was handed straight to memset (undefined behavior).
void *efiMallocTyped(size_t size, EFI_MEMORY_TYPE type) {
	void *data = NULL;
	EFI_STATUS status = efiBootServices->AllocatePool(type, size, &data);
	if (EFI_ERROR(status) || !data)
		return NULL;
	memset(data, 0, size);
	return data;
}
// efiMalloc allocates `size` zero-initialized bytes from the UEFI pool
// allocator with memory type EfiLoaderData (see efiMallocTyped).
void *efiMalloc(size_t size) {
	return efiMallocTyped(size, EfiLoaderData);
}
// efiFree returns memory allocated by efiMalloc/efiMallocTyped to the UEFI
// pool allocator.  The FreePool status is intentionally not checked.
void efiFree(void *data) {
	efiBootServices->FreePool(data);
}
// kMalloc allocates `size` bytes from the kernel heap (liballoc).  Only valid
// after runtime_InitPaging has set the allocator up.  Presumably returns NULL
// when liballoc cannot satisfy the request — callers should check.
//
// Fix: pass explicit 64-bit integers to %llu/%llx instead of a raw size_t and
// a void*, so the conversion specifiers match the vararg types.
void *kMalloc(size_t size) {
	void *mem = liballoc_kmalloc(size);
	io_Printf("kMalloc: size=%llu, pos=0x%llx\n",
	          (unsigned long long)size, (unsigned long long)(uint64_t)mem);
	return mem;
}
// kFree returns memory obtained from kMalloc to the kernel heap.
//
// Fix: cast the pointer to a 64-bit integer for the %llx specifier instead of
// relying on the varargs ABI happening to pass void* the same way.
void kFree(void *data) {
	io_Printf("kFree: 0x%llx\n", (unsigned long long)(uint64_t)data);
	liballoc_kfree(data);
}
// kMemoryMap maps new physical pages into the kernel virtual memory space.
//
// TODO: not implemented yet.  Fix: the stub previously fell off the end of a
// non-void function — undefined behavior as soon as a caller uses the result.
// It now explicitly reports failure by returning NULL until the real
// implementation lands.  All parameters are accepted but currently unused.
void *kMemoryMap(void *desiredVirtual, int pageCount, int protectionFlags, int flags, int fd) {
	(void)desiredVirtual;
	(void)pageCount;
	(void)protectionFlags;
	(void)flags;
	(void)fd;
	return NULL;
}
// kMemoryUnmap unmaps previously mapped physical memory of the kernel space.
// TODO: not implemented yet — currently a no-op; both arguments are ignored.
void kMemoryUnmap(void *pageStart, int pageCount) {
}

100
memory/memory.h Normal file
View File

@ -0,0 +1,100 @@
#pragma once
#include "../main.h"
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define KERNEL_CODE_VIRTUAL 0xFFFFFFFFC0000000ull // 2^64 - 1GiB
#define KERNEL_STACK_END_VIRTUAL (KERNEL_CODE_VIRTUAL) // kernel stack sits right below kernel code
#define KERNEL_STACK_INITIAL_SIZE SYSTEM_PAGE_2M_SIZE // currently must be 2 MiB
#define KERNEL_HEAP_VIRTUAL 0xFFFFFFFF00000000ull // 2^64 - 4GiB
#define KERNEL_FRAMEBUFFER_MAPPING 0xFFFFFFFEE0000000ull // 2^64 - 4GiB - 512MiB
#define KERNEL_MISC_MAPPING 0xFFFFFFFEC0000000ull // 2^64 - 5GiB
#define KERNEL_IDT_MAPPING KERNEL_MISC_MAPPING
#define KERNEL_IDT_SIZE (256ull * 16) // fill the 256 interrupt vectors
#define KERNEL_GDT_MAPPING (KERNEL_MISC_MAPPING + KERNEL_IDT_SIZE)
#define KERNEL_GDT_SIZE (16ull * 8)
#define KERNEL_IDTR_MAPPING (KERNEL_MISC_MAPPING + KERNEL_IDT_SIZE + KERNEL_GDT_SIZE)
#define KERNEL_GDTR_MAPPING (KERNEL_MISC_MAPPING + KERNEL_IDT_SIZE + KERNEL_GDT_SIZE + 12)
#define KERNEL_MISC_NEXT (KERNEL_MISC_MAPPING + KERNEL_IDT_SIZE + KERNEL_GDT_SIZE + 24)
#define KERNEL_MISC_SIZE (KERNEL_IDT_SIZE + KERNEL_GDT_SIZE + 24) // add all the misc sizes
extern uint64_t paging_LoaderCodeAddress, paging_LoaderCodeSize; // physical address for loader code section
// paging_MapFunction translates a function pointer inside the running loader
// image to the address it will have in the kernel's high code mapping
// (KERNEL_CODE_VIRTUAL + offset within the loader code section).
static inline uint64_t paging_MapFunction(void *func) {
	uint64_t offsetInImage = (uint64_t)func - paging_LoaderCodeAddress;
	return KERNEL_CODE_VIRTUAL + offsetInImage;
}
// efiMallocTyped allocates from EFI_BOOT_SERVICES.AllocatePool.
void *efiMallocTyped(size_t size, EFI_MEMORY_TYPE type);
// efiMallocTyped allocates from EFI_BOOT_SERVICES.AllocatePool
// with a memory type of EfiLoaderData.
void *efiMalloc(size_t size);
// efiFree frees data allocated from efiMalloc.
void efiFree(void *data);
// kMalloc allocates from system memory directly after paging has been set up
void *kMalloc(size_t size);
// kFree frees data allocated from kMalloc.
void kFree(void *data);
extern EFI_MEMORY_DESCRIPTOR *efiMemoryMap;
extern UINTN efiMemoryMapSize;
extern UINTN efiMemoryMapKey;
extern UINTN efiDescriptorSize;
extern UINT32 efiDescriptorVertion;
// runtime_InitPaging initializes paging and kMalloc/kFree allocator.
// This function calls ExitBootServices(); UEFI boot services are unavailable afterwards.
// Furthermore, it sets up a new stack, calls kMain() and does not return.
//
// If it fails, Panic() is called.
noreturn void runtime_InitPaging();
#define SYSTEM_PAGE_SIZE 4096ull // UEFI uses 4KiB pages by default
#define SYSTEM_PAGE_2M_SIZE 2097152ull // 2 MiB page size
#define SYSTEM_PAGE_1G_SIZE 1073741824ull // 1 GiB page size
#define MAX_SYSTEM_MEMORY_PAGES 16777216ull // 64 GiB
#define MAP_PROT_NONE 0
#define MAP_PROT_EXEC 1
#define MAP_PROT_WRITE 2
#define MAP_PROT_READ 4
#define MAP_DATA 1 // Data, not initialized (kept as-is)
#define MAP_INITIALIZED_DATA 2 // Data, zeroed
#define MAP_FILE 3 // Memory-mapped file IO
// kMemoryPageSize returns the size, in bytes, of the physical memory page
// granularity used throughout the kernel (4 KiB, matching UEFI's default).
inline static size_t kMemoryPageSize() {
	return SYSTEM_PAGE_SIZE;
}
// kMemoryMap maps new physical pages into the kernel virtual memory space.
//
// The memory is not cleared.
void *kMemoryMap(void *desiredVirtual, int pageCount, int protectionFlags, int dataType, int fd);
// kMemoryUnmap unmaps previously mapped physical memory of the kernel space.
void kMemoryUnmap(void *pageStart, int pageCount);
#ifdef __cplusplus
} // extern "C"
#endif

60
memory/memory.hpp Normal file
View File

@ -0,0 +1,60 @@
#pragma once
#include <cstddef>
#include <limits>
#include <new>
#include "memory.h"
namespace helos {

// bad_alloc mirrors std::bad_alloc for kernel code; operator new keeps the
// throw commented out for now, so this is currently only a vocabulary type.
class bad_alloc: public std::exception {
public:
	bad_alloc() noexcept {}
	bad_alloc(const bad_alloc &other) noexcept {}
	bad_alloc &operator=(const bad_alloc &other) noexcept { return *this; }
	virtual const char *what() const noexcept override { return "helos::bad_alloc"; }
};

// kAllocator is a class wrapper for kMalloc/kFree satisfying the named
// requirement Allocator (stateless; all instances are interchangeable).
template<typename Type>
class kAllocator {
public:
	typedef Type value_type;

	kAllocator() = default;
	template<typename Other>
	constexpr kAllocator(const kAllocator<Other> &) {}

	// Fix: kMalloc returns void*, which does not implicitly convert to Type*
	// in C++ — the missing static_cast made this a compile error.
	Type *allocate(std::size_t n) {
		return static_cast<Type *>(kMalloc(n * sizeof(Type)));
	}
	// kFree does not need the element count; it is part of the Allocator
	// interface only.
	void deallocate(Type *p, std::size_t n) {
		(void)n;
		kFree(p);
	}
};

// Stateless allocators always compare equal.
template<class T, class U>
bool operator==(const kAllocator<T> &, const kAllocator<U> &) {
	return true;
}
template<class T, class U>
bool operator!=(const kAllocator<T> &, const kAllocator<U> &) {
	return false;
}

} // namespace helos
// globally overload the new and delete operators
// so keep this header at the top of every source file
//
// operators new and delete only call kMalloc/kFree, so C++ code
// must stay after paging setup
void *operator new(std::size_t size);
void operator delete(void *ptr) noexcept;

13
memory/memory_cpp.cpp Normal file
View File

@ -0,0 +1,13 @@
#include "memory.hpp"
// Global operator new backed by the kernel heap (kMalloc).
//
// NOTE(review): a throwing operator new must not return NULL on failure per
// the C++ standard; the throw below is commented out (the kernel presumably
// builds with -fno-exceptions), so callers currently receive NULL on OOM —
// confirm every user of `new` tolerates that.
void *operator new(std::size_t size) {
	void *data = kMalloc(size);
	/*if (!data) {
		throw helos::bad_alloc();
	}*/
	return data;
}
// Global operator delete backed by the kernel heap (kFree).
// kFree(NULL) is forwarded as-is; liballoc is expected to treat it as a no-op.
void operator delete(void *ptr) noexcept {
	kFree(ptr);
}

212
memory/paging_init.c Normal file
View File

@ -0,0 +1,212 @@
#include "../main.h"
#include "memory.h"
#include "../runtime/panic_assert.h"
#include "../runtime/stdio.h"
#include "../graphics/graphics.h"
#include "../kernel/kmain.h"
#include "../interrupt/interrupt.h"
#include "../execformat/pe/reloc.h"
void execformat_pe_ReadSystemHeader(execformat_pe_PortableExecutable *pe);
#include <stdint.h>
#include <string.h>
#include "paging_internal.h"
EFI_MEMORY_DESCRIPTOR *efiMemoryMap;
UINTN efiMemoryMapSize;
UINTN efiMemoryMapKey;
UINTN efiDescriptorSize;
UINT32 efiDescriptorVertion;
uint64_t paging_TotalBytes, paging_UsableBytes;
bool paging_SupportExecuteDisable;
uint64_t paging_EndPhysicalAddress; // past-the-end marker (and length) for physical memory
int paging_EndPhysicalPage; // past-the-end for physical pages (EndPhysicalAddress/SYSTEM_PAGE_SIZE)
uint64_t paging_PML4Table[512] ALIGNED(4096); // Kernel-mode virtual memory paging directory pointer table (Level 4 paging)
uint64_t paging_LoaderCodeAddress, paging_LoaderCodeSize; // physical address for loader code section
int paging_LoaderCodePageCount; // page count for loader code section
// runtime_InitPaging: collect the UEFI memory map, build the kernel's 4-level
// page tables, exit boot services, switch to the new CR3 and jump to
// kMain_Init.  Never returns; assert() panics on any failure.
void runtime_InitPaging() {
	// TODO Obtain Execute Disable support status instead of assuming its existence
	paging_SupportExecuteDisable = true;

	// obtain the UEFI memory mapping: the first call with an empty buffer is
	// expected to fail with EFI_BUFFER_TOO_SMALL and report the required size
	EFI_STATUS status;
	efiMemoryMapSize = 0;
	efiMemoryMap = NULL;
	status = efiBootServices->GetMemoryMap(
		&efiMemoryMapSize,
		efiMemoryMap,
		&efiMemoryMapKey,
		&efiDescriptorSize,
		&efiDescriptorVertion);
	assert(status == EFI_BUFFER_TOO_SMALL && "What? An empty buffer is not too small?");

	// the allocation below may itself add map entries, so reserve headroom for
	// two more descriptors before the second GetMemoryMap call
	efiMemoryMapSize += 2 * sizeof(EFI_MEMORY_DESCRIPTOR);
	efiMemoryMap = (EFI_MEMORY_DESCRIPTOR *)efiMalloc(efiMemoryMapSize);
	assert(efiMemoryMap && "efiMemoryMap allocate failed");
	status = efiBootServices->GetMemoryMap(
		&efiMemoryMapSize,
		efiMemoryMap,
		&efiMemoryMapKey,
		&efiDescriptorSize,
		&efiDescriptorVertion);
	assert(!EFI_ERROR(status) && "GetMemoryMap() with buffer allocated failed");

	// dump the linker-provided kernel section layout
	io_Printf(" .text: [%08x-%08x] len=%d (%d pages)\n", link_TextStart, link_TextEnd, link_TextEnd - link_TextStart, roundUpToPageCount(link_TextEnd - link_TextStart));
	io_Printf(" .data: [%08x-%08x] len=%d (%d pages)\n", link_DataStart, link_DataEnd, link_DataEnd - link_DataStart, roundUpToPageCount(link_DataEnd - link_DataStart));
	io_Printf(".rodata: [%08x-%08x] len=%d (%d pages)\n", link_RodataStart, link_RodataEnd, link_RodataEnd - link_RodataStart, roundUpToPageCount(link_RodataEnd - link_RodataStart));
	io_Printf(" .bss: [%08x-%08x] len=%d (%d pages)\n\n", link_BssStart, link_BssEnd, link_BssEnd - link_BssStart, roundUpToPageCount(link_BssEnd - link_BssStart));

	// iterate the listing, accumulate counters and print info;
	// the frame bitmap starts all-ones ("used") and usable ranges are cleared
	paging_LoaderCodeAddress = paging_TotalBytes = paging_UsableBytes = 0;
	memset(paging_physical_Bitmap, 0xff, sizeof(paging_physical_Bitmap));
	io_WriteConsoleASCII("EFI Memory mapping:\n");
	// NOTE(review): the update expression below looks unbalanced — it relies
	// on the extra ')' inside the NEXT_MEMORY_DESCRITOR macro expansion
	// (paging_internal.h) to close the for(...) header.  Fix both together if
	// you ever touch either.
	for (EFI_MEMORY_DESCRIPTOR *entry = efiMemoryMap;
		 (char *)entry < (char *)efiMemoryMap + efiMemoryMapSize;
		 entry = NEXT_MEMORY_DESCRITOR(entry, efiDescriptorSize) {
		io_Printf(
			" [%08x-%08x] -> [%08x] %s (%d)\n",
			entry->PhysicalStart,
			entry->PhysicalStart + entry->NumberOfPages * SYSTEM_PAGE_SIZE,
			entry->VirtualStart,
			memoryTypeName(entry->Type),
			entry->Type);
		paging_TotalBytes += SYSTEM_PAGE_SIZE * entry->NumberOfPages;
		if (entry->Type == EfiConventionalMemory) {
			// TODO include EfiBootServicesCode/Data as usable
			paging_physical_BitmapWriteZero(
				entry->PhysicalStart / SYSTEM_PAGE_SIZE,
				entry->PhysicalStart / SYSTEM_PAGE_SIZE + entry->NumberOfPages);
			paging_UsableBytes += SYSTEM_PAGE_SIZE * entry->NumberOfPages;
		} else // page unusable (bitmap already starts all-ones)
			/*paging_physical_BitmapWriteOne(
				entry->PhysicalStart / SYSTEM_PAGE_SIZE,
				entry->PhysicalStart / SYSTEM_PAGE_SIZE + entry->NumberOfPages);*/
			;
		// remember where the loader image lives; sections are relocated from it
		if (entry->Type == EfiLoaderCode) {
			assert(!paging_LoaderCodeAddress && "Two EfiLoaderCode mappings at the same time");
			paging_LoaderCodeAddress = entry->PhysicalStart;
			paging_LoaderCodeSize = entry->NumberOfPages * SYSTEM_PAGE_SIZE;
			paging_LoaderCodePageCount = entry->NumberOfPages;
		}
		// track the past-the-end physical address over all entries
		if (paging_EndPhysicalAddress < entry->PhysicalStart + entry->NumberOfPages * SYSTEM_PAGE_SIZE)
			paging_EndPhysicalAddress = entry->PhysicalStart + entry->NumberOfPages * SYSTEM_PAGE_SIZE;
	}
	paging_EndPhysicalPage = paging_EndPhysicalAddress / SYSTEM_PAGE_SIZE;
	io_Printf(
		" Total memory: %llu (%.2lf MB, %.2lf GB), EndPhyAddr %08llx\n",
		paging_TotalBytes,
		paging_TotalBytes / 1024.0 / 1024.0,
		paging_TotalBytes / 1024.0 / 1024.0 / 1024.0,
		paging_EndPhysicalAddress);
	io_Printf(
		"Usable memory: %llu (%.2lf MB, %.2lf GB)\n",
		paging_UsableBytes,
		paging_UsableBytes / 1024.0 / 1024.0,
		paging_UsableBytes / 1024.0 / 1024.0 / 1024.0);
	io_PauseForKeystroke();
	assert(paging_LoaderCodeAddress && "EfiLoaderCode mapping not found");

	io_WriteConsoleASCII("Mapping kernel memory:\n");
	// map kernel code sections high at KERNEL_CODE_VIRTUAL, preserving each
	// section's offset within the loader image
	io_Printf(" .Text... %d 4K pages\n", roundUpToPageCount(link_TextEnd - link_TextStart));
	paging_map_Page( // map .text: read + execute
		(uint64_t)link_TextStart,
		KERNEL_CODE_VIRTUAL + ((uint64_t)link_TextStart - paging_LoaderCodeAddress),
		roundUpToPageCount(link_TextEnd - link_TextStart),
		MAP_PROT_READ | MAP_PROT_EXEC);
	io_Printf(" .Data... %d 4K pages\n", roundUpToPageCount(link_DataEnd - link_DataStart));
	paging_map_Page( // map .data: read + write, no execute
		(uint64_t)link_DataStart,
		KERNEL_CODE_VIRTUAL + ((uint64_t)link_DataStart - paging_LoaderCodeAddress),
		roundUpToPageCount(link_DataEnd - link_DataStart),
		MAP_PROT_READ | MAP_PROT_WRITE);
	io_Printf(" .Rodata... %d 4K pages\n", roundUpToPageCount(link_RodataEnd - link_RodataStart));
	paging_map_Page( // map .rodata: read only
		(uint64_t)link_RodataStart,
		KERNEL_CODE_VIRTUAL + ((uint64_t)link_RodataStart - paging_LoaderCodeAddress),
		roundUpToPageCount(link_RodataEnd - link_RodataStart),
		MAP_PROT_READ);
	io_Printf(" .Bss... %d 4K pages\n", roundUpToPageCount(link_BssEnd - link_BssStart));
	paging_map_Page( // map .bss: read + write
		(uint64_t)link_BssStart,
		KERNEL_CODE_VIRTUAL + ((uint64_t)link_BssStart - paging_LoaderCodeAddress),
		roundUpToPageCount(link_BssEnd - link_BssStart),
		MAP_PROT_READ | MAP_PROT_WRITE);
	//paging_map_Page(paging_LoaderCodeAddress,KERNEL_CODE_VIRTUAL,paging_LoaderCodePageCount,MAP_PROT_READ|MAP_PROT_WRITE|MAP_PROT_EXEC);

	// map other VM data
	io_Printf(" Framebuffer... %d 2M pages\n", roundUpToPageCount2M(graphics_FramebufferSize));
	paging_map_Page2M( // map the framebuffer output
		(uint64_t)graphics_DeviceFramebuffer,
		KERNEL_FRAMEBUFFER_MAPPING,
		roundUpToPageCount2M(graphics_FramebufferSize),
		MAP_PROT_READ | MAP_PROT_WRITE);
	io_Printf(" Physical... %d 2M pages\n", roundUpToPageCount2M(paging_EndPhysicalAddress));
	paging_map_Page2M( // identity-map all of physical memory at virtual 0
		0,
		0,
		roundUpToPageCount2M(paging_EndPhysicalAddress),
		MAP_PROT_READ | MAP_PROT_WRITE | MAP_PROT_EXEC);
	io_Printf(" Stack\n");
	// one fresh 4K frame just below KERNEL_STACK_END_VIRTUAL as the initial stack
	paging_map_Page(paging_physical_AllocateOneFrame(), KERNEL_STACK_END_VIRTUAL - SYSTEM_PAGE_SIZE, 1, MAP_PROT_READ | MAP_PROT_WRITE);
	/*paging_map_Page2M( // stack, allocate a fresh new 2M
		paging_physical_AllocateOneFrame2M(),
		KERNEL_STACK_END_VIRTUAL - KERNEL_STACK_INITIAL_SIZE,
		KERNEL_STACK_INITIAL_SIZE / SYSTEM_PAGE_2M_SIZE,
		MAP_PROT_READ | MAP_PROT_WRITE);*/
	kMain_StackPosition = KERNEL_STACK_END_VIRTUAL;
	/*paging_map_PageAllocated(
		KERNEL_MISC_MAPPING,
		roundUpToPageCount(KERNEL_MISC_SIZE),
		MAP_PROT_READ|MAP_PROT_WRITE);*/
	io_Printf(" Misc... %d 2M pages\n", roundUpToPageCount2M(KERNEL_MISC_SIZE));
	paging_map_PageAllocated2M( // misc data (IDT/GDT and their registers)
		KERNEL_MISC_MAPPING,
		roundUpToPageCount2M(KERNEL_MISC_SIZE),
		MAP_PROT_READ | MAP_PROT_WRITE);
	//paging_map_PageAllocated(KERNEL_HEAP_VIRTUAL,512,MAP_PROT_READ|MAP_PROT_WRITE);
	io_WriteConsoleASCII("Mapping completed\n");

	// leave the UEFI environment; boot services are unusable from here on
	efiBootServices->ExitBootServices(efiImageHandle, efiMemoryMapKey);
	io_WriteConsoleASCII("Goodbye BootServices!\n");

	// so now we're in unmanaged mode, we need to set up heap and stack, and jump to the new entry point kMain_Init.
	// disable interrupts asap
	asm volatile("cli":::"memory");
	interrupt_Enabled=false;

	// set the new virtual memory mapping
	if (paging_SupportExecuteDisable)
		paging_modeswitch_4LevelPagingNX(paging_PML4Table, 0);
	else
		paging_modeswitch_4LevelPaging(paging_PML4Table, 0);
	// the framebuffer is reachable only through its new mapping now
	graphics_DeviceFramebuffer = (void *)KERNEL_FRAMEBUFFER_MAPPING;
	io_WriteConsoleASCII("Virtual Memory mapping switched\n");

	// relocate the hardcoded symbols from the loader image to the high mapping
	execformat_pe_PortableExecutable pe;
	execformat_pe_ReadSystemHeader(&pe);
	execformat_pe_BaseRelocate(&pe, (void *)link_RelocStart, (void *)link_RelocEnd, paging_LoaderCodeAddress, KERNEL_CODE_VIRTUAL);
	io_WriteConsoleASCII("Relocation OK\n");

	// find the symbol kMain_Init
	//uint64_t target_kmain = KERNEL_CODE_VIRTUAL + ((uint64_t)kMain_Init - paging_LoaderCodeAddress);
	// call it, once and for all
	//((kMainType)target_kmain)();
	kMain_Init();
	__builtin_unreachable(); // execution cannot reach here
}

138
memory/paging_internal.h Normal file
View File

@ -0,0 +1,138 @@
#pragma once
#include "memory.h"
#include "stdbool.h"
#ifdef __cplusplus
extern "C" {
#endif
// defined in paging_init.c
extern EFI_MEMORY_DESCRIPTOR *efiMemoryMap;
extern UINTN efiMemoryMapSize;
extern UINTN efiMemoryMapKey;
extern UINTN efiDescriptorSize;
extern UINT32 efiDescriptorVertion;
// defined in paging_init.c
extern uint64_t paging_TotalBytes, paging_UsableBytes;
extern bool paging_SupportExecuteDisable;
extern uint64_t paging_EndPhysicalAddress; // past-the-end marker (and length) for physical memory
extern int paging_EndPhysicalPage; // past-the-end for physical pages (EndPhysicalAddress/SYSTEM_PAGE_SIZE)
extern uint64_t paging_PML4Table[512]; // Kernel-mode virtual memory paging directory pointer table (PAE/Level 4 paging)
extern uint64_t paging_LoaderCodeAddress; // physical address for loader code section
extern int paging_LoaderCodePageCount; // page count for loader code section
// defined in paging_physical.c
#define BITMAP_BITS 64
extern uint64_t paging_physical_Bitmap[MAX_SYSTEM_MEMORY_PAGES / BITMAP_BITS]; // mapped with Bitmap[i/64] | 1<<(i%64), unlike convention
void paging_physical_BitmapWriteOne(int begin, int end);
void paging_physical_BitmapWriteZero(int begin, int end);
// these functions do not do any bookkeeping so use with care
uint64_t paging_physical_AllocateOneFrame(); // zeros the returned page
uint64_t paging_physical_AllocateOneFrame2M(); // zeros the returned page; this is 512 normal frames
int paging_physical_AllocateFrames(int pageCount, uint64_t frames[]); // allocate frames, not continuous, ret allloced cnt
int paging_physical_AllocateFrames2M(int pageCount, uint64_t frames[]); // allocate 2M frames, not continuous
void paging_physical_FreeFrame(uint64_t frame, int pageCount); // frees continuous frames in physical addr
// defined in paging_map.c
#define PML_PRESENT (1ull << 0)
#define PML_WRITEABLE (1ull << 1)
#define PML_USER (1ull << 2)
#define PML_PAGE_WRITETHROUGH (1ull << 3)
#define PML_PAGE_CACHE_DISABLE (1ull << 4)
#define PML_ACCESSED (1ull << 5)
#define PML_DIRTY (1ull << 6)
#define PML_PAGE_SIZE (1ull << 7)
#define PML_GLOBAL (1ull << 8)
#define PML_EXECUTE_DISABLE (1ull << 63)
#define PML_ADDR_MASK (0xFFFFFFFFFF000ull) // 51:12 at max length
#define PML_ADDR_MASK_2M (0xFFFFFFFE00000ull) // 51:21 at max length
#define PML_ADDR_MASK_1G (0xFFFFFC0000000ull) // 51:30 at max length
void paging_map_Page(uint64_t physical, uint64_t virt, int pageCount, int protectionFlags);
void paging_map_Page2M(uint64_t physical, uint64_t virt, int pageCount, int protectionFlags);
void paging_map_Page1G(uint64_t physical, uint64_t virt, int pageCount, int protectionFlags);
// Allocates pageCount fresh new 4K pages with paging_physical and maps them continuously to virtual
void paging_map_PageAllocated(uint64_t virt, int pageCount, int protectionFlags);
// Allocates pageCount fresh new 2M pages with paging_physical and maps them continuously to virtual
void paging_map_PageAllocated2M(uint64_t virt, int pageCount, int protectionFlags);
// Unmaps the pages at virtual and free the underlying physical frames, with past-the-end of the memory addr
void paging_map_FreeAllocated(uint64_t virt, uint64_t virt_end);
// defined in paging_modeswitch.S
FASTCALL_ABI void paging_modeswitch_4LevelPaging(void *pml4, int pcid);
FASTCALL_ABI void paging_modeswitch_4LevelPagingNX(void *pml4, int pcid); // with setting the Execute-Disable bit
FASTCALL_ABI void paging_modeswitch_Table(void *pml, int pcid);
// memoryTypeName returns a static string naming an EFI_MEMORY_TYPE value,
// or "(unknown)" for values outside the enumerated set.
static inline const char *
memoryTypeName(EFI_MEMORY_TYPE type) {
#define CASE(c) \
	case c:     \
		return #c;
	switch (type) {
		CASE(EfiReservedMemoryType)
		CASE(EfiLoaderCode)
		CASE(EfiLoaderData)
		CASE(EfiBootServicesCode)
		CASE(EfiBootServicesData)
		CASE(EfiRuntimeServicesCode)
		CASE(EfiRuntimeServicesData)
		CASE(EfiConventionalMemory)
		CASE(EfiUnusableMemory)
		CASE(EfiACPIReclaimMemory)
		CASE(EfiACPIMemoryNVS)
		CASE(EfiMemoryMappedIO)
		CASE(EfiMemoryMappedIOPortSpace)
		CASE(EfiPalCode)
		// NOTE(review): EfiMaxMemoryType is printed as "EfiPersistentMemory",
		// presumably because the EFI headers in use predate that type and its
		// value collides with their EfiMaxMemoryType — confirm against the
		// bundled EFI headers.
		case EfiMaxMemoryType:
			return "EfiPersistentMemory";
	}
	return "(unknown)";
#undef CASE
}
#ifndef NEXT_MEMORY_DESCRITOR
// Advance an EFI_MEMORY_DESCRIPTOR pointer by the runtime descriptor size
// (descriptors may be larger than sizeof(EFI_MEMORY_DESCRIPTOR)).
// NOTE(review): the expansion deliberately has one more ')' than '(' — it
// closes the for(...) header in runtime_InitPaging (paging_init.c), whose
// update expression is missing its closing parenthesis.  Fix both together
// if you ever touch either.
#define NEXT_MEMORY_DESCRITOR(desc, size) ((EFI_MEMORY_DESCRIPTOR *)((char *)desc + size)))
#endif
// roundUpTo2Exponent returns the smallest power of two >= v (1 for v <= 1).
// Returns 0 if v cannot be rounded up within 64 bits (v > 2^63).
//
// Fix: for v > 2^63 the original shifted s past the top bit, s became 0 and
// the `s < v` loop never terminated; guard that case explicitly.
inline static uint64_t roundUpTo2Exponent(uint64_t v) {
	if (v > (1ull << 63))
		return 0;
	uint64_t s = 1;
	while (s < v)
		s <<= 1;
	return s;
}
// takeBitfield extracts bits [high:low] (inclusive) of v, shifted down to
// bit 0.  Supports any field width up to 64 bits.
//
// Fix: the mask was built with an int `1`, so any field of 31+ bits shifted
// past the width of int (undefined behavior); use 1ull throughout.
inline static uint64_t takeBitfield(uint64_t v, int high, int low) {
	int width = high - low + 1;
	uint64_t mask = (width >= 64) ? ~0ull : ((1ull << width) - 1ull);
	return (v >> low) & mask;
}
// flush_tlb_single invalidates the TLB entry covering addr on the current
// CPU (INVLPG).  The "memory" clobber keeps the compiler from reordering
// page-table stores across the flush.
inline static void flush_tlb_single(uint64_t addr) {
	asm volatile(
		"invlpg (%0)" ::"r"(addr)
		: "memory");
}
// roundUpToPageCount returns how many 4K pages are needed to cover `size`
// bytes (0 for size 0).
inline static int roundUpToPageCount(uint64_t size) {
	uint64_t fullPages = size / SYSTEM_PAGE_SIZE;
	return (size % SYSTEM_PAGE_SIZE) ? fullPages + 1 : fullPages;
}
// roundUpToPageCount2M returns how many 2 MiB pages are needed to cover
// `size` bytes (0 for size 0).
inline static int roundUpToPageCount2M(uint64_t size) {
	uint64_t fullPages = size / SYSTEM_PAGE_2M_SIZE;
	return (size % SYSTEM_PAGE_2M_SIZE) ? fullPages + 1 : fullPages;
}
// roundUpToPageCount1G returns how many 1 GiB pages are needed to cover
// `size` bytes (0 for size 0).
inline static int roundUpToPageCount1G(uint64_t size) {
	uint64_t fullPages = size / SYSTEM_PAGE_1G_SIZE;
	return (size % SYSTEM_PAGE_1G_SIZE) ? fullPages + 1 : fullPages;
}
#ifdef __cplusplus
} // extern "C"
#endif

189
memory/paging_map.c Normal file
View File

@ -0,0 +1,189 @@
#include "memory.h"
#include "paging_internal.h"
#include "../runtime/panic_assert.h"
#include "string.h"
// some learning:
// https://forum.osdev.org/viewtopic.php?f=1&t=51392 (Confirmative question about 4-level and 5-level paging)
// paging_map_Page maps pageCount contiguous 4K physical pages starting at
// `physical` to the virtual range starting at `virtual`, creating missing
// intermediate paging structures on the fly (freshly allocated frames are
// zeroed by paging_physical_AllocateOneFrame).
//
// The walk dereferences paging-structure physical addresses directly, so it
// assumes physical memory is identity-addressable while this runs — true
// under the UEFI identity mapping and under the kernel's low 1:1 mapping.
// Intermediate entries are always PRESENT|WRITEABLE; protection is enforced
// only at the leaf PTE.
void paging_map_Page(uint64_t physical, uint64_t virtual, int pageCount, int protectionFlags) {
	assert(physical % SYSTEM_PAGE_SIZE == 0 && "Physical address not page-aligned");
	assert(virtual % SYSTEM_PAGE_SIZE == 0 && "Virtual address not page-aligned");
	while (pageCount--) {
		// PML4E index pointing to a PML3 table (Page-Directory-Pointer Table)
		uint64_t *table = paging_PML4Table;
		uint64_t i = takeBitfield(virtual, 47, 39);
		if ((table[i] & PML_PRESENT) == 0) // allocate a new page as PML3 table
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// PML3E(PDPTE) index pointing to a PML2 table (Page-Directory)
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 38, 30);
		if ((table[i] & PML_PRESENT) == 0) // allocate page as page directory
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// PML2E(PD) index pointing to a PML1 table (Page Table)
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 29, 21);
		if ((table[i] & PML_PRESENT) == 0) // allocate page as page table
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// Finally, the page table: write the leaf PTE with the requested
		// protection (NX set when EXEC is absent and the CPU supports it).
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 20, 12);
		table[i] = physical | PML_PRESENT |
				   ((protectionFlags & MAP_PROT_WRITE) ? PML_WRITEABLE : 0) |
				   ((paging_SupportExecuteDisable && !(protectionFlags & MAP_PROT_EXEC)) ? PML_EXECUTE_DISABLE : 0);
		flush_tlb_single(virtual);
		physical += SYSTEM_PAGE_SIZE;
		virtual += SYSTEM_PAGE_SIZE;
	}
}
// paging_map_Page2M maps pageCount contiguous 2 MiB physical pages at
// `physical` to `virtual` using large (PS=1) PD entries, creating missing
// PML4/PDPT levels on the fly.  Same identity-addressing assumption as
// paging_map_Page.
void paging_map_Page2M(uint64_t physical, uint64_t virtual, int pageCount, int protectionFlags) {
	assert(physical % SYSTEM_PAGE_2M_SIZE == 0 && "Physical address not page-aligned");
	assert(virtual % SYSTEM_PAGE_2M_SIZE == 0 && "Virtual address not page-aligned");
	while (pageCount--) {
		// PML4E index pointing to a PML3 table (Page-Directory-Pointer Table)
		uint64_t *table = paging_PML4Table;
		uint64_t i = takeBitfield(virtual, 47, 39);
		if ((table[i] & PML_PRESENT) == 0) // allocate a new page as PML3 table
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// PML3E(PDPTE) index pointing to a PML2 table (Page-Directory)
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 38, 30);
		if ((table[i] & PML_PRESENT) == 0) // allocate page as page directory
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// PML2E(PD): if the slot currently points to a 4K page table (PS=0),
		// free that page-table frame before overwriting the slot with a large
		// mapping (the 4K leaf frames themselves are NOT freed here)
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 29, 21);
		if ((table[i] & PML_PRESENT) != 0 && (table[i] & PML_PAGE_SIZE) == 0)
			paging_physical_FreeFrame(table[i] & PML_ADDR_MASK, 1);
		// 2MB pages in Page Tables
		table[i] = physical | PML_PRESENT | PML_PAGE_SIZE |
				   ((protectionFlags & MAP_PROT_WRITE) ? PML_WRITEABLE : 0) |
				   ((paging_SupportExecuteDisable && !(protectionFlags & MAP_PROT_EXEC)) ? PML_EXECUTE_DISABLE : 0);
		flush_tlb_single(virtual);
		physical += SYSTEM_PAGE_2M_SIZE;
		virtual += SYSTEM_PAGE_2M_SIZE;
	}
}
// paging_map_Page1G maps pageCount contiguous 1 GiB physical pages at
// `physical` to `virtual` using huge (PS=1) PDPT entries, creating a missing
// PML4 level on the fly.  Same identity-addressing assumption as
// paging_map_Page.  NOTE(review): 1 GiB pages require CPU support
// (CPUID leaf 0x80000001 EDX.Page1GB) — not checked here; confirm callers
// only use this on capable hardware.
void paging_map_Page1G(uint64_t physical, uint64_t virtual, int pageCount, int protectionFlags) {
	assert(physical % SYSTEM_PAGE_1G_SIZE == 0 && "Physical address not page-aligned");
	assert(virtual % SYSTEM_PAGE_1G_SIZE == 0 && "Virtual address not page-aligned");
	while (pageCount--) {
		// PML4E index pointing to a PML3 table (Page-Directory-Pointer Table)
		uint64_t *table = paging_PML4Table;
		uint64_t i = takeBitfield(virtual, 47, 39);
		if ((table[i] & PML_PRESENT) == 0) // allocate a new page as PML3 table
			table[i] = paging_physical_AllocateOneFrame() | PML_PRESENT | PML_WRITEABLE;
		// PML3E(PDPTE): if the slot currently points to a page directory
		// (PS=0), free that frame before overwriting with the huge mapping
		table = (uint64_t *)(table[i] & PML_ADDR_MASK);
		i = takeBitfield(virtual, 38, 30);
		if ((table[i] & PML_PRESENT) != 0 && (table[i] & PML_PAGE_SIZE) == 0)
			paging_physical_FreeFrame(table[i] & PML_ADDR_MASK, 1);
		// 1GB pages in Page Directories
		table[i] = physical | PML_PRESENT | PML_PAGE_SIZE |
				   ((protectionFlags & MAP_PROT_WRITE) ? PML_WRITEABLE : 0) |
				   ((paging_SupportExecuteDisable && !(protectionFlags & MAP_PROT_EXEC)) ? PML_EXECUTE_DISABLE : 0);
		flush_tlb_single(virtual);
		physical += SYSTEM_PAGE_1G_SIZE;
		virtual += SYSTEM_PAGE_1G_SIZE;
	}
}
void paging_map_PageAllocated(uint64_t virtual, int pageCount, int protectionFlags) {
assert(virtual % SYSTEM_PAGE_SIZE == 0 && "Virtual address not page-aligned");
// skip the first 1M
int i = 1024 / 4 / BITMAP_BITS;
while (pageCount--) {
uint64_t freshPage = 0;
// this code is in sync with paging_physical.c, paging_physical_AllocateOneFrame, so you have to modify both
for (; i < paging_EndPhysicalPage / BITMAP_BITS; i++)
if (paging_physical_Bitmap[i] != ~0ull)
for (int j = 0; j < BITMAP_BITS; j++)
if ((paging_physical_Bitmap[i] & (1ull << j)) == 0) {
paging_physical_Bitmap[i] |= (1ull << j);
freshPage = (((uint64_t)i) * BITMAP_BITS + j) * SYSTEM_PAGE_SIZE;
memset((void *)freshPage, 0, SYSTEM_PAGE_SIZE);
}
paging_map_Page(freshPage, virtual, 1, protectionFlags);
virtual += SYSTEM_PAGE_SIZE;
}
}
void paging_map_PageAllocated2M(uint64_t virtual, int pageCount, int protectionFlags) {
assert(pageCount > HELOS_BUFFER_SIZE / SYSTEM_PAGE_2M_SIZE * 8 && "helos_Buffer unable to hold all pointers");
assert(virtual % SYSTEM_PAGE_2M_SIZE == 0 && "Virtual address not page-aligned");
uint64_t *buf = (uint64_t *)Buffer;
int allocated = paging_physical_AllocateFrames2M(pageCount, buf);
for (int i = 0; i < allocated; i++)
paging_map_Page2M(buf[i], virtual + SYSTEM_PAGE_2M_SIZE * i, 1, protectionFlags);
}
void paging_map_FreeAllocated(uint64_t virtual, uint64_t end) {
assert(virtual % SYSTEM_PAGE_SIZE == 0 && "Virtual address not page-aligned");
while (virtual < end) {
uint64_t *table = paging_PML4Table;
uint64_t i = takeBitfield(virtual, 47, 39);
if ((table[i] & PML_PRESENT) == 0)
goto loop_end;
table = (uint64_t *)(table[i] & PML_ADDR_MASK);
i = takeBitfield(virtual, 38, 30);
if ((table[i] & PML_PRESENT) == 0)
goto loop_end;
if (table[i] & PML_PAGE_SIZE) { // 1G mapping
paging_physical_FreeFrame(table[i] & PML_ADDR_MASK_1G, 512 * 512);
table[i] = 0;
virtual += SYSTEM_PAGE_1G_SIZE;
continue;
}
table = (uint64_t *)(table[i] & PML_ADDR_MASK);
i = takeBitfield(virtual, 29, 21);
if ((table[i] & PML_PRESENT) == 0)
goto loop_end;
if (table[i] & PML_PAGE_SIZE) { // 2M mapping
paging_physical_FreeFrame(table[i] & PML_ADDR_MASK_2M, 512);
table[i] = 0;
virtual += SYSTEM_PAGE_2M_SIZE;
continue;
}
table = (uint64_t *)(table[i] & PML_ADDR_MASK);
i = takeBitfield(virtual, 20, 12);
if (table[i] & PML_PRESENT) {
paging_physical_FreeFrame(table[i] & PML_ADDR_MASK, 1);
table[i] = 0;
}
loop_end:
virtual += SYSTEM_PAGE_SIZE;
}
}

View File

@ -0,0 +1,86 @@
format elf64
section '.text' executable
; Details on Control Registers and MSRs can be found here:
; https://wiki.osdev.org/CPU_Registers_x86-64
; x64fastcall void paging_modeswitch_4LevelPaging(void* pml4, int pcid)
;
; This function assumes that the program is now in 64-bit mode (long mode).
; Paging(CR0.PG) in general and long mode (MSR EFER.LME) must already be enabled in this state.
;
; Input:    (void* rcx, int rdx)
; Clobbers: rax, flags (rcx/rdx are modified but are argument registers)
public paging_modeswitch_4LevelPaging
paging_modeswitch_4LevelPaging:
		; 4 Level paging: CR0.PG  (bit 31)          = 1 (paging enabled)
		;                 CR4.PAE (bit 05)          = 1 (PAE enable)
		;                 MSR IA32_EFER.LME (bit 10) = 1 (IA-32e 64-bit mode enable)
		;                 CR4.LA57 (bit 12)         = 0 (4-level paging instead of 5)
		; Fix: the original read-modify-wrote CR0, but the bits it manipulates —
		; LA57 (bit 12) and PCIDE (bit 17) — live in CR4.
		; (CR4.PCIDE may only be set while CR3[11:0] = 0; UEFI hands over CR3
		; with those bits clear.)
		mov  rax, cr4
		and  rax, 0xFFFFFFFFFFFFEFFF ; clear CR4.LA57 (bit 12)
		or   rax, 0x20000            ; set CR4.PCIDE (bit 17)
		mov  cr4, rax
		and  rdx, 0xFFF              ; take only the 11:0 bits of the PCID
		or   rcx, rdx                ; construct the full CR3
		mov  cr3, rcx                ; set CR3, invalidating TLB entries
		ret
; x64fastcall void paging_modeswitch_4LevelPagingNX(void* pml4, int pcid)
;
; This function assumes that the program is now in 64-bit mode (long mode).
; Paging(CR0.PG) in general and long mode (MSR EFER.LME) must already be enabled in this state.
;
; This function also sets the IA32_EFER.NXE bit, enabling the No-Execute feature.
;
; Input:    (void* rcx, int rdx)
; Clobbers: rax, r8, r9, flags (rcx/rdx are modified but are argument registers)
public paging_modeswitch_4LevelPagingNX
paging_modeswitch_4LevelPagingNX:
		; 4 Level paging: CR0.PG  (bit 31)          = 1 (paging enabled)
		;                 CR4.PAE (bit 05)          = 1 (PAE enable)
		;                 MSR IA32_EFER.LME (bit 10) = 1 (IA-32e 64-bit mode enable)
		;                 CR4.LA57 (bit 12)         = 0 (4-level paging instead of 5)
		; Fix: the original read-modify-wrote CR0, but the bits it manipulates —
		; LA57 (bit 12) and PCIDE (bit 17) — live in CR4.
		mov  rax, cr4
		and  rax, 0xFFFFFFFFFFFFEFFF ; clear CR4.LA57 (bit 12)
		or   rax, 0x20000            ; set CR4.PCIDE (bit 17)
		mov  cr4, rax
		; save rcx and rdx, RDMSR/WRMSR use these
		mov  r8, rcx
		mov  r9, rdx
		mov  ecx, 0xC0000080         ; operate on the IA32_EFER MSR
		rdmsr                        ; read the MSR into edx:eax
		or   eax, (1 shl 11)         ; set No-Execute Enable (bit 11)
		wrmsr                        ; write the MSR back
		; restore rcx and rdx
		mov  rcx, r8
		mov  rdx, r9
		and  rdx, 0xFFF              ; take only the 11:0 bits of the PCID
		or   rcx, rdx                ; construct the full CR3
		mov  cr3, rcx                ; set CR3, invalidating TLB entries
		ret
; x64fastcall void paging_modeswitch_Table(void* pml, int pcid)
;
; Loads CR3 with the given top-level paging table and PCID.  The CR3 write
; itself invalidates (non-global) TLB entries; no explicit INVLPG is issued.
;
; Input:    (void* rcx, int rdx)
; Clobbers: flags (rcx/rdx are modified but are argument registers)
public paging_modeswitch_Table
paging_modeswitch_Table:
		and rdx, 0xFFF ; take only the 11:0 bits of the PCID
		or rcx, rdx ; construct the full CR3
		mov cr3, rcx ; set CR3, invalidate all TLB cache
		ret

136
memory/paging_physical.c Normal file
View File

@ -0,0 +1,136 @@
#include "memory.h"
#include "paging_internal.h"
#include "../runtime/stdio.h"
#include <string.h>
uint64_t paging_physical_Bitmap[MAX_SYSTEM_MEMORY_PAGES / BITMAP_BITS];
// fillBits builds a 64-bit mask with bits [begin, last] set (both inclusive,
// 0 <= begin <= last <= BITMAP_BITS-1).
static inline uint64_t fillBits(int begin, int last) {
	uint64_t fromBegin = ~((1ull << begin) - 1ull); // bits begin..63
	if (last == BITMAP_BITS - 1)                    // avoid shifting by 64 below
		return fromBegin;
	uint64_t upToLast = (1ull << (last + 1)) - 1ull; // bits 0..last
	return (begin == 0) ? upToLast : (fromBegin & upToLast);
}
// paging_physical_BitmapWriteOne marks physical pages [begin, end) as used
// (sets their bits in the frame bitmap).
void paging_physical_BitmapWriteOne(int begin, int end) {
	const int firstWord = begin / BITMAP_BITS;
	const int lastWord = (end - 1) / BITMAP_BITS;
	const int firstBit = begin % BITMAP_BITS;
	const int lastBit = (end - 1) % BITMAP_BITS;
	if (firstWord == lastWord) {
		paging_physical_Bitmap[firstWord] |= fillBits(firstBit, lastBit);
		return;
	}
	paging_physical_Bitmap[firstWord] |= fillBits(firstBit, BITMAP_BITS - 1);
	for (int w = firstWord + 1; w < lastWord; w++)
		paging_physical_Bitmap[w] = (~0ull); // whole word used
	paging_physical_Bitmap[lastWord] |= fillBits(0, lastBit);
}
// paging_physical_BitmapWriteZero marks physical pages [begin, end) as free
// (clears their bits in the frame bitmap).
void paging_physical_BitmapWriteZero(int begin, int end) {
	const int firstWord = begin / BITMAP_BITS;
	const int lastWord = (end - 1) / BITMAP_BITS;
	const int firstBit = begin % BITMAP_BITS;
	const int lastBit = (end - 1) % BITMAP_BITS;
	if (firstWord == lastWord) {
		paging_physical_Bitmap[firstWord] &= ~fillBits(firstBit, lastBit);
		return;
	}
	paging_physical_Bitmap[firstWord] &= ~fillBits(firstBit, BITMAP_BITS - 1);
	for (int w = firstWord + 1; w < lastWord; w++)
		paging_physical_Bitmap[w] = 0; // whole word free
	paging_physical_Bitmap[lastWord] &= ~fillBits(0, lastBit);
}
// Resume cursor (bitmap word index) for the 4K frame allocator. Starts past
// the first 1 MiB (1024 KiB / 4 KiB-per-page / BITMAP_BITS bits-per-word) so
// low memory is never handed out, and is never rewound — full words behind
// the cursor are not rescanned.
static int phy_i = 1024 / 4 / BITMAP_BITS;
// Allocates and zeroes a single 4K physical frame.
// Returns its physical address, or 0 when no frame is available
// (paging_physical_AllocateFrames leaves the slot 0 on failure).
uint64_t paging_physical_AllocateOneFrame() {
	uint64_t frame;
	paging_physical_AllocateFrames(1, &frame);
	return frame;
}
// Resume cursor for the 2M allocator; starts past the first 2 MiB
// (2048 KiB / 4 KiB-per-page / BITMAP_BITS) and always advances in steps of
// 8 bitmap words = 512 pages = one naturally aligned 2 MiB chunk.
static int phy_i_2M = 2048 / 4 / BITMAP_BITS;
// Allocates and zeroes a single 2M physical frame (512 contiguous 4K pages,
// naturally aligned). Returns its physical address, or 0 when no free 2 MiB
// chunk is available.
uint64_t paging_physical_AllocateOneFrame2M() {
	uint64_t frame;
	paging_physical_AllocateFrames2M(1, &frame);
	return frame;
}
// Allocates up to pageCount individual (not necessarily contiguous) 4K
// physical frames, storing each frame's physical address in frames[].
// Each allocated frame is zero-filled before being returned.
// Returns the number of frames actually allocated; this is less than
// pageCount once the scan reaches paging_EndPhysicalPage without finding a
// free bit. NOTE(review): the memset writes through the frame's *physical*
// address, so this assumes physical memory is identity-mapped at this point
// — confirm against the paging setup.
int paging_physical_AllocateFrames(int pageCount, uint64_t frames[]) {
	// this code is in sync with paging_map.c, paging_map_PageAllocated, so you have to modify both
	int page = 0;
	for (; page < pageCount; page++) {
		// Sentinel: stays 0 if the scan below finds no free bit.
		frames[page] = 0;
		// phy_i is a static cursor: the scan resumes where the previous
		// call (or previous iteration) stopped, so fully-used words are
		// never rescanned and the first 1 MiB stays skipped.
		for (; phy_i < paging_EndPhysicalPage / BITMAP_BITS; phy_i++)
			if (paging_physical_Bitmap[phy_i] != ~0ull)
				for (int j = 0; j < BITMAP_BITS; j++)
					if ((paging_physical_Bitmap[phy_i] & (1ull << j)) == 0) {
						// Claim bit j of word phy_i, then zero the frame.
						paging_physical_Bitmap[phy_i] |= (1ull << j);
						uint64_t addr = (((uint64_t)phy_i) * BITMAP_BITS + j) * SYSTEM_PAGE_SIZE;
						memset((void *)addr, 0, SYSTEM_PAGE_SIZE);
						frames[page] = addr;
						goto for_end; // break the phy_i loop
					}
		if (frames[page] == 0)
			break; // physical memory exhausted: return the partial count
	for_end:;
	}
	return page;
}
// Allocates up to pageCount 2M physical frames (each 8 bitmap words =
// 512 pages = 2 MiB, naturally aligned because phy_i_2M starts 8-word-aligned
// and moves in steps of 8). Each chunk must be entirely free — all 8 words
// zero — before it is claimed and zero-filled. Returns the number of 2M
// frames actually allocated; frames[page] is left 0 for the first frame that
// could not be satisfied. NOTE(review): like the 4K allocator, the memset
// goes through the physical address — assumes identity mapping; confirm.
int paging_physical_AllocateFrames2M(int pageCount, uint64_t frames[]) {
	int page = 0;
	for (; page < pageCount; page++) {
		// Sentinel: stays 0 if no fully-free 2 MiB chunk is found.
		frames[page] = 0;
		// phy_i_2M is a static cursor over 8-word (2 MiB) chunks.
		for (; phy_i_2M < paging_EndPhysicalPage / BITMAP_BITS; phy_i_2M += 8) {
			for (int j = 0; j < 8; j++)
				if (paging_physical_Bitmap[phy_i_2M + j] != 0)
					goto for_end; // continue the outer for loop
			// now here we have a whole chunk at [i, i+8)
			for (int j = 0; j < 8; j++)
				paging_physical_Bitmap[phy_i_2M + j] = ~0ull;
			uint64_t addr = (((uint64_t)phy_i_2M) * BITMAP_BITS) * SYSTEM_PAGE_SIZE;
			memset((void *)addr, 0, SYSTEM_PAGE_2M_SIZE);
			frames[page] = addr;
			// Deliberately no cursor advance here: the next iteration
			// rescans this now-full chunk once and moves past it.
			break;
		for_end:;
		}
		if (frames[page] == 0)
			break; // no free 2 MiB chunk left: return the partial count
	}
	return page;
}
// Returns pageCount 4K frames starting at physical address `frame` to the
// free pool by clearing their bits in the bitmap. Rejects (with a log
// message, matching the existing error style) an unaligned address or a
// non-positive count.
//
// Fix: the original passed pageCount straight through, so pageCount == 0
// made BitmapWriteZero compute whereLast = (begin - 1) / BITMAP_BITS; when
// `begin` was word-aligned that cleared an entire unrelated bitmap word,
// silently "freeing" 64 frames that may be in use.
void paging_physical_FreeFrame(uint64_t frame, int pageCount) {
	if (frame % SYSTEM_PAGE_SIZE != 0) {
		io_Printf("paging_physical_FreeFrame: frame %08llx is not aligned\n", frame);
		return;
	}
	if (pageCount <= 0) {
		io_Printf("paging_physical_FreeFrame: bad pageCount %d for frame %08llx\n", pageCount, frame);
		return;
	}
	paging_physical_BitmapWriteZero(frame / SYSTEM_PAGE_SIZE, frame / SYSTEM_PAGE_SIZE + pageCount);
}

38
memory/test_fillbits.c Normal file
View File

@ -0,0 +1,38 @@
#include <stdio.h>
#include <stdint.h>
// Builds a 64-bit mask with bits [begin, last] set, inclusive on both ends.
// Intersection of "bits >= begin" with "bits <= last"; last == 63 is
// special-cased so no shift count ever reaches 64.
static inline uint64_t fillBits(int begin, int last) {
	uint64_t fromBegin = ~0ull << begin;
	uint64_t upToLast = (last == 63) ? ~0ull : (1ull << (last + 1)) - 1ull;
	return fromBegin & upToLast;
}
// Prints fillBits(a, b) for manual inspection.
// Fix: "%08lx" with a uint64_t argument is undefined behavior wherever
// `long` is 32 bits (Windows, 32-bit targets); print through an
// unsigned long long cast with the matching %llx specifier instead.
void printit(int a, int b) {
	printf("[%d,%d] = %08llx\n", a, b, (unsigned long long)fillBits(a, b));
}
// Exercises fillBits over representative ranges; output is checked by eye.
int main() {
	// spans ending at the top bit: [0,63] through [10,63]
	for (int a = 0; a <= 10; a++)
		printit(a, 63);
	// spans pulled in from both ends
	printit(7, 62);
	printit(8, 61);
	printit(9, 60);
	// fixed start, shrinking end: [10,59] down to [10,54]
	for (int b = 59; b >= 54; b--)
		printit(10, b);
	// a single-bit span
	printit(4, 4);
}

View File

@ -0,0 +1,16 @@
#include <stdio.h>
#include <stdint.h>
// Extracts bits [high:low] of v, inclusive on both ends, right-aligned.
// Fix: the original built the mask as `(1 << (high - low + 1)) - 1`, i.e.
// in `int` arithmetic — signed overflow (UB) for field widths >= 31, and a
// shift by 64 (UB) for a full 64-bit field. The mask is now built with
// 1ull, and a width of 64 is special-cased so no shift count reaches 64.
inline static uint64_t takeBitfield(uint64_t v, int high, int low) {
	int width = high - low + 1;
	if (width >= 64)
		return v >> low;
	return (v >> low) & ((1ull << width) - 1ull);
}
// Exercises takeBitfield on a fixed sample value; output is checked by eye.
// Fix: "%lX" with a uint64_t argument is undefined behavior wherever `long`
// is 32 bits; print through unsigned long long casts with %llX instead.
// (The repeated [21:5] lines are kept as in the original.)
int main() {
	uint64_t val = 0x400CD696E37AE;
	printf("val = %llX\n", (unsigned long long)val);
	printf("val[21:5] = %llX\n", (unsigned long long)takeBitfield(val, 21, 5));
	printf("val[33:12] = %llX\n", (unsigned long long)takeBitfield(val, 33, 12));
	printf("val[21:5] = %llX\n", (unsigned long long)takeBitfield(val, 21, 5));
	printf("val[21:5] = %llX\n", (unsigned long long)takeBitfield(val, 21, 5));
}