diff --git a/kernel/kmain.c b/kernel/kmain.c
index b761a9c..ec9acfa 100644
--- a/kernel/kmain.c
+++ b/kernel/kmain.c
@@ -11,6 +11,7 @@
 #include "../interrupt/syscall.h"
 #include "../driver/irq/pic/pic.h"
 #include "../driver/irq/pic/ps2/ps2.h"
+#include "../smp/kthread.h"
 #include "../execformat/pe/reloc.h"
 
 void execformat_pe_ReadSystemHeader(execformat_pe_PortableExecutable *pe);
@@ -70,8 +71,12 @@ SYSV_ABI void kMain() {
 	} else
 		io_WriteConsoleASCII("xcursor_Default: failed to load\n");
 
+	io_WriteConsoleASCII("kMain: Initializing threading\n");
+	smp_thread_ID tid = smp_thread_Init();
+	io_WriteConsoleASCII("kMain: Threading OK\n");
+
 	for (;;) {
-		asm volatile("hlt");
+		//asm volatile("hlt");
 
 		//io_WriteConsoleASCII("kMain: Interrupt hit\n");
 		graphics_SwapBuffer();
diff --git a/smp/internal.c b/smp/internal.c
new file mode 100644
index 0000000..32d30ab
--- /dev/null
+++ b/smp/internal.c
@@ -0,0 +1,12 @@
+
+#include "internal.h"
+
+
+uint64_t __smp_Now = 1;
+int __smp_Count = 1;
+bool __smp_PauseTicker = false;
+
+// backing storage for the per-core current-thread pointers;
+// only a single core is managed for now, matching __smp_Count
+static __smp_Thread *__smp_CurrentStorage[1];
+__smp_Thread **__smp_Current = __smp_CurrentStorage;
+
+tree_Tree *__smp_Threads;
+tree_Tree *__smp_ThreadsWaiting;
diff --git a/smp/internal.h b/smp/internal.h
new file mode 100644
index 0000000..5f3455c
--- /dev/null
+++ b/smp/internal.h
@@ -0,0 +1,44 @@
+#pragma once
+
+#include <stdbool.h>
+
+#include "kthread.h"
+#include "kthread_switch.h"
+#include "../util/tree.h"
+
+
+// holds internal data about a thread
+typedef struct {
+    smp_thread_ID id; // thread id
+
+    // Niceness; a lower value means higher priority. Cannot be negative.
+    // A thread may wait up to `nice` ticks longer than another thread
+    // that started waiting at the same time.
+    unsigned int nice;
+
+    // Last tick at which the thread started waiting.
+    // A value greater than __smp_Now means the thread is still sleeping
+    // and is not to be resumed yet.
+    uint64_t lastTick;
+
+    // Last-saved thread state after a preemptive context switch
+    smp_thread_State state;
+} __smp_Thread;
+
+// variables defined in internal.c
+
+// current tick number
+extern uint64_t __smp_Now;
+
+// number of cores in the system
+extern int __smp_Count;
+
+// when set, __smp_Switch skips one tick and goes straight to scheduling
+extern bool __smp_PauseTicker;
+
+// __smp_Thread*[], current thread for each core
+extern __smp_Thread **__smp_Current;
+
+/* Priority = lastTick + nice,
+ * so priority values keep growing as the tick counter advances. */
+
+// [thread id] -> __smp_Thread
+extern tree_Tree *__smp_Threads;
+// [priority] -> __smp_Thread*
+extern tree_Tree *__smp_ThreadsWaiting;
diff --git a/smp/kthread.c b/smp/kthread.c
new file mode 100644
index 0000000..f1c8c7d
--- /dev/null
+++ b/smp/kthread.c
@@ -0,0 +1,52 @@
+
+#include "kthread.h"
+#include "kthread_switch.h"
+#include "internal.h"
+
+#include "../util/tree.h"
+#include "../interrupt/interrupt.h"
+#include "../driver/irq/pic/rtc/rtc.h"
+
+
+smp_thread_ID smp_thread_Init() {
+    INTERRUPT_DISABLE;
+    __smp_Threads = tree_Create(sizeof(__smp_Thread));
+    __smp_ThreadsWaiting = tree_Create(sizeof(void *));
+    __smp_Now = 1;
+
+    // register the currently running execution environment as thread 1
+    smp_thread_ID id = 1;
+    tree_Node *node = tree_Insert(__smp_Threads, id, NULL);
+    __smp_Thread *t = (__smp_Thread *)node->data;
+    t->nice = SMP_NICENESS_DEFAULT;
+    t->id = id;
+    t->lastTick = 1;
+    __smp_Current[0] = t;
+
+    // drive preemption from the RTC tick
+    if (!pic_rtc_Enabled)
+        pic_rtc_Init();
+    pic_rtc_SetHandler(__smp_IntSwitch);
+
+    INTERRUPT_RESTORE;
+    return id;
+}
+
+int smp_thread_Nice(smp_thread_ID id, int newnice) {
+    INTERRUPT_DISABLE;
+    __smp_Thread *t = tree_Find(__smp_Threads, id);
+    if (!t) {
+        INTERRUPT_RESTORE;
+        return -1;
+    } else if (newnice < 0) {
+        INTERRUPT_RESTORE;
+        return t->nice;
+    }
+
+    int oldnice = t->nice;
+    t->nice = newnice;
+    INTERRUPT_RESTORE;
+    return oldnice;
+}
+
+void smp_thread_Yield() {
+    // make the next RTC tick skip the tick counter and go straight to the
+    // scheduler, so another thread gets a chance to run
+    __smp_PauseTicker = true;
+}
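Scheduling note (illustration only, the numbers are made up): smp/internal.h defines priority = lastTick + nice, and a lower value is picked first. A thread that last ran at tick 100 with nice 80 is re-queued at priority 180, while one that last ran at tick 150 with nice 20 is queued at 170 and therefore runs first, even though the first thread has been waiting longer. Because priorities are derived from the ever-increasing tick counter, they grow monotonically and never need to be rebalanced.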
diff --git a/smp/kthread.h b/smp/kthread.h
new file mode 100644
index 0000000..f5a0fc8
--- /dev/null
+++ b/smp/kthread.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// smp_thread_ID identifies a kernel thread.
+typedef int smp_thread_ID;
+
+// Arguments are the numeric arguments passed to the thread entry point.
+typedef struct {
+    uintptr_t a, b, c, d, e, f;
+} smp_thread_Arguments;
+
+// Init initializes the kernel multithreading environment.
+//
+// The current execution environment is saved as a thread, and its ID is returned (usually 1).
+smp_thread_ID smp_thread_Init();
+
+// Start starts a new thread and puts it into the scheduler (it does not run right away).
+//
+// The entry point must follow the System V x64 ABI
+// and take at most 6 numeric arguments.
+//
+// Its return value is discarded.
+smp_thread_ID smp_thread_Start(void *entry, const smp_thread_Arguments *args, unsigned int nice);
+
+// Nice sets the new niceness for a thread.
+//
+// If newnice is less than 0, the niceness is left unchanged.
+//
+// Returns the old niceness of the thread,
+// or -1 if the thread does not exist.
+int smp_thread_Nice(smp_thread_ID id, int newnice);
+
+// Yield pauses the execution of the current thread, possibly switching to another.
+void smp_thread_Yield();
+
+// Sleep sleeps for the given number of ticks (1024 Hz).
+void smp_thread_Sleep(int ticks);
+
+// Default niceness for kernel threads.
+#define SMP_NICENESS_DEFAULT 80
+
+
+#ifdef __cplusplus
+}
+#endif
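A minimal sketch of how the API above is meant to be used. smp_thread_Start is only declared in this patch, and blinkWorker, exampleSpawn and the colour value are hypothetical names for illustration:

    #include <stdint.h>
    #include "../smp/kthread.h"

    // hypothetical worker: entry points follow the System V x64 ABI,
    // so the first numeric argument (args.a) arrives as the first parameter
    static void blinkWorker(uintptr_t colour) {
        for (;;) {
            // ... draw something with `colour` ...
            smp_thread_Yield(); // let other threads run
        }
    }

    static void exampleSpawn(void) {
        smp_thread_Arguments args = {.a = 0xff0000}; // unused members stay zero
        smp_thread_Start((void *)blinkWorker, &args, SMP_NICENESS_DEFAULT);
    }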
diff --git a/smp/kthread_switch.c b/smp/kthread_switch.c
new file mode 100644
index 0000000..d3af6d7
--- /dev/null
+++ b/smp/kthread_switch.c
@@ -0,0 +1,67 @@
+
+#include "kthread.h"
+#include "kthread_switch.h"
+#include "internal.h"
+
+#include "../runtime/stdio.h"
+#include <string.h>
+
+
+// defined in assembly
+SYSV_ABI void __smp_Switch_Idle();
+
+SYSV_ABI uintptr_t __smp_Switch() {
+    // the calling function __smp_IntSwitch has already CLI-ed for us
+
+    // TODO: the ticker and the switcher should be 2 separate functions!
+    if (!__smp_PauseTicker) {
+        __smp_Now++;
+        if (__smp_Now % 8) // only attempt a switch every 8th tick
+            return 0;
+    } else
+        __smp_PauseTicker = false;
+
+    io_Printf("__smp_Switch: Tick: %d, switching\n", __smp_Now);
+
+    __smp_Thread *t = __smp_Current[0];
+
+    tree_Node *node = 0;
+    // insert the current thread back into the waiting queue
+    if (t) {
+        uint64_t priority = t->nice + __smp_Now; // new priority for the thread
+        node = tree_InsertNode(__smp_ThreadsWaiting, priority, 0);
+        *((__smp_Thread **)node->data) = t;
+    }
+
+    // find the highest-priority waiting thread that is not still sleeping
+    tree_Node *first = tree_FirstNode(__smp_ThreadsWaiting);
+    while (first && (*((__smp_Thread **)first->data))->lastTick > __smp_Now)
+        first = tree_Node_Next(first);
+
+    if (first == node) {
+        // the current thread is still the first, keep running it
+        io_Printf(" Not context switching, still running %d\n", t ? t->id : 0);
+        if (node)
+            tree_Delete(__smp_ThreadsWaiting, node);
+        return 0;
+    }
+
+    // we need a real context switch
+    // first save the current thread's context
+    if (t) {
+        t->lastTick = __smp_Now;
+        memcpy(&t->state, &__smp_IntSwitch_LastState, sizeof(smp_thread_State));
+    }
+
+    if (!first) {
+        // no thread available, load a dummy idle thread
+        __smp_IntSwitch_LastState.rip = (uint64_t)__smp_Switch_Idle;
+        __smp_Current[0] = 0;
+        io_WriteConsoleASCII("__smp_Switch: Entering idle\n");
+    } else {
+        // load the new context
+        __smp_Thread *next = *((__smp_Thread **)first->data);
+        io_Printf(" Context switching, from %d to %d\n", t ? t->id : 0, next ? next->id : 0);
+        memcpy(&__smp_IntSwitch_LastState, &next->state, sizeof(smp_thread_State));
+        tree_Delete(__smp_ThreadsWaiting, first);
+        __smp_Current[0] = next;
+    }
+
+    return 1;
+}
diff --git a/smp/kthread_switch.h b/smp/kthread_switch.h
new file mode 100644
index 0000000..72e06fa
--- /dev/null
+++ b/smp/kthread_switch.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#include "../main.h"
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// State describes the stack, instruction pointer and general-purpose registers of a thread.
+typedef struct {
+    uint64_t rax, rbx, rcx, rdx;
+    uint64_t rsi, rdi, rbp, rsp;
+    uint64_t r8, r9, r10, r11;
+    uint64_t r12, r13, r14, r15;
+
+    uint64_t rflags;
+    uint64_t rip;
+    uint16_t ss, cs; // Stack and Code Segments
+} PACKED smp_thread_State;
+
+// Last program state saved/restored by __smp_IntSwitch()
+extern smp_thread_State __smp_IntSwitch_LastState;
+
+// Interrupt handler, defined in assembly
+void __smp_IntSwitch();
+
+// Saves the current thread's registers from LastState,
+// invokes the scheduler to pick a new thread,
+// copies the new thread's saved registers into LastState,
+// and returns to __smp_IntSwitch, which restores the new state and IRETs.
+//
+// Returns nonzero if a context switch is actually required.
+SYSV_ABI uintptr_t __smp_Switch();
+
+
+#ifdef __cplusplus
+}
+#endif
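The assembly routine below hard-codes byte offsets into __smp_IntSwitch_LastState. As a sketch of a compile-time guard (assuming C11 _Static_assert; this check is not part of the patch), these are the offsets of the PACKED smp_thread_State it relies on:

    #include <stddef.h>
    #include "kthread_switch.h"

    // layout expected by kthread_switch_asm.S
    _Static_assert(offsetof(smp_thread_State, rsi)    ==  32, "rsi");
    _Static_assert(offsetof(smp_thread_State, rdi)    ==  40, "rdi");
    _Static_assert(offsetof(smp_thread_State, rsp)    ==  56, "rsp");
    _Static_assert(offsetof(smp_thread_State, r15)    == 120, "r15");
    _Static_assert(offsetof(smp_thread_State, rflags) == 128, "rflags");
    _Static_assert(offsetof(smp_thread_State, rip)    == 136, "rip");
    _Static_assert(offsetof(smp_thread_State, ss)     == 144, "ss");
    _Static_assert(offsetof(smp_thread_State, cs)     == 146, "cs");
    _Static_assert(sizeof(smp_thread_State)           == 148, "matches rq 18 + rw 2");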
diff --git a/smp/kthread_switch_asm.S b/smp/kthread_switch_asm.S
new file mode 100644
index 0000000..8332c9a
--- /dev/null
+++ b/smp/kthread_switch_asm.S
@@ -0,0 +1,107 @@
+format elf64
+
+extrn __smp_Switch
+public __smp_IntSwitch_LastState
+public __smp_IntSwitch
+public __smp_Switch_Idle
+
+
+section '.bss' writable
+
+; typedef struct {
+;     uint64_t rax, rbx, rcx, rdx;
+;     uint64_t rsi, rdi, rbp, rsp;
+;     uint64_t r8, r9, r10, r11;
+;     uint64_t r12, r13, r14, r15;
+;
+;     uint64_t rflags;
+;     uint64_t rip;
+;     uint16_t ss, cs; // Stack and Code Segments
+; } PACKED smp_thread_State;
+__smp_IntSwitch_LastState:
+    rq 18
+    rw 2
+
+
+section '.text' executable
+
+; interrupt_handler __smp_IntSwitch()
+;
+; Called from a timer interrupt.
+; Saves the current processor (general-purpose) state and invokes the task switcher.
+__smp_IntSwitch:
+    cli
+
+    mov [__smp_IntSwitch_LastState], rax
+    mov [__smp_IntSwitch_LastState+8], rbx
+    mov [__smp_IntSwitch_LastState+16], rcx
+    mov [__smp_IntSwitch_LastState+24], rdx
+    mov [__smp_IntSwitch_LastState+32], rsi
+
+    ; now that we have 5 free registers, pop the interrupt frame for later
+    pop rax ; rip
+    pop rbx ; cs
+    pop rcx ; rflags
+    pop rdx ; rsp
+    pop rsi ; ss
+
+    mov [__smp_IntSwitch_LastState+40], rdi
+    mov [__smp_IntSwitch_LastState+48], rbp
+    mov [__smp_IntSwitch_LastState+56], rdx ; rsp
+    mov [__smp_IntSwitch_LastState+64], r8
+    mov [__smp_IntSwitch_LastState+72], r9
+    mov [__smp_IntSwitch_LastState+80], r10
+    mov [__smp_IntSwitch_LastState+88], r11
+    mov [__smp_IntSwitch_LastState+96], r12
+    mov [__smp_IntSwitch_LastState+104], r13
+    mov [__smp_IntSwitch_LastState+112], r14
+    mov [__smp_IntSwitch_LastState+120], r15
+
+    mov [__smp_IntSwitch_LastState+128], rcx ; rflags
+    mov [__smp_IntSwitch_LastState+136], rax ; rip
+    mov [__smp_IntSwitch_LastState+144], si  ; ss
+    mov [__smp_IntSwitch_LastState+146], bx  ; cs
+
+    call __smp_Switch
+    ;test rax, rax
+    ;jnz .realswitch ; a nonzero return value means a real context switch is needed
+
+.realswitch:
+    mov rdi, [__smp_IntSwitch_LastState+40]
+    mov rbp, [__smp_IntSwitch_LastState+48]
+    mov rdx, [__smp_IntSwitch_LastState+56] ; rsp
+    mov r8,  [__smp_IntSwitch_LastState+64]
+    mov r9,  [__smp_IntSwitch_LastState+72]
+    mov r10, [__smp_IntSwitch_LastState+80]
+    mov r11, [__smp_IntSwitch_LastState+88]
+    mov r12, [__smp_IntSwitch_LastState+96]
+    mov r13, [__smp_IntSwitch_LastState+104]
+    mov r14, [__smp_IntSwitch_LastState+112]
+    mov r15, [__smp_IntSwitch_LastState+120]
+
+    mov rcx, [__smp_IntSwitch_LastState+128] ; rflags
+    mov rax, [__smp_IntSwitch_LastState+136] ; rip
+    mov si,  [__smp_IntSwitch_LastState+144] ; ss
+    mov bx,  [__smp_IntSwitch_LastState+146] ; cs
+
+    ; rebuild the interrupt frame and return to the (possibly new) thread
+    push rsi ; ss
+    push rdx ; rsp
+    push rcx ; rflags
+    push rbx ; cs
+    push rax ; rip
+
+    mov rax, [__smp_IntSwitch_LastState]
+    mov rbx, [__smp_IntSwitch_LastState+8]
+    mov rcx, [__smp_IntSwitch_LastState+16]
+    mov rdx, [__smp_IntSwitch_LastState+24]
+    mov rsi, [__smp_IntSwitch_LastState+32]
+
+    iretq ; 64-bit interrupt return: pops rip, cs, rflags, rsp, ss
+
+
+; idle loop used when no thread is ready to run:
+; halt until the next interrupt, then halt again
+__smp_Switch_Idle:
+    hlt
+    jmp __smp_Switch_Idle
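For reference (a sketch only, not code from the patch; the struct name is made up): the five pops at the top of __smp_IntSwitch and the five pushes before iretq mirror the stack frame the CPU builds when it delivers an interrupt without an error code in 64-bit mode, lowest address first:

    #include <stdint.h>

    // x86-64 interrupt stack frame as seen by the handler
    typedef struct {
        uint64_t rip;    // popped into rax
        uint64_t cs;     // popped into rbx (pushed zero-extended to 64 bits)
        uint64_t rflags; // popped into rcx
        uint64_t rsp;    // popped into rdx
        uint64_t ss;     // popped into rsi
    } x86_64_InterruptFrame;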