add portable x86 loader and mp

Yanyan Jiang 2020-01-25 17:27:05 +08:00
parent 422a21bd1a
commit 61fe0809dd
10 changed files with 278 additions and 51 deletions


@@ -1,6 +1,6 @@
 include $(AM_HOME)/am/arch/isa/x86.mk
-AM_SRCS := x86_64/qemu/start64.S \
+AM_SRCS := x86_64/qemu/start.S \
   x86_64/qemu/trm.c \
   x86_64/qemu/cte.c \
   x86_64/qemu/ioe.c \


@@ -1,6 +1,6 @@
 include $(AM_HOME)/am/arch/isa/x86_64.mk
-AM_SRCS := x86_64/qemu/start64.S \
+AM_SRCS := x86_64/qemu/start.S \
   x86_64/qemu/trm.c \
   x86_64/qemu/cte.c \
   x86_64/qemu/ioe.c \


@@ -99,6 +99,8 @@
 // Below are only defined for c/cpp files
 #ifndef __ASSEMBLER__

+#include <stdint.h>
+
 // +--------10------+-------10-------+---------12----------+
 // | Page Directory |   Page Table   | Offset within Page  |
 // |      Index     |      Index     |                     |
@@ -269,26 +271,36 @@ static inline void set_cr0(uint32_t cr0) {
 }

-#ifndef __x86_64__
-static inline void set_idt(GateDesc *idt, int size) {
-  volatile static uint16_t data[3];
-  data[0] = size - 1;
-  data[1] = (uint32_t)idt;
-  data[2] = (uint32_t)idt >> 16;
-  asm volatile ("lidt (%0)" : : "r"(data));
+static inline void set_idt(void *idt, int size) {
+  volatile static struct {
+    int16_t size;
+    void *idt;
+  } __attribute__((packed)) data;
+  data.size = size;
+  data.idt = idt;
+  asm volatile ("lidt (%0)" : : "r"(&data));
 }

-static inline void set_gdt(SegDesc *gdt, int size) {
-  volatile static uint16_t data[3];
-  data[0] = size - 1;
-  data[1] = (uint32_t)gdt;
-  data[2] = (uint32_t)gdt >> 16;
-  asm volatile ("lgdt (%0)" : : "r"(data));
+static inline void set_gdt(void *gdt, int size) {
+  volatile static struct {
+    int16_t size;
+    void *gdt;
+  } __attribute__((packed)) data;
+  data.size = size;
+  data.gdt = gdt;
+  asm volatile ("lgdt (%0)" : : "r"(&data));
 }
-#endif

 static inline void set_tr(int selector) {
   asm volatile ("ltr %0" : : "r"((uint16_t)selector));
 }

+#ifdef __x86_64__
+typedef uint64_t SegDesc64;
+typedef struct {
+  uint64_t data[64];
+} TSS64;
+#endif
+
 static inline uint32_t get_cr2() {
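
Note on the change above: lidt/lgdt take a packed pseudo-descriptor, a 16-bit limit followed immediately by a base address of native pointer width. A struct with a void * base therefore compiles to the right 6-byte layout on i386 and the right 10-byte layout in long mode, which is what makes the rewritten set_idt/set_gdt portable, unlike the old uint16_t[3] version that hard-coded a 32-bit base. A minimal host-side check of that layout (an illustrative sketch for GCC/Clang, not part of the commit):

    #include <stdint.h>

    /* The operand of lidt/lgdt: a 16-bit limit, then a base of native
     * pointer width. packed suppresses the padding the compiler would
     * otherwise insert before the pointer member. */
    struct __attribute__((packed)) pseudo_desc {
      int16_t size;
      void   *base;
    };

    _Static_assert(sizeof(struct pseudo_desc) == 2 + sizeof(void *),
                   "no padding: 6 bytes on i386, 10 bytes in long mode");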


@@ -0,0 +1,203 @@
+#include "x86_64-qemu.h"
+#include <stdarg.h>
+
+/*
+static _Context* (*user_handler)(_Event, _Context*) = NULL;
+
+static GateDesc idt[NR_IRQ];
+#define IRQHANDLE_DECL(id, dpl, err) void __am_irq##id();
+IRQS(IRQHANDLE_DECL)
+void __am_irqall();
+
+int _cte_init(_Context *(*handler)(_Event, _Context *)) {
+  if (_cpu() != 0) panic("init CTE in non-bootstrap CPU");
+
+  for (int i = 0; i < NR_IRQ; i ++) {
+    idt[i] = GATE(STS_TG32, KSEL(SEG_KCODE), __am_irqall, DPL_KERN);
+  }
+#define IDT_ENTRY(id, dpl, err) \
+  idt[id] = GATE(STS_TG32, KSEL(SEG_KCODE), __am_irq##id, DPL_##dpl);
+  IRQS(IDT_ENTRY)
+
+  user_handler = handler;
+  __am_percpu_initirq();
+  return 0;
+}
+
+void _yield() {
+  if (!user_handler) panic("no interrupt handler");
+  asm volatile ("int $0x80" : : "a"(-1));
+}
+
+int _intr_read() {
+  if (!user_handler) panic("no interrupt handler");
+  return (get_efl() & FL_IF) != 0;
+}
+
+void _intr_write(int enable) {
+  if (!user_handler) panic("no interrupt handler");
+  if (enable) {
+    sti();
+  } else {
+    cli();
+  }
+}
+
+static void panic_on_return() { panic("kernel context returns"); }
+
+_Context *_kcontext(_Area stack, void (*entry)(void *), void *arg) {
+  _Area stk_safe = {
+    (void *)ROUNDUP(stack.start, 64),
+    (void *)ROUNDDOWN(stack.end, 64),
+  };
+
+  _Context *ctx = (_Context *)stk_safe.start;
+  *ctx = (_Context) {
+    .eax = 0, .ebx = 0, .ecx = 0, .edx = 0,
+    .esi = 0, .edi = 0, .ebp = 0, .esp3 = 0,
+    .ss0 = 0, .esp0 = (uint32_t)stk_safe.end,
+    .cs = KSEL(SEG_KCODE), .eip = (uint32_t)entry, .eflags = FL_IF,
+    .ds = KSEL(SEG_KDATA), .es = KSEL(SEG_KDATA), .ss = KSEL(SEG_KDATA),
+    .uvm = NULL,
+  };
+
+  void *values[] = { panic_on_return, arg }; // copy to stack
+  ctx->esp0 -= sizeof(values);
+  for (int i = 0; i < LENGTH(values); i++) {
+    ((uintptr_t *)ctx->esp0)[i] = (uintptr_t)values[i];
+  }
+
+  return ctx;
+}
+
+#define IRQ    T_IRQ0 +
+#define MSG(m) ev.msg = m;
+
+void __am_irq_handle(TrapFrame *tf) {
+  // saving processor context
+  _Context ctx = {
+    .eax = tf->eax, .ebx = tf->ebx, .ecx = tf->ecx, .edx = tf->edx,
+    .esi = tf->esi, .edi = tf->edi, .ebp = tf->ebp, .esp3 = 0,
+    .eip = tf->eip, .eflags = tf->eflags,
+    .cs = tf->cs, .ds = tf->ds, .es = tf->es, .ss = 0,
+    .ss0 = KSEL(SEG_KDATA), .esp0 = (uint32_t)(tf + 1),
+    .uvm = CPU->uvm,
+  };
+  if (tf->cs & DPL_USER) { // interrupt at user code
+    ctx.ss = tf->ss;
+    ctx.esp3 = tf->esp;
+  } else { // interrupt at kernel code
+    // tf (without ss0/esp0) is everything saved on the stack
+    ctx.esp0 -= sizeof(uint32_t) * 2;
+  }
+
+  // sending end-of-interrupt
+  if (IRQ 0 <= tf->irq && tf->irq < IRQ 32) {
+    __am_lapic_eoi();
+  }
+
+  // creating an event
+  _Event ev = {
+    .event = _EVENT_NULL,
+    .cause = 0, .ref = 0,
+    .msg = "(no message)",
+  };
+  switch (tf->irq) {
+    case IRQ 0: MSG("timer interrupt (lapic)")
+      ev.event = _EVENT_IRQ_TIMER; break;
+    case IRQ 1: MSG("I/O device IRQ1 (keyboard)")
+      ev.event = _EVENT_IRQ_IODEV; break;
+    case EX_SYSCALL: MSG("int $0x80 trap: _yield() or system call")
+      if ((int32_t)tf->eax == -1) {
+        ev.event = _EVENT_YIELD;
+      } else {
+        ev.event = _EVENT_SYSCALL;
+      }
+      break;
+    case EX_DIV: MSG("divide by zero")
+      ev.event = _EVENT_ERROR; break;
+    case EX_UD: MSG("UD #6 invalid opcode")
+      ev.event = _EVENT_ERROR; break;
+    case EX_NM: MSG("NM #7 coprocessor error")
+      ev.event = _EVENT_ERROR; break;
+    case EX_DF: MSG("DF #8 double fault")
+      ev.event = _EVENT_ERROR; break;
+    case EX_TS: MSG("TS #10 invalid TSS")
+      ev.event = _EVENT_ERROR; break;
+    case EX_NP: MSG("NP #11 segment/gate not present")
+      ev.event = _EVENT_ERROR; break;
+    case EX_SS: MSG("SS #12 stack fault")
+      ev.event = _EVENT_ERROR; break;
+    case EX_GP: MSG("GP #13, general protection fault")
+      ev.event = _EVENT_ERROR; break;
+    case EX_PF: MSG("PF #14, page fault, @cause: _PROT_XXX")
+      ev.event = _EVENT_PAGEFAULT;
+      if (tf->err & 0x1) ev.cause |= _PROT_NONE;
+      if (tf->err & 0x2) ev.cause |= _PROT_WRITE;
+      else               ev.cause |= _PROT_READ;
+      ev.ref = get_cr2();
+      break;
+    default: MSG("unrecognized interrupt/exception")
+      ev.event = _EVENT_ERROR;
+      ev.cause = tf->err;
+      break;
+  }
+
+  // call user handlers (registered in _cte_init)
+  _Context *ret_ctx = &ctx;
+  if (user_handler) {
+    _Context *next = user_handler(ev, &ctx);
+    if (!next) {
+      panic("return to a null context");
+    }
+    ret_ctx = next;
+  }
+
+  // Return to context @ret_ctx
+#define REGS_KERNEL(_) \
+  _(eflags) _(cs) _(eip) _(ds) _(es) \
+  _(eax) _(ecx) _(edx) _(ebx) _(esp0) _(ebp) _(esi) _(edi)
+#define REGS_USER(_) \
+  _(ss) _(esp3) REGS_KERNEL(_)
+#define push(r) "push %[" #r "];"     // -> push %[eax]
+#define def(r)  , [r] "m"(ret_ctx->r) // -> [eax] "m"(ret_ctx->eax)
+
+  CPU->uvm = ret_ctx->uvm;
+  if (ret_ctx->cs & DPL_USER) { // return to user
+    _AddressSpace *uvm = ret_ctx->uvm;
+    if (uvm) {
+      set_cr3(uvm->ptr);
+    }
+    __am_thiscpu_setstk0(ret_ctx->ss0, ret_ctx->esp0);
+    asm volatile goto (
+      "movl %[esp], %%esp;" // move stack
+      REGS_USER(push)       // push reg context onto stack
+      "jmp %l[iret]"        // goto iret
+      : : [esp] "m"(ret_ctx->esp0)
+          REGS_USER(def) : : iret );
+  } else { // return to kernel
+    asm volatile goto (
+      "movl %[esp], %%esp;" // move stack
+      REGS_KERNEL(push)     // push reg context onto stack
+      "jmp %l[iret]"        // goto iret
+      : : [esp] "m"(ret_ctx->esp0)
+          REGS_KERNEL(def) : : iret );
+  }
+
+iret:
+  asm volatile (
+    "popal;"      // restore context
+    "popl %es;"
+    "popl %ds;"
+    "iret;"       // interrupt return
+  );
+}
+
+void __am_percpu_initirq() {
+  if (user_handler) {
+    __am_ioapic_enable(IRQ_KBD, 0);
+    set_idt(idt, sizeof(idt));
+  }
+}
+*/
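
Note on __am_irq_handle above: `tf->cs & DPL_USER` distinguishes user frames from kernel frames because the low two bits of a segment selector encode its privilege level; selectors built with KSEL carry 0 there, user-mode selectors carry 3. A toy illustration (the selector values are hypothetical, not the commit's):

    #include <assert.h>
    #include <stdint.h>

    #define DPL_KERN 0x0
    #define DPL_USER 0x3
    #define SEL(idx, dpl) ((uint16_t)(((idx) << 3) | (dpl))) // GDT index | RPL

    int main(void) {
      uint16_t kcs = SEL(1, DPL_KERN); // hypothetical kernel code selector
      uint16_t ucs = SEL(3, DPL_USER); // hypothetical user code selector
      assert((kcs & DPL_USER) == 0);   // trap came from kernel code
      assert((ucs & DPL_USER) != 0);   // trap came from user code
      return 0;
    }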


@@ -1,6 +1,5 @@
 #include <am.h>
 #include "x86_64-qemu.h"
-#include <x86.h>

 struct cpu_local __am_cpuinfo[MAX_CPU] = {};
@@ -50,12 +49,17 @@ static void percpu_entry() {
 }

 static void ap_entry() {
-  // __am_percpu_initgdt();
+  percpu_init();
-  _atomic_xchg(&apboot_done, 1);
-  user_entry();
 }

+void percpu_init() {
+  __am_percpu_initgdt();
+  // __am_percpu_initirq();
+  __am_percpu_initlapic();
+  // __am_percpu_initpg();
+  _atomic_xchg(&apboot_done, 1);
+  user_entry();
+}

 static void jump_to(void (*entry)()) {
@@ -70,8 +74,8 @@ static void jump_to(void (*entry)()) {
 }

-/*
 void __am_percpu_initgdt() {
+#ifndef __x86_64__
   SegDesc *gdt = CPU->gdt;
   TSS *tss = &CPU->tss;
   gdt[SEG_KCODE] = SEG (STA_X | STA_R, 0, 0xffffffff, DPL_KERN);
@@ -81,8 +85,21 @@ void __am_percpu_initgdt() {
   gdt[SEG_TSS] = SEG16(STS_T32A, tss, sizeof(*tss)-1, DPL_KERN);
   set_gdt(gdt, sizeof(SegDesc) * NR_SEG);
   set_tr(KSEL(SEG_TSS));
+#else
+  SegDesc64 *gdt = CPU->gdt;
+  uint64_t tss = (uint64_t)(&CPU->tss);
+  gdt[0] = 0;
+  gdt[SEG_KCODE] = 0x0020980000000000LL;
+  gdt[SEG_KDATA] = 0x0000920000000000LL;
+  gdt[SEG_UCODE] = 0x0020F80000000000LL;
+  gdt[SEG_UDATA] = 0x0000F20000000000LL;
+  gdt[SEG_TSS+0] = (0x0067) | ((tss & 0xFFFFFF) << 16) |
+                   (0x00E9LL << 40) | (((tss >> 24) & 0xFF) << 56);
+  gdt[SEG_TSS+1] = (tss >> 32);
+  set_gdt(gdt, sizeof(SegDesc64) * (NR_SEG + 1));
+  set_tr(KSEL(SEG_TSS));
+#endif
 }
-*/

 /*
 void __am_thiscpu_setstk0(uintptr_t ss0, uintptr_t esp0) {
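
Note on the #else branch above: long mode ignores segment base and limit for code/data descriptors, so the magic quadwords only encode attribute bits. A host-side sketch that rebuilds two of them from named bit positions (per the Intel SDM; the program is illustrative, not part of the commit):

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1ULL << (n))

    int main(void) {
      // kernel code: executable (43), non-system (44), present (47),
      // long-mode (53), DPL 0 in bits 45-46
      uint64_t kcode = BIT(43) | BIT(44) | BIT(47) | BIT(53);
      assert(kcode == 0x0020980000000000ULL);

      // user code: the same descriptor with DPL = 3
      uint64_t ucode = kcode | (3ULL << 45);
      assert(ucode == 0x0020F80000000000ULL);
      return 0;
    }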


@@ -1,5 +1,4 @@
 #include <am.h>
-#include <x86.h>
 #include "x86_64-qemu.h"
 #include <klib.h>
@@ -16,8 +15,8 @@ void _start_c(char *args) {
     (boot_record()->entry)();
   }

-  _heap = memory_probe();
-  lapic_init();
+  bootcpu_init();
+  percpu_init();

   int ret = main(args);
   _halt(ret);


@@ -1,5 +1,4 @@
 #include <am.h>
-#include <x86.h>
 #include "x86_64-qemu.h"
 #include <klib.h>
@@ -36,7 +35,19 @@ static inline void *upcast(uint32_t ptr) {
   return (void *)(uintptr_t)ptr;
 }

-void lapic_init() {
+void bootcpu_init() {
+  int32_t magic = 0x5a5aa5a5;
+  int32_t step = 1L << 20;
+  extern char end;
+  uintptr_t st, ed;
+  for (st = ed = ROUNDUP(&end, step); ; ed += step) {
+    volatile uint32_t *ptr = (uint32_t *)ed;
+    if ((*ptr = magic, *ptr) != magic) {
+      break; // read-after-write fail
+    }
+  }
+  _heap = RANGE(st, ed);
+
   for (char *st = (char *)0xf0000; st != (char *)0xffffff; st ++) {
     if (*(volatile uint32_t *)st == 0x5f504d5f) {
       uint32_t mpconf_ptr = ((volatile struct mpdesc *)st)->conf;
@@ -58,27 +69,6 @@
   panic("seems not an x86-qemu virtual machine");
 }

-void boot_othercpu() {
-}
-
-_Area memory_probe() {
-  int32_t magic = 0x5a5aa5a5;
-  int32_t step = 1L << 20;
-  extern char end;
-  uintptr_t st, ed;
-  for (st = ed = ROUNDUP(&end, step); ; ed += step) {
-    volatile uint32_t *ptr = (uint32_t *)ed;
-    if ((*ptr = magic, *ptr) != magic) {
-      break; // read-after-write fail
-    }
-  }
-  return RANGE(st, ed);
-}

 // apic.c
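
Note on bootcpu_init above: it folds two idioms into the boot path. The heap probe writes a magic word at every 1 MiB step past the kernel image and stops at the first address whose readback fails, bounding _heap; the MP-table scan over the BIOS area looks for 0x5f504d5f, which is simply the signature "_MP_" read as a little-endian 32-bit integer. A host-runnable sanity check of both idioms (illustrative, assuming a little-endian host):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
      // 1. read-after-write: the comma expression stores, then reloads;
      //    in real RAM the reload matches, past the end of memory it won't.
      volatile uint32_t cell = 0, *ptr = &cell;
      assert((*ptr = 0x5a5aa5a5, *ptr) == 0x5a5aa5a5);

      // 2. "_MP_" as a little-endian word is the BIOS-area scan constant.
      uint32_t sig;
      memcpy(&sig, "_MP_", 4);
      assert(sig == 0x5f504d5f);
      return 0;
    }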


@@ -1,17 +1,23 @@
 #ifndef __X86_64_QEMU_H__
 #define __X86_64_QEMU_H__

+#include <x86.h>
 #include <klib.h>

 #define MAX_CPU 8

-void lapic_init();
+void bootcpu_init();
+void percpu_init();
-_Area memory_probe();

 struct cpu_local {
   // _AddressSpace *uvm;
+#ifndef __x86_64__
-  // SegDesc gdt[NR_SEG];
+  SegDesc gdt[NR_SEG];
+  TSS tss;
+#else
+  SegDesc64 gdt[NR_SEG + 1];
+  TSS64 tss;
+#endif
-  // TSS tss;
   uint8_t stack[4096];
 };


@@ -5,5 +5,5 @@ set pagination off
 set confirm off
 file build/hello-x86_64-qemu.o
 # file build/hello-x86_32-qemu.o
-b _start_c
+b __am_percpu_initgdt
 c