diff --git a/am/src/x86/qemu/devices/cpu.c b/am/src/x86/qemu/devices/cpu.c
index 171216b8..a1d3ae2a 100644
--- a/am/src/x86/qemu/devices/cpu.c
+++ b/am/src/x86/qemu/devices/cpu.c
@@ -44,8 +44,8 @@ void __am_thiscpu_halt() {
 }
 
 void __am_othercpu_halt() {
-  __am_bootrec->is_ap = 1;
-  __am_bootrec->entry = __am_thiscpu_halt;
+  BOOTREC->is_ap = 1;
+  BOOTREC->entry = __am_thiscpu_halt;
   for (int cpu = 0; cpu < __am_ncpu; cpu++) {
     if (cpu != _cpu()) {
       __am_lapic_bootap(cpu, 0x7c00);
diff --git a/am/src/x86/qemu/mpe.c b/am/src/x86/qemu/mpe.c
index 2bc9de13..b3b37cbb 100644
--- a/am/src/x86/qemu/mpe.c
+++ b/am/src/x86/qemu/mpe.c
@@ -2,7 +2,6 @@
 
 int __am_ncpu = 0;
 struct cpu_local __am_cpuinfo[MAX_CPU];
-volatile struct boot_info *__am_bootrec = (void *)0x7000;
 static void (* volatile user_entry)();
 static volatile intptr_t apboot_done = 0;
 
@@ -36,8 +35,8 @@ intptr_t _atomic_xchg(volatile intptr_t *addr, intptr_t newval) {
 static void percpu_entry() {
   if (_cpu() == 0) { // bootstrap cpu, boot all aps
     for (int cpu = 1; cpu < __am_ncpu; cpu++) {
-      __am_bootrec->is_ap = 1;
-      __am_bootrec->entry = percpu_entry;
+      BOOTREC->is_ap = 1;
+      BOOTREC->entry = percpu_entry;
       __am_lapic_bootap(cpu, 0x7c00);
       while (_atomic_xchg(&apboot_done, 0) != 1) {
         pause();
diff --git a/am/src/x86/x86-qemu.h b/am/src/x86/x86-qemu.h
index 65d500ba..4d39cf5d 100644
--- a/am/src/x86/x86-qemu.h
+++ b/am/src/x86/x86-qemu.h
@@ -20,10 +20,10 @@ struct cpu_local {
   uint8_t stack[4096];
 };
 extern volatile uint32_t *__am_lapic;
-extern volatile struct boot_info *__am_bootrec;
 extern int __am_ncpu;
 extern struct cpu_local __am_cpuinfo[MAX_CPU];
 #define CPU (&__am_cpuinfo[_cpu()])
+#define BOOTREC ((volatile struct boot_info *)0x7000)
 
 #define LENGTH(arr) (sizeof(arr) / sizeof((arr)[0]))
 
@@ -43,27 +43,6 @@ void __am_thiscpu_setstk0(uintptr_t ss0, uintptr_t esp0);
 void __am_thiscpu_halt() __attribute__((__noreturn__));
 void __am_othercpu_halt();
 
-// simple spin locks
-#define LOCKDECL(name) \
-  void name##_lock(); \
-  void name##_unlock();
-
-#define LOCKDEF(name) \
-  static volatile intptr_t name##_locked = 0; \
-  static int name##_lock_flags[MAX_CPU]; \
-  void name##_lock() { \
-    name##_lock_flags[_cpu()] = get_efl() & FL_IF; \
-    cli(); \
-    while (1) { \
-      if (0 == _atomic_xchg(&name##_locked, 1)) break; \
-      pause(); \
-    } \
-  } \
-  void name##_unlock() { \
-    _atomic_xchg(&name##_locked, 0); \
-    if (name##_lock_flags[_cpu()]) sti(); \
-  }
-
 #define RANGE(st, ed) (_Area) { .start = (void *)st, .end = (void *)ed }
 static inline int in_range(void *ptr, _Area area) {
   return area.start <= ptr && ptr < area.end;
diff --git a/tests/mptest/main.c b/tests/mptest/main.c
index aacebe42..55b4d2d2 100644
--- a/tests/mptest/main.c
+++ b/tests/mptest/main.c
@@ -32,7 +32,6 @@ static void free(void *ptr) {
 
 int main() {
   start = (uintptr_t)_heap.start;
-  _trace_on(_TRACE_ALL);
   _cte_init(interrupt);
   _vme_init(alloc, free);
   _mpe_init(f);
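For reference, a minimal hosted-C sketch of the pattern the diff adopts: the global pointer `__am_bootrec` is dropped in favor of a `BOOTREC` macro that casts the fixed boot-record address on every use. The field names `is_ap` and `entry` are taken from the diff; the exact layout of the real `struct boot_info`, and the stand-in object and `demo_entry` function used here so the snippet can run outside AM, are assumptions.

```c
#include <stdio.h>

// Guessed layout: only is_ap and entry appear in the diff.
struct boot_info {
  int is_ap;        // non-zero while an application processor is being booted
  void (*entry)();  // address the AP jumps to after its real-mode bootstrap
};

// The real header casts the fixed physical address 0x7000:
//   #define BOOTREC ((volatile struct boot_info *)0x7000)
// For a runnable demo, the same macro shape points at an ordinary object.
static struct boot_info fake_bootrec;
#define BOOTREC ((volatile struct boot_info *)&fake_bootrec)

static void demo_entry(void) { puts("AP entry reached"); }

int main(void) {
  BOOTREC->is_ap = 1;           // mark the record as an AP boot request
  BOOTREC->entry = demo_entry;  // where the booted CPU should jump
  BOOTREC->entry();             // the real AP reads this after the startup IPI
  return 0;
}
```

Because the macro expands to a cast at each use, no translation unit needs to own a definition of the pointer, which is why the `extern`/definition pair for `__am_bootrec` can be deleted in the diff.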