Merge pull request #21 from HUST-OS/clean_up_the_code

Clean up the code
This commit is contained in:
Chunchi Che 2021-04-19 17:51:47 +08:00 committed by GitHub
commit 327a1bde1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 19 additions and 62 deletions

View File

@ -38,20 +38,6 @@ SECTIONS
_ebss = .;
}
/* 将一些共享的函数放到这个段方便内核或用户访问 */
.shared_data : ALIGN(4K) {
_sshared_data = .;
*(.shared_data .shared_data.*)
_eshared_data = .;
}
.shared_text : ALIGN(4K) {
_sshared_text = .;
*(.shared_text .shared_text.*)
_eshared_text = .;
}
/DISCARD/ : {
*(.eh_frame)
*(.debug*)

View File

@ -8,7 +8,6 @@
#![feature(asm)]
#![feature(maybe_uninit_uninit_array, maybe_uninit_ref)]
#![feature(naked_functions)]
#![feature(const_raw_ptr_to_usize_cast)]
extern crate alloc;

View File

@ -12,6 +12,7 @@ pub enum TaskResult {
/// 应当立即执行特定任务
Task(SharedTaskHandle),
/// 其他地址空间的任务要运行,应当让出时间片
/// 并返回下一个地址空间的编号
ShouldYield(usize),
/// 队列已空,所有任务已经结束
Finished

View File

@ -1,4 +1,4 @@
//! 共享运行时的设计思路
//! 共享载荷
//!
use crate::algorithm::{Scheduler, RingFifoScheduler};
use crate::mm::AddressSpaceId;
@ -6,6 +6,7 @@ use core::{ptr::NonNull, usize};
use super::TaskResult;
use spin::Mutex;
/// 共享载荷虚函数表
#[no_mangle]
#[link_section = ".data"]
#[export_name = "_raw_table"]
@ -26,7 +27,6 @@ type SharedScheduler = Mutex<RingFifoScheduler<SharedTaskHandle, 100>>;
/// 全局的共享调度器
/// 放到 .shared_data 段,内核或用户从这个地址里取得共享调度器
// #[link_section = ".shared_data"]
pub static SHARED_SCHEDULER: SharedScheduler = Mutex::new(RingFifoScheduler::new());
/// 得到当前正在运行的任务,以备保存上下文
@ -40,7 +40,6 @@ pub fn current_task() -> Option<SharedTaskHandle> {
///
/// 可以在向共享载荷中添加任务、弹出下一个任务时使用
#[no_mangle]
#[link_section = ".shared_text"]
#[export_name = "_shared_scheduler"]
pub fn shared_scheduler() -> NonNull<()> {
NonNull::new(&SHARED_SCHEDULER as *const _ as *mut ())
@ -64,7 +63,6 @@ pub struct SharedTaskHandle {
///
/// 在内核态和用户态都可以调用
#[no_mangle]
#[link_section = ".shared_text"]
#[export_name = "_shared_add_task"]
pub unsafe fn shared_add_task(
shared_scheduler: NonNull<()>,
@ -79,7 +77,6 @@ pub unsafe fn shared_add_task(
///
/// 在内核态和用户态都可以调用
#[no_mangle]
#[link_section = ".shared_text"]
#[export_name = "_shared_pop_task"]
pub unsafe fn shared_pop_task(
shared_scheduler: NonNull<()>,

View File

@ -136,12 +136,6 @@ async fn task_2() {
println!("hello world from 2!");
}
/// Demo task meant to run at user level: prints a greeting, then spins forever.
async fn user_task_1() {
println!("From user level!");
// Busy-wait forever — there is no way to exit a process yet (see todo below).
loop {}
// todo: exit the process
}
struct FibonacciFuture {
a: usize,
b: usize,

View File

@ -22,12 +22,18 @@ impl Satp {
_ => unreachable!(),
}
}
/// Address-space id (ASID) field of the `satp` value.
///
/// Extracts bits 44..60 (16 bits) of the raw register.
pub fn asid(&self) -> usize {
    // Shift the ASID field down and mask to its 16-bit width.
    (self.0 >> 44) & 0xffff
}
/// Physical page number of the root page table.
///
/// Extracts bits 0..44 (44 bits) of the raw `satp` value.
pub fn ppn(&self) -> usize {
    // Mask to the low 44 bits; no shift needed since PPN starts at bit 0.
    self.0 & ((1usize << 44) - 1)
}
/// 找三级页表项
pub fn find_pte(&self, vpn: VirtualPageNumber) -> Option<&mut PageTableEntry> {
let root_ppn = PhysicalPageNumber::from_satp(self.0);
let root_table_pa = root_ppn.start_address();
@ -45,11 +51,14 @@ impl Satp {
}
Some(entry)
}
/// Translates a virtual page number into a physical page number.
///
/// Returns `None` when no valid leaf page-table entry exists for `vpn`.
pub fn translate(&self, vpn: VirtualPageNumber) -> Option<PhysicalPageNumber> {
    // Walk the page table, then read the page number out of the leaf entry.
    self.find_pte(vpn).map(|entry| entry.page_number())
}
/// Returns the raw `satp` register value wrapped by this type.
pub fn inner(&self) -> usize {
self.0
}

View File

@ -39,28 +39,5 @@ pub fn syscall(param: [usize; 2], func: usize, module: usize) -> SyscallResult {
/// The address-space id of the next task is handed from user level to the kernel through a0.
// NOTE(review): the `func` parameter is not read in this body — confirm it is intentional.
fn switch_next_task(param: [usize; 2], func: usize) -> SyscallResult {
let next_asid = unsafe { AddressSpaceId::from_raw(param[0]) }; // a0
if next_asid.into_inner() == 0 {
// Kernel task: for testing purposes it is not executed; return straight to user level.
// NOTE(review): 0x8600_0000 is presumably the base address of the shared payload's
// raw function table — confirm against the linker layout.
let raw_table_ptr = 0x8600_0000 as *const ();
let raw_table: extern "C" fn(a0: usize) -> usize = unsafe { core::mem::transmute(raw_table_ptr) };
// Slot 0 of the raw table yields the address of the shared-scheduler accessor.
let shared_scheduler_ptr = raw_table(0);
let shared_scheduler: fn() -> core::ptr::NonNull<()> = unsafe {
core::mem::transmute(shared_scheduler_ptr)
};
let shared_scheduler = shared_scheduler();
// Slot 2 of the raw table yields the address of `shared_pop_task`.
let shared_pop_task_ptr = raw_table(2);
let shared_pop_task: unsafe fn(
shared_scheduler: core::ptr::NonNull<()>,
should_switch: fn(&crate::task::SharedTaskHandle) -> bool
) -> crate::task::TaskResult = unsafe {
core::mem::transmute(shared_pop_task_ptr)
};
// Pop (and discard) the next task from the shared scheduler.
unsafe { shared_pop_task(shared_scheduler, crate::task::SharedTaskHandle::should_switch); }
return SyscallResult::Procceed{ code: 0, extra: 0};
}
// Look up the satp value registered for the requested address space.
if let Some(next_satp) = KernelHartInfo::get_satp(next_asid) {
SyscallResult::NextASID{ satp: next_satp}
} else {
panic!("Next satp not found!")
}
// NOTE(review): this `todo!()` conflicts with the if/else expression above
// (whose value is otherwise discarded) — confirm the intended control flow.
todo!()
}

View File

@ -1,14 +1,14 @@
//! 从用户过来的系统调用在这里处理
use riscv::register::scause::{self, Trap, Interrupt};
use riscv::register::{sepc, stval};
use crate::{memory::{self, PAGE_SIZE, Satp}, trap::SwapContext};
use crate::{memory::{self, Satp}, trap::SwapContext};
use crate::trap;
use super::{SyscallResult, syscall};
/// 测试用的中断处理函数,用户态发生中断会陷入到这里
/// 用户地址空间的 satp 寄存器通过 t2 传给内核
#[export_name = "_user_trap_handler"]
pub extern "C" fn user_trap_handler() {
// 用户地址空间的 satp 寄存器通过 t2 传给内核
let user_satp: usize;
unsafe {
asm!("mv {}, t2", out(reg) user_satp, options(nomem, nostack));
@ -43,14 +43,10 @@ pub extern "C" fn user_trap_handler() {
// 不跳过指令,继续运行
},
SyscallResult::NextASID{ satp } => {
let next_swap_cx = unsafe { get_swap_cx(&satp) };
next_swap_cx.epc += 4;
trap::switch_to_user(next_swap_cx, satp.inner())
todo!()
}
}
println!("return to user");
trap::switch_to_user(swap_cx, user_satp.inner())
// unreachable!()
}
_ => todo!("scause: {:?}, sepc: {:#x}, stval: {:#x}", scause::read().cause(), sepc::read(), stval::read())
}

View File

@ -55,7 +55,6 @@ pub struct TaskInner {
impl KernelTask {
/// 创建一个任务,将会复用执行器的栈
pub fn new(
future: impl Future<Output = ()> + 'static + Send + Sync,
process: Arc<Process>,

View File

@ -66,6 +66,7 @@ impl crate::algorithm::WithAddressSpace for SharedTaskHandle {
}
}
/// 共享载荷
pub struct SharedLoad {
pub shared_scheduler: NonNull<()>,
shared_add_task: unsafe fn(
@ -102,11 +103,13 @@ impl SharedLoad {
}
}
/// Adds a task to the shared payload's scheduler.
///
/// Returns the handle back when the shared scheduler could not accept it.
///
/// # Safety
///
/// Calls through a raw function pointer taken from the shared payload's table.
pub unsafe fn add_task(&self, handle: SharedTaskHandle) -> Option<SharedTaskHandle> {
    // Invoke the payload-provided `_shared_add_task` entry point directly.
    (self.shared_add_task)(self.shared_scheduler, handle)
}
/// 从共享载荷中弹出任务
pub unsafe fn pop_task(&self, should_yield: fn(&SharedTaskHandle) -> bool) -> TaskResult {
let f = self.shared_pop_task;
f(self.shared_scheduler, should_yield)

View File

@ -262,7 +262,6 @@ pub fn switch_to_user(context: &SwapContext, user_satp: usize) -> ! {
let user_trap_va = SWAP_FRAME_VA as usize;
// 该函数最后应该跳转的虚拟地址
let jmp_va = _supervisor_to_user as usize - _swap_frame as usize + SWAP_FRAME_VA;
// println!("jmp_va = {:#x}", jmp_va);
// 设置用户态陷入内核时需要跳转的地址
unsafe { stvec::write(user_trap_va, TrapMode::Direct); }
@ -273,12 +272,10 @@ pub fn switch_to_user(context: &SwapContext, user_satp: usize) -> ! {
// 将 SwapContext.epc 写到 sepc 寄存器
// 这个是用户程序入口
// println!("sepc: {:#x}", context.epc);
riscv::register::sepc::write(context.epc);
// todo: 如何处理 tp 寄存器
// 上面这样写生产出的汇编好像不太对,因此改为下面这样写
unsafe {
llvm_asm!("fence.i" :::: "volatile");
llvm_asm!("jr $0" :: "r"(jmp_va), "{a0}"(SWAP_CONTEXT_VA), "{a1}"(user_satp) :: "volatile");

View File

@ -35,7 +35,6 @@ pub fn first_enter_user(kernel_stack_top: usize) -> ! {
let user_stack_handle = process.alloc_stack().expect("alloc user stack");
// 这里减 4 是因为映射的时候虚拟地址的右半边是不包含的
let user_stack_top = user_stack_handle.end.0 - 4;
// println!("kernel stack top: {:#x}, user stack top: {:#x}", kernel_stack_top, user_stack_top);
// 获取用户地址空间编号
let user_asid = process.address_space_id().into_inner();