Merge pull request #14 from HUST-OS/temp

add shared-scheduler
hustccc 2021-04-16 23:25:51 +08:00 committed by GitHub
commit 95a5159acb
28 changed files with 990 additions and 153 deletions


@@ -1,7 +1,8 @@
[workspace]
members = [
"tornado-kernel",
"tornado-user"
"tornado-user",
"shared-scheduler"
]
[profile.dev]


@@ -9,6 +9,8 @@ kernel-elf := build-path + "tornado-kernel"
kernel-bin := build-path + "tornado-kernel.bin"
user-elf := "target/" + target + "/" + user-mode + "/" + "tornado-user"
user-bin := "target/" + target + "/" + user-mode + "/" + "tornado-user.bin"
shared-elf := "target/" + target + "/" + mode + "/" + "shared-scheduler"
shared-bin := "target/" + target + "/" + mode + "/" + "shared-scheduler.bin"
objdump := "riscv64-linux-gnu-objdump"
gdb := "riscv64-unknown-elf-gdb.exe"
@@ -20,16 +22,20 @@ threads := "1"
build:
@just -f "tornado-kernel/justfile" build
build-user app:
@just -f "tornado-user/justfile" build {{app}}
build-user:
@just -f "tornado-user/justfile" build
qemu: build # todo: build user
build-shared:
@just -f "shared-scheduler/justfile" build
qemu: build build-user build-shared
@qemu-system-riscv64 \
-machine virt \
-nographic \
-bios none \
-device loader,file={{bootloader-bin}},addr=0x80000000 \
-device loader,file={{kernel-bin}},addr=0x80200000 \
-device loader,file={{shared-bin}},addr=0x80200000 \
-device loader,file={{kernel-bin}},addr=0x80400000 \
-device loader,file={{user-bin}},addr=0x87000000 \
-smp threads={{threads}} \
@@ -38,21 +44,22 @@ run: build qemu
asm: build
@{{objdump}} -D {{kernel-elf}} | less
asm-user app: (build-user app)
asm-user: build-user
@{{objdump}} -D {{user-elf}} | less
size: build
@{{size}} -A -x {{kernel-elf}}
@{{size}} -A -x {{user-elf}}
@{{size}} -A -x {{shared-elf}}
debug app: build (build-user app)
debug: build build-user
@qemu-system-riscv64 \
-machine virt \
-nographic \
-bios none \
-device loader,file={{bootloader-bin}},addr=0x80000000 \
-device loader,file={{kernel-bin}},addr=0x80200000 \
-device loader,file={{shared-bin}},addr=0x80200000 \
-device loader,file={{kernel-bin}},addr=0x80400000 \
-device loader,file={{user-bin}},addr=0x87000000 \
-smp threads={{threads}} \
-gdb tcp::1234 -S \
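For reference, the loader addresses used by the qemu and debug recipes above lay out physical memory as follows. A sketch in Rust constants — the names are illustrative, not identifiers from the repository:

    // Illustrative constants mirroring the QEMU loader arguments above.
    const BOOTLOADER_BASE: usize = 0x8000_0000; // {{bootloader-bin}}
    const SHARED_SCHEDULER_BASE: usize = 0x8020_0000; // shared-scheduler.bin
    const KERNEL_BASE: usize = 0x8040_0000; // tornado-kernel.bin, moved up from 0x80200000
    const USER_BASE: usize = 0x8700_0000; // tornado-user.bin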


@@ -0,0 +1,7 @@
[build]
target = "riscv64imac-unknown-none-elf"
[target.riscv64imac-unknown-none-elf]
rustflags = [
"-C", "link-arg=-Tlinker.x",
]


@@ -0,0 +1,14 @@
[package]
name = "shared-scheduler"
version = "0.1.0"
authors = ["SKTT1Ryze <linuxgnulover@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spin = "0.7"
riscv = { git = "https://github.com.cnpmjs.org/rust-embedded/riscv.git", rev = "7e9d2e5b", features = ["inline-asm"] }
lazy_static = { version = "1", features = ["spin_no_std"] }
r0 = "1.0"
buddy_system_allocator = "0.6"

shared-scheduler/build.rs Normal file

@@ -0,0 +1,18 @@
use std::env;
use std::fs;
use std::io::Write;
use std::path::PathBuf;
fn main() {
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
// Put the linker script somewhere the linker can find it
fs::File::create(out_dir.join("linker.x"))
.unwrap()
.write_all(include_bytes!("src/linker.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out_dir.display());
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=src/linker.x");
}

shared-scheduler/justfile Normal file

@@ -0,0 +1,24 @@
target := "riscv64imac-unknown-none-elf"
mode := "debug"
build-path := "../target/" + target + "/" + mode + "/"
shared-elf := build-path + "shared-scheduler"
shared-bin := build-path + "shared-scheduler.bin"
objdump := "riscv64-linux-gnu-objdump"
objcopy := "rust-objcopy --binary-architecture=riscv64"
gdb := "riscv64-unknown-elf-gdb.exe"
size := "rust-size"
threads := "2"
build: shared-scheduler
@{{objcopy}} {{shared-elf}} --strip-all -O binary {{shared-bin}}
shared-scheduler:
@cargo build --target={{target}}
asm: build
@{{objdump}} -D {{shared-elf}} | less
size: build
@{{size}} -A -x {{shared-elf}}


@@ -0,0 +1,23 @@
//! Scheduling algorithms are implemented here
mod ring_fifo;
pub use ring_fifo::RingFifoScheduler;
/// Trait that every scheduler instance must implement
pub trait Scheduler<T: Clone + PartialEq> {
/// Type used for priorities
type Priority;
/// Adds a task to the scheduler; returns None on success, or Some(T) if the queue is full
fn add_task(&mut self, task: T) -> Option<T>;
/// Gets a reference to the next task without popping it
fn peek_next_task(&self) -> Option<&T>;
/// Pops the task that should run in the next time slice
fn next_task(&mut self) -> Option<T>;
/// Gets the currently running task; its context is saved when an interrupt occurs
fn current_task(&self) -> Option<T>;
/// Removes a task
fn remove_task(&mut self, task: &T);
/// Sets a task's priority
fn set_priority(&mut self, task: T, priority: Self::Priority);
}
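
For orientation, a minimal sketch of how an executor might drive any implementation of this trait; drain_all is a hypothetical helper, not code from this commit:

    // Illustrative: pop tasks until the scheduler is empty. A real executor
    // would poll each task's future here instead of merely counting.
    fn drain_all<T, S>(sched: &mut S) -> usize
    where
        T: Clone + PartialEq,
        S: Scheduler<T>,
    {
        let mut executed = 0;
        while let Some(_task) = sched.next_task() {
            executed += 1;
        }
        executed
    }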


@@ -0,0 +1,145 @@
//! Ring-buffer FIFO (round-robin) scheduler implementation
use super::Scheduler;
use core::mem::MaybeUninit;
use core::ptr;
/// First-in-first-out round-robin task scheduler
pub struct RingFifoScheduler<T, const N: usize> {
ring: RingQueue<T, N>,
current: Option<T>,
}
impl<T, const N: usize> RingFifoScheduler<T, N> {
/// Creates an empty scheduler
pub const fn new() -> Self {
Self {
ring: RingQueue::new(),
current: None,
}
}
}
impl<T: Clone + PartialEq, const N: usize> Scheduler<T> for RingFifoScheduler<T, N> {
type Priority = ();
fn add_task(&mut self, task: T) -> Option<T> {
// push onto the ring queue
let ans = self.ring.push_back(task);
// returning self.ring.push_back(task) directly would also work
if let Some(t) = ans { // the queue is full; hand the task back
return Some(t)
}
None
}
fn next_task(&mut self) -> Option<T> {
// pop from the head of the queue
let ans = self.ring.pop_front();
self.current = ans.clone();
ans
}
fn peek_next_task(&self) -> Option<&T> {
// borrow the element at the head
self.ring.front()
}
fn current_task(&self) -> Option<T> {
self.current.clone()
}
fn remove_task(&mut self, task: &T) {
// remove the matching task and assert that exactly one was removed
let _ = task; // unused until this is implemented
todo!("not yet implemented")
}
fn set_priority(&mut self, _task: T, _prio: ()) {}
}
pub struct RingQueue<T, const N: usize> {
elem: [MaybeUninit<T>; N],
front: usize,
tail: usize
}
impl<T, const N: usize> RingQueue<T, N> {
pub const fn new() -> Self {
Self {
elem: MaybeUninit::uninit_array(),
front: 0,
tail: 0,
}
}
pub const fn len(&self) -> usize {
// front and tail stay in [0, N), so add N before subtracting;
// a plain wrapping_sub % N would be wrong when N is not a power of two
self.tail.wrapping_add(N).wrapping_sub(self.front) % N
}
pub const fn is_empty(&self) -> bool {
self.tail == self.front
}
#[inline] fn is_full(&self) -> bool {
self.len() == N - 1
}
// if the push fails because the queue is full, the value is handed back
pub fn push_back(&mut self, value: T) -> Option<T> {
if self.is_full() {
return Some(value);
}
// write without dropping the uninitialized previous contents
unsafe { ptr::write(self.elem[self.tail].as_mut_ptr(), value) };
self.tail = self.tail.wrapping_add(1);
if self.tail >= N { // wrap around
self.tail -= N;
}
None // success
}
pub fn pop_front(&mut self) -> Option<T> {
if self.is_empty() {
return None;
}
let value = unsafe { ptr::read(self.elem[self.front].as_ptr()) };
self.front = self.front.wrapping_add(1); // assured non-empty
if self.front >= N { // wrap around
self.front -= N;
}
Some(value)
}
pub fn front(&self) -> Option<&T> {
if self.is_empty() {
None
} else {
Some(unsafe { &*self.elem[self.front].as_ptr() })
}
}
pub fn iter(&self) -> Iter<'_, T, N> {
let mut elem = [&self.elem[0]; N];
for i in 0..self.elem.len() {
elem[i] = &self.elem[i];
}
Iter {
elem,
front: self.front,
tail: self.tail
}
}
}
pub struct Iter<'a, T: 'a, const N: usize> {
elem: [&'a MaybeUninit<T>; N],
front: usize,
tail: usize
}
// TODO: the unsafe code here has not been audited; verify its correctness
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.tail == self.front {
// is empty
None
} else {
let value = unsafe { self.elem[self.front].assume_init_ref() };
self.front = self.front.wrapping_add(1);
if self.front >= N { // wrap around
self.front -= N;
}
Some(value)
}
}
}
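
To illustrate the ring's semantics: with const parameter N the queue holds at most N - 1 elements, so that full and empty states remain distinguishable by the front/tail indices alone. A hypothetical check, not part of the commit:

    fn ring_queue_demo() {
        let mut q: RingQueue<u32, 4> = RingQueue::new();
        assert!(q.push_back(1).is_none());
        assert!(q.push_back(2).is_none());
        assert!(q.push_back(3).is_none());
        // capacity is N - 1 = 3, so the fourth push is rejected and handed back
        assert_eq!(q.push_back(4), Some(4));
        assert_eq!(q.pop_front(), Some(1)); // FIFO order
        assert!(q.push_back(4).is_none()); // room again after a pop
    }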


@@ -0,0 +1,39 @@
use crate::sbi::*;
use core::fmt::{self, Write};
struct Stdout;
// for now, use a plain spinlock instead of the interrupt-disabling lock::Lock; this accounts for multiple hardware threads (harts)
static STDOUT_LOCK: spin::Mutex<()> = spin::Mutex::new(());
impl Write for Stdout {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut buffer = [0u8; 4];
// bind the guard so the lock is held for the whole loop rather than dropped immediately
let _guard = STDOUT_LOCK.lock();
for c in s.chars() {
for code_point in c.encode_utf8(&mut buffer).as_bytes().iter() {
console_putchar(*code_point as usize);
}
}
Ok(())
}
}
pub fn print(args: fmt::Arguments) {
Stdout.write_fmt(args).unwrap();
}
#[macro_export]
macro_rules! print {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!($fmt $(, $($arg)+)?));
}
}
#[macro_export]
macro_rules! println {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));
}
}
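
Usage matches the standard macros; a hypothetical example (each UTF-8 code unit is emitted through the SBI console call):

    fn console_demo() {
        print!("{} + {} = ", 1, 2);
        println!("{}", 1 + 2);
    }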


@@ -0,0 +1,59 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0x80200000;
SECTIONS
{
. = BASE_ADDRESS;
shared_start = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.entry)
*(.text .text.*)
_etext = .;
}
. = ALIGN(4K);
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
_erodata = .;
}
.data : ALIGN(4K) {
_sidata = LOADADDR(.data);
_sdata = .;
PROVIDE(__global_pointer$ = . + 0x800);
*(.sdata .sdata.* .sdata2 .sdata2.*);
*(.data .data.*)
_edata = .;
}
.bss (NOLOAD) : ALIGN(4K) {
*(.bss.stack)
_sbss = .;
*(.sbss .bss .bss.*)
_ebss = .;
}
/* Shared functions are placed in these sections so the kernel and user programs can reach them */
.shared_data : ALIGN(4K) {
_sshared_data = .;
*(.shared_data .shared_data.*)
_eshared_data = .;
}
.shared_text : ALIGN(4K) {
_sshared_text = .;
*(.shared_text .shared_text.*)
_eshared_text = .;
}
/DISCARD/ : {
*(.eh_frame)
*(.debug*)
}
}


@@ -0,0 +1,124 @@
//! Shared scheduler runtime designed for the coroutine kernel
#![no_std]
#![no_main]
#![feature(panic_info_message)]
#![feature(alloc_error_handler)]
#![feature(llvm_asm)]
#![feature(asm)]
#![feature(maybe_uninit_uninit_array, maybe_uninit_ref)]
#![feature(naked_functions)]
extern crate alloc;
mod sbi;
#[macro_use]
mod console;
mod algorithm;
mod task;
mod mm;
use buddy_system_allocator::LockedHeap;
use algorithm::{RingFifoScheduler, Scheduler};
const USER_HEAP_SIZE: usize = 32768;
static mut HEAP_SPACE: [u8; USER_HEAP_SIZE] = [0; USER_HEAP_SIZE];
#[global_allocator]
static HEAP: LockedHeap = LockedHeap::empty();
#[cfg_attr(not(test), panic_handler)]
pub fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
println!("[shared-rt] panic: {:?}", panic_info);
sbi::shutdown()
}
#[cfg_attr(not(test), alloc_error_handler)]
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
println!("[shared-rt] alloc panic: {:?}", layout);
sbi::shutdown()
}
#[link_section = ".text.entry"]
#[export_name = "_start"]
#[naked]
unsafe extern "C" fn start() -> ! {
asm!(
"
la sp, boot_stack_top # set up the boot stack
call _shared_main # enter the Rust entry point; must not return
_start_abort:
wfi # park the hart if _shared_main ever returns
j _start_abort
.section .bss.stack
.global boot_stack
boot_stack:
.space 4096 * 4 # 16 KiB boot stack
.global boot_stack_top
boot_stack_top:
.section .data
", options(noreturn))
}
#[export_name = "_shared_main"]
extern "C" fn shared_main() -> ! {
extern "C" {
static mut _sbss: u32;
static mut _ebss: u32;
static mut _sdata: u32;
static mut _edata: u32;
static _sidata: u32;
fn _shared_raw_table();
fn hello_world();
}
unsafe {
r0::zero_bss(&mut _sbss, &mut _ebss);
r0::init_data(&mut _sdata, &mut _edata, &_sidata);
}
unsafe {
HEAP.lock().init(HEAP_SPACE.as_ptr() as usize, USER_HEAP_SIZE);
}
println!("[shared-rt] enter shared-rt!");
println!("[shared-rt] _shared_raw_table: {:#x}", _shared_raw_table as usize);
// heap allocation test
let mut v = alloc::vec::Vec::new();
for i in 0..5 {
v.push(i);
}
v.iter_mut().for_each(|x| *x += 1);
assert_eq!(v, alloc::vec![1, 2, 3, 4, 5]);
// scheduling algorithm test
let mut ring_fifo: RingFifoScheduler<usize, 50> = RingFifoScheduler::new();
ring_fifo.add_task(0);
ring_fifo.add_task(1);
assert_eq!(ring_fifo.next_task(), Some(0));
assert_eq!(ring_fifo.next_task(), Some(1));
// test calling a function through a raw pointer
let f_ptr = _shared_raw_table as usize as *const ();
let f_code: fn(a0: usize) -> usize = unsafe { core::mem::transmute(f_ptr) };
assert_eq!(f_code(0), hello_world as usize);
println!("[shared-rt] entering kernel...");
unsafe { enter_kernel() }
}
// Jump to the kernel: load the kernel entry address (the dword
// 0x80400000 stored just below) into ra pc-relatively, then jump to it
#[naked]
#[link_section = ".text"]
unsafe extern "C" fn enter_kernel() -> ! {
asm!("
1: auipc ra, %pcrel_hi(1f)
ld ra, %pcrel_lo(1b)(ra)
jr ra
.align 3
1: .dword 0x80400000
", options(noreturn))
}


@@ -0,0 +1,15 @@
//! Memory-related implementation
/// Address-space identifier
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct AddressSpaceId(u16); // in Sv39, [0, 2^16)
impl AddressSpaceId {
pub(crate) unsafe fn from_raw(asid: usize) -> AddressSpaceId {
AddressSpaceId(asid as u16)
}
pub(crate) fn into_inner(self) -> usize {
self.0 as usize
}
}


@@ -0,0 +1,41 @@
#![allow(unused)]
#[inline(always)]
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let ret;
unsafe {
// legacy SBI calling convention: extension number in a7 (x17),
// arguments in a0-a2 (x10-x12), return value back in a0 (x10)
llvm_asm!("ecall"
: "={x10}" (ret)
: "{x10}" (arg0), "{x11}" (arg1), "{x12}" (arg2), "{x17}" (which)
: "memory"
: "volatile");
}
ret
}
const SBI_SET_TIMER: usize = 0;
const SBI_CONSOLE_PUTCHAR: usize = 1;
const SBI_CONSOLE_GETCHAR: usize = 2;
const SBI_CLEAR_IPI: usize = 3;
const SBI_SEND_IPI: usize = 4;
const SBI_REMOTE_FENCE_I: usize = 5;
const SBI_REMOTE_SFENCE_VMA: usize = 6;
const SBI_REMOTE_SFENCE_VMA_ASID: usize = 7;
const SBI_SHUTDOWN: usize = 8;
pub fn console_putchar(c: usize) {
sbi_call(SBI_CONSOLE_PUTCHAR, c, 0, 0);
}
pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
unreachable!()
}
pub fn set_timer(time: usize) {
sbi_call(SBI_SET_TIMER, time, 0, 0);
}


@@ -0,0 +1,53 @@
use spin::{Mutex, MutexGuard};
/// Mutex that disables interrupts while it is held
#[derive(Default)]
pub struct Lock<T>(pub(self) Mutex<T>);
/// Wraps [`MutexGuard`] so that sstatus is restored on drop
pub struct LockGuard<'a, T> {
/// On drop, the [`MutexGuard`] must be dropped first, then sstatus restored
guard: Option<MutexGuard<'a, T>>,
/// sstatus value saved before interrupts were disabled
sstatus: usize,
}
impl<T> Lock<T> {
/// Creates a new lock
pub const fn new(obj: T) -> Self {
Self(Mutex::new(obj))
}
/// Locks the object and returns a guard
pub fn lock(&self) -> LockGuard<'_, T> {
let sstatus: usize;
unsafe {
// atomically clear sstatus.SIE (bit 1) and read the previous value
llvm_asm!("csrrci $0, sstatus, 1 << 1" : "=r"(sstatus) ::: "volatile");
}
LockGuard {
guard: Some(self.0.lock()),
sstatus,
}
}
}
/// On release, drop the inner MutexGuard first, then restore the sstatus register
impl<'a, T> Drop for LockGuard<'a, T> {
fn drop(&mut self) {
self.guard.take();
// restore only the saved SIE bit (bit 1) of sstatus
unsafe { llvm_asm!("csrs sstatus, $0" :: "r"(self.sstatus & 2) :: "volatile") };
}
}
impl<'a, T> core::ops::Deref for LockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.guard.as_ref().unwrap().deref()
}
}
impl<'a, T> core::ops::DerefMut for LockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.guard.as_mut().unwrap().deref_mut()
}
}
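
A usage sketch, assuming a hypothetical counter shared between harts (not part of this commit):

    // Interrupts stay disabled for the guard's lifetime; the spinlock and
    // sstatus.SIE are released/restored in order when the guard drops.
    static COUNTER: Lock<usize> = Lock::new(0);

    fn bump() -> usize {
        let mut guard = COUNTER.lock();
        *guard += 1;
        *guard
    }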


@@ -0,0 +1,19 @@
//! Asynchronous tasks for the coroutine kernel
pub use self::shared::SharedTaskHandle;
mod shared;
mod lock;
/// Result returned by the shared scheduler
/// This may be removed later; the kernel and user programs should talk to the shared runtime only through primitive Rust types
#[derive(Debug)]
#[repr(C)]
pub enum TaskResult {
/// A specific task should be executed immediately
Task(SharedTaskHandle),
/// A task from another address space wants to run; yield the time slice
ShouldYield,
/// The queue is empty and all tasks have finished
Finished
}


@@ -0,0 +1,121 @@
//! Design notes for the shared runtime
use crate::algorithm::{Scheduler, RingFifoScheduler};
use crate::mm::AddressSpaceId;
use core::{ptr::NonNull, usize};
use super::TaskResult;
use super::lock;
use spin::Mutex;
#[link_section = ".shared_text"]
#[export_name = "_shared_raw_table"]
// Given an offset into the function table, returns the function's address:
// 0 - hello_world()
// 1 - shared_scheduler()
// 2 - shared_add_task()
// 3 - shared_pop_task()
pub extern "C" fn raw_table(offset: usize) -> usize {
// println!("[shared-rt] enter shared raw table with offset: {:#x}", offset);
match offset {
0 => hello_world as usize,
1 => shared_scheduler as usize,
2 => shared_add_task as usize,
3 => shared_pop_task as usize,
_ => unimplemented!()
}
}
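
A sketch of how a consumer in another address space resolves this table. The address 0x8021_b000 is the one hard-coded for raw_table elsewhere in this diff; resolve_shared_scheduler is a hypothetical helper:

    unsafe fn resolve_shared_scheduler() -> core::ptr::NonNull<()> {
        // transmute the known load address into the table's signature
        let raw_table: extern "C" fn(usize) -> usize =
            core::mem::transmute(0x8021_b000usize as *const ());
        // offset 1 yields shared_scheduler(); transmute and call it
        let shared_scheduler: fn() -> core::ptr::NonNull<()> =
            core::mem::transmute(raw_table(1));
        shared_scheduler()
    }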
#[link_section = ".shared_text"]
#[no_mangle]
pub fn hello_world() {
println!("[shared-rt] hello world from shared-rt");
}
/// Type of the shared scheduler
type SharedScheduler = Mutex<RingFifoScheduler<SharedTaskHandle, 100>>;
/// The global shared scheduler
/// Placed in the .shared_data section; the kernel and user programs fetch it from this address
#[link_section = ".shared_data"]
pub static SHARED_SCHEDULER: SharedScheduler = Mutex::new(RingFifoScheduler::new());
/// Gets the currently running task so its context can be saved
///
/// Used only by kernel interrupt handling; this code does not need to be shared with user space
pub fn current_task() -> Option<SharedTaskHandle> {
SHARED_SCHEDULER.lock().current_task()
}
/// Gets a pointer to the shared scheduler
///
/// Usable by the shared add-task and pop-next-task operations
#[no_mangle]
#[link_section = ".shared_text"]
pub fn shared_scheduler() -> NonNull<()> {
NonNull::new(&SHARED_SCHEDULER as *const _ as *mut ())
.expect("create non null pointer")
}
/// Handle of a shared task
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct SharedTaskHandle {
/// ID of the hardware thread (hart) running this task
pub(crate) hart_id: usize,
/// Address-space ID
pub(crate) address_space_id: AddressSpaceId,
/// In each virtual address space, task_ptr is the virtual address of the corresponding Arc<Task>
/// e.g. a kernel virtual address in the kernel, a user virtual address in user space
pub(crate) task_ptr: usize,
}
// impl SharedTaskHandle {
// pub fn new(hart_id: usize, asid: usize, task_ptr: usize) -> Self {
// Self {
// hart_id,
// address_space_id: unsafe { AddressSpaceId::from_raw(asid) },
// task_ptr
// }
// }
// }
/// Adds a task to the shared scheduler
///
/// Callable from both kernel mode and user mode
#[no_mangle]
#[link_section = ".shared_text"]
pub unsafe fn shared_add_task(
shared_scheduler: NonNull<()>,
handle: SharedTaskHandle
) -> Option<SharedTaskHandle> {
let s: NonNull<SharedScheduler> = shared_scheduler.cast();
let mut scheduler = s.as_ref().lock();
scheduler.add_task(handle)
}
/// Pops a task from the shared scheduler
///
/// Callable from both kernel mode and user mode
#[no_mangle]
#[link_section = ".shared_text"]
pub unsafe fn shared_pop_task(
shared_scheduler: NonNull<()>,
should_switch: fn(&SharedTaskHandle) -> bool
) -> TaskResult {
// get a reference to the shared scheduler
let mut s: NonNull<SharedScheduler> = shared_scheduler.cast();
let mut scheduler = s.as_mut().lock();
if let Some(task) = scheduler.peek_next_task() {
if should_switch(task) {
// a switch to another address space is needed: leave the task queued and signal the caller
return TaskResult::ShouldYield
}
// pop the task from the shared scheduler and hand it to the caller
let next_task = scheduler.next_task().unwrap();
drop(scheduler); // release the lock
return TaskResult::Task(next_task)
} else {
// no tasks left; report that everything has finished
return TaskResult::Finished;
}
}


@@ -3,7 +3,7 @@ OUTPUT_ARCH(riscv)
ENTRY(_start)
/* Save some virtual address space... assume the kernel is at most 1 GiB */
BASE_ADDRESS = 0xffffffffc0200000;
BASE_ADDRESS = 0xffffffffc0400000;
SECTIONS
{


@@ -92,16 +92,6 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
println!("_user_to_supervisor: {:#x}", _user_to_supervisor as usize);
println!("_supervisor_to_user: {:#x}", _supervisor_to_user as usize);
println!("_user_data: {:#x}", _user_data as usize);
println!("shared_add_task: {:#x}", task::shared_add_task as usize);
println!("shared_pop_task: {:#x}", task::shared_pop_task as usize);
// let executor = task::Executor::default();
// executor.spawn(async {
// println!("Hello world!")
// });
// executor.run_until_idle();
// before starting, the current kernel hart's info must be loaded into the tp register
unsafe { hart::KernelHartInfo::load_hart(hart_id) };
@@ -113,65 +103,77 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
let kernel_memory = memory::MemorySet::new_kernel().expect("create kernel memory set");
kernel_memory.activate();
let shared_scheduler = task::shared_scheduler();
// call functions provided by the shared runtime
let raw_table_ptr = 0x8021_b000 as *const ();
let raw_table: extern "C" fn(a0: usize) -> usize = unsafe { core::mem::transmute(raw_table_ptr) };
let shared_scheduler_ptr = raw_table(1);
let shared_add_task_ptr = raw_table(2);
let shared_pop_task_ptr = raw_table(3);
let shared_scheduler: fn() -> core::ptr::NonNull<()> = unsafe {
core::mem::transmute(shared_scheduler_ptr)
};
let shared_add_task: unsafe fn(
shared_scheduler: core::ptr::NonNull<()>, handle: task::SharedTaskHandle
) -> Option<task::SharedTaskHandle> = unsafe {
core::mem::transmute(shared_add_task_ptr)
};
let shared_pop_task: unsafe fn(
shared_scheduler: core::ptr::NonNull<()>,
should_switch: fn(&task::SharedTaskHandle) -> bool
) -> task::TaskResult = unsafe {
core::mem::transmute(shared_pop_task_ptr)
};
let shared_scheduler = shared_scheduler();
println!("Shared scheduler: {:?}", shared_scheduler);
let process = task::Process::new(kernel_memory).expect("create process 1");
let stack_handle = process.alloc_stack().expect("alloc initial stack");
let task_1 = task::KernelTask::new(task_1(), process.clone());
let task_2 = task::KernelTask::new(task_2(), process.clone());
let task_3 = task::KernelTask::new(FibonacciFuture::new(6), process.clone());
println!("task_1: {:?}", task_1);
println!("task_2: {:?}", task_2);
println!("task_3: {:?}", task_3);
unsafe {
task::shared_add_task(shared_scheduler, task_1.shared_task_handle());
let _pop_task = task::shared_pop_task(shared_scheduler);
shared_add_task(shared_scheduler, task_1.shared_task_handle());
shared_add_task(shared_scheduler, task_2.shared_task_handle());
shared_add_task(shared_scheduler, task_3.shared_task_handle());
}
// try entering user mode
task::run_until_idle(
|| unsafe { shared_pop_task(shared_scheduler, task::SharedTaskHandle::should_switch) },
|handle| unsafe { shared_add_task(shared_scheduler, handle) }
);
// enter user mode
user::try_enter_user(stack_handle.end.0 - 4)
// let user_1_memory = memory::MemorySet::new_user().expect("create user 1 memory set");
// let process_2 = task::Process::new(user_1_memory).expect("create process 2");
// let task_4 = task::user_task::UserTask::new(user_task_1(), process_2);
// unsafe {
// task::shared_add_task(shared_scheduler, task_4.shared_task_handle()); // user task
// task::shared_add_task(shared_scheduler, task_3.shared_task_handle());
// task::shared_add_task(shared_scheduler, task_1.shared_task_handle());
// }
// unsafe {
// riscv::register::sscratch::write(0); // todo: the sscratch register
// riscv::register::sstatus::set_sie() // todo: allow preemption by privileged-level interrupts
// };
// task::run_until_idle(
// || unsafe { task::shared_pop_task(shared_scheduler) },
// |handle| unsafe { task::shared_add_task(shared_scheduler, handle) }
// );
// // Before shutdown, unload the current hart. Memory is cleared after shutdown so this is not strictly necessary; it is reserved for future hot-plugging/unplugging of harts
// Before shutdown, unload the current hart. Memory is cleared after shutdown so this is not strictly necessary; it is reserved for future hot-plugging/unplugging of harts
// unsafe { hart::KernelHartInfo::unload_hart() };
// // No tasks left; shut down
// No tasks left; shut down
// sbi::shutdown()
}
fn spawn(future: impl Future<Output = ()> + 'static + Send + Sync) {
unsafe {
// create a new task
// in user space this should be a system call that obtains a one-time resource-allocation token to replace the "process" struct; the token is then reused to obtain resources
let process = hart::KernelHartInfo::current_process().unwrap();
// build the new task
let new_task = task::KernelTask::new(future, process);
// add it to the scheduler
let shared_scheduler = task::shared_scheduler();
task::shared_add_task(shared_scheduler, new_task.shared_task_handle());
}
}
// fn spawn(future: impl Future<Output = ()> + 'static + Send + Sync) {
// unsafe {
// // create a new task
// // in user space this should be a system call that obtains a one-time resource-allocation token to replace the "process" struct; the token is then reused to obtain resources
// let process = hart::KernelHartInfo::current_process().unwrap();
// // build the new task
// let new_task = task::KernelTask::new(future, process);
// // add it to the scheduler
// let shared_scheduler = task::shared_scheduler();
// task::shared_add_task(shared_scheduler, new_task.shared_task_handle());
// }
// }
async fn task_1() {
spawn(task_2());
println!("hello world from 1!");
}
async fn task_2() {
println!("hello world from 2!; this will block current hart");
// loop { } // simulate a user program occupying the hart for a long time
println!("hello world from 2!");
}
async fn user_task_1() {


@@ -141,6 +141,10 @@ impl MemorySet {
mapping.map_segment(segment, None)?;
}
// map the shared-runtime region
let va_range = VirtualAddress(0x80200000)..VirtualAddress(0x80400000);
let pa_range = PhysicalAddress(0x80200000)..PhysicalAddress(0x80400000);
mapping.map_defined(&va_range, &pa_range, Flags::WRITABLE | Flags::READABLE | Flags::EXECUTABLE );
// map _swap_frame
let swap_frame_va = VirtualAddress(SWAP_FRAME_VA);
let swap_frame_vpn = VirtualPageNumber::floor(swap_frame_va);
@@ -281,19 +285,25 @@ impl MemorySet {
flags: Flags::READABLE | Flags::WRITABLE,
}, None)?;
// map the shared data section
let shared_data_len = _eshared_data as usize - _sshared_data as usize;
let va_range = VirtualAddress(USER_SHARED_DATA_VA)..VirtualAddress(USER_SHARED_DATA_VA + shared_data_len);
let pa_range =
VirtualAddress(_sshared_data as usize).physical_address_linear()..VirtualAddress(_eshared_data as usize).physical_address_linear();
mapping.map_defined(&va_range, &pa_range, Flags::READABLE | Flags::WRITABLE | Flags::USER);
// // map the shared data section
// let shared_data_len = _eshared_data as usize - _sshared_data as usize;
// let va_range = VirtualAddress(USER_SHARED_DATA_VA)..VirtualAddress(USER_SHARED_DATA_VA + shared_data_len);
// let pa_range =
// VirtualAddress(_sshared_data as usize).physical_address_linear()..VirtualAddress(_eshared_data as usize).physical_address_linear();
// mapping.map_defined(&va_range, &pa_range, Flags::READABLE | Flags::WRITABLE | Flags::USER);
// map the shared text section
let shared_text_len = _eshared_text as usize - _sshared_text as usize;
let va_range = VirtualAddress(USER_SHARED_TEXT_VA)..VirtualAddress(USER_SHARED_TEXT_VA + shared_text_len);
let pa_range =
VirtualAddress(_sshared_text as usize).physical_address_linear()..VirtualAddress(_eshared_text as usize).physical_address_linear();
mapping.map_defined(&va_range, &pa_range, Flags::READABLE | Flags::WRITABLE | Flags::EXECUTABLE | Flags::USER);
// // map the shared text section
// let shared_text_len = _eshared_text as usize - _sshared_text as usize;
// let va_range = VirtualAddress(USER_SHARED_TEXT_VA)..VirtualAddress(USER_SHARED_TEXT_VA + shared_text_len);
// let pa_range =
// VirtualAddress(_sshared_text as usize).physical_address_linear()..VirtualAddress(_eshared_text as usize).physical_address_linear();
// mapping.map_defined(&va_range, &pa_range, Flags::READABLE | Flags::WRITABLE | Flags::EXECUTABLE | Flags::USER);
// map the shared-runtime region
// the shared runtime is currently hard-wired at physical address 0x80200000
let va_range = VirtualAddress(0x80200000)..VirtualAddress(0x80400000);
let pa_range = PhysicalAddress(0x80200000)..PhysicalAddress(0x80400000);
mapping.map_defined(&va_range, &pa_range, Flags::WRITABLE | Flags::READABLE | Flags::EXECUTABLE | Flags::USER);
let address_space_id = crate::hart::KernelHartInfo::alloc_address_space_id()?; // todo: free the asid
println!("New asid = {:?}", address_space_id);


@@ -35,7 +35,6 @@ where
// poll our future and give it a waker
let mut context = Context::from_waker(&*waker);
// println!("Poll begin");
let ret = task.future.lock().as_mut().poll(&mut context);
// println!("Ret = {:?}", ret);
if let Poll::Pending = ret {


@@ -9,10 +9,9 @@ pub use kernel_task::{KernelTask, TaskId};
pub use process::{Process, ProcessId};
pub use executor::run_until_idle;
pub use shared::{
SHARED_SCHEDULER, SHARED_RAW_TABLE,
SHARED_SCHEDULER,
SharedTaskHandle, shared_scheduler,
current_task, shared_add_task,
shared_pop_task
current_task
};
/// Result returned by the shared scheduler


@@ -33,13 +33,13 @@ use super::lock;
/// Type of the shared scheduler
// type SharedScheduler = spin::Mutex<RingFifoScheduler<SharedTaskHandle, 500>>;
type SharedScheduler = lock::Lock<SameAddrSpaceScheduler<SharedTaskHandle, 500>>;
type SharedScheduler = lock::Lock<RingFifoScheduler<SharedTaskHandle, 500>>;
/// Scheduler for all tasks
///
/// Note: data inside the .shared_data section must not allocate heap memory
#[link_section = ".shared_data"]
pub static SHARED_SCHEDULER: SharedScheduler = lock::Lock::new(SameAddrSpaceScheduler::new());
pub static SHARED_SCHEDULER: SharedScheduler = lock::Lock::new(RingFifoScheduler::new());
/// Gets the currently running task so its context can be saved
///
@@ -70,9 +70,16 @@ pub struct SharedTaskHandle {
}
impl SharedTaskHandle {
fn should_switch(&self) -> bool {
pub fn _new(hart_id: usize, asid: usize, task_ptr: usize) -> Self {
Self {
hart_id,
address_space_id: unsafe { AddressSpaceId::from_raw(asid) },
task_ptr
}
}
pub fn should_switch(handle: &SharedTaskHandle) -> bool {
// if the address space changes between the current task and the next, a context switch is required
KernelHartInfo::current_address_space_id() != self.address_space_id
KernelHartInfo::current_address_space_id() != handle.address_space_id
}
}
@@ -83,43 +90,43 @@ impl crate::algorithm::WithAddressSpace for SharedTaskHandle {
}
// todo: make use of this -- luojia65
pub static SHARED_RAW_TABLE: (unsafe fn(NonNull<()>, SharedTaskHandle) -> Option<SharedTaskHandle>, unsafe fn(NonNull<()>) -> TaskResult)
= (shared_add_task, shared_pop_task);
// pub static SHARED_RAW_TABLE: (unsafe fn(NonNull<()>, SharedTaskHandle) -> Option<SharedTaskHandle>, unsafe fn(NonNull<()>) -> TaskResult)
// = (shared_add_task, shared_pop_task);
/// Shared add-task operation
///
/// Callable from both kernel and user mode; both access the same memory behind shared_scheduler
#[link_section = ".shared_text"]
#[no_mangle]
pub unsafe fn shared_add_task(shared_scheduler: NonNull<()>, handle: SharedTaskHandle) -> Option<SharedTaskHandle> {
let s: NonNull<SharedScheduler> = shared_scheduler.cast();
// println!("Add task: scheduler = {:?}, handle = {:x?}", s, handle);
let mut scheduler = s.as_ref().lock();
scheduler.add_task(handle)
}
// /// Shared add-task operation
// ///
// /// Callable from both kernel and user mode; both access the same memory behind shared_scheduler
// #[link_section = ".shared_text"]
// #[no_mangle]
// pub unsafe fn shared_add_task(shared_scheduler: NonNull<()>, handle: SharedTaskHandle) -> Option<SharedTaskHandle> {
// let s: NonNull<SharedScheduler> = shared_scheduler.cast();
// // println!("Add task: scheduler = {:?}, handle = {:x?}", s, handle);
// let mut scheduler = s.as_ref().lock();
// scheduler.add_task(handle)
// }
/// Shared pop-next-task operation
///
/// Callable from both kernel and user mode; both access the same memory behind shared_scheduler
#[link_section = ".shared_text"]
#[no_mangle]
pub unsafe fn shared_pop_task(shared_scheduler: NonNull<()>) -> TaskResult {
// get a reference to the shared scheduler
let mut s: NonNull<SharedScheduler> = shared_scheduler.cast();
let mut scheduler = s.as_mut().lock();
if let Some(task) = scheduler.peek_next_task() {
// a task remains; try to run it
if task.should_switch() { // if it belongs to another address space, don't pop it; signal an address-space switch
return TaskResult::ShouldYield
}
// the task is in this address space; take it out of the scheduler
// note(unwrap): peek above already returned Some
let next_task = scheduler.next_task().unwrap();
drop(scheduler); // release the lock
// return the task so the executor in this address space can run it
TaskResult::Task(next_task)
} else {
// no tasks left; report finished
TaskResult::Finished
}
}
// /// Shared pop-next-task operation
// ///
// /// Callable from both kernel and user mode; both access the same memory behind shared_scheduler
// #[link_section = ".shared_text"]
// #[no_mangle]
// pub unsafe fn shared_pop_task(shared_scheduler: NonNull<()>) -> TaskResult {
// // get a reference to the shared scheduler
// let mut s: NonNull<SharedScheduler> = shared_scheduler.cast();
// let mut scheduler = s.as_mut().lock();
// if let Some(task) = scheduler.peek_next_task() {
// // a task remains; try to run it
// if task.should_switch() { // if it belongs to another address space, don't pop it; signal an address-space switch
// return TaskResult::ShouldYield
// }
// // the task is in this address space; take it out of the scheduler
// // note(unwrap): peek above already returned Some
// let next_task = scheduler.next_task().unwrap();
// drop(scheduler); // release the lock
// // return the task so the executor in this address space can run it
// TaskResult::Task(next_task)
// } else {
// // no tasks left; report finished
// TaskResult::Finished
// }
// }


@@ -62,8 +62,8 @@ pub fn try_enter_user(kernel_stack_top: usize) -> ! {
kernel_satp, 0, 0, kernel_stack_top, user_stack_top, _test_user_trap as usize
);
// pass the user-space virtual address of the .shared_text section to user mode via the gp register
swap_cx.set_gp(memory::USER_SHARED_TEXT_VA);
// pass the address of raw_table in the shared runtime to the user via the gp register
swap_cx.set_gp(0x8021_b000);
// println!("swap_cx.epc: {:#x}", swap_cx.epc);
// println!("swap_cx.trap_handler: {:#x}", swap_cx.user_trap_handler);
trap::switch_to_user(swap_cx, user_satp)


@@ -7,8 +7,8 @@ user-bin := build-path + "tornado-user.bin"
objcopy := "rust-objcopy --binary-architecture=riscv64"
build app: (elf app)
build: user
@{{objcopy}} {{user-elf}} --strip-all -O binary {{user-bin}}
elf app:
@cargo build --target {{target}} --{{mode}} --bin {{app}}
user:
@cargo build --target={{target}} --{{mode}}


@@ -1,3 +0,0 @@
fn main() {
println!("Hello world!");
}


@@ -7,6 +7,8 @@
extern crate alloc;
mod excutor;
mod shared;
mod task;
use buddy_system_allocator::LockedHeap;
use alloc::vec;
@@ -39,11 +41,6 @@ pub fn handle_alloc_error(_layout: core::alloc::Layout) -> ! {
#[link_section = ".text.entry"]
#[export_name = "_start"]
fn main() -> ! {
unsafe {
asm!(
"lw t0, (gp)"
);
}
unsafe {
HEAP.lock().init(HEAP_SPACE.as_ptr() as usize, USER_HEAP_SIZE);
}
@@ -56,6 +53,41 @@ fn main() -> ! {
excutor::spawn(fib);
let ret = excutor::try_join();
assert_eq!(ret, Some(8));
// fetch the shared runtime's function table
let shared_raw_table_ptr: usize;
unsafe { asm!("mv {}, gp", out(reg) shared_raw_table_ptr, options(nomem, nostack)); }; // rust-lang/rust#82753 Thank you @Amanieu :)
assert_eq!(shared_raw_table_ptr, 0x8021_b000);
let raw_table: extern "C" fn(a0: usize) -> usize = unsafe {
core::mem::transmute(shared_raw_table_ptr)
};
let shared_scheduler_ptr = raw_table(1);
let shared_add_task_ptr = raw_table(2);
let shared_pop_task_ptr = raw_table(3);
let shared_scheduler: fn() -> core::ptr::NonNull<()> = unsafe {
core::mem::transmute(shared_scheduler_ptr)
};
let shared_add_task: unsafe fn(
shared_scheduler: core::ptr::NonNull<()>, handle: shared::SharedTaskHandle
) -> Option<shared::SharedTaskHandle> = unsafe {
core::mem::transmute(shared_add_task_ptr)
};
let shared_pop_task: unsafe fn(
shared_scheduler: core::ptr::NonNull<()>,
should_switch: fn(&shared::SharedTaskHandle) -> bool
) -> task::TaskResult = unsafe {
core::mem::transmute(shared_pop_task_ptr)
};
let shared_scheduler = shared_scheduler();
let task = task::UserTask::new(FibonacciFuture::new(6));
unsafe {
shared_add_task(shared_scheduler, task.shared_task_handle());
}
let ret = shared::run_until_ready(
|| unsafe { shared_pop_task(shared_scheduler, shared::SharedTaskHandle::should_switch) },
|handle| unsafe { shared_add_task(shared_scheduler, handle) }
);
assert_eq!(ret, Some(8));
// todo: system call to exit the process
unsafe { llvm_asm!("ecall"); }
unreachable!()


@@ -1,4 +1,8 @@
// Try adding tasks to the shared scheduler from user mode
use super::task::{TaskResult, UserTask};
use woke::waker_ref;
use alloc::sync::Arc;
use core::{mem, task::{Poll, Context}};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
@@ -22,3 +26,61 @@ impl AddressSpaceId {
self.0 as usize
}
}
impl SharedTaskHandle {
pub fn _new(hart_id: usize, asid: usize, task_ptr: usize) -> Self {
Self {
hart_id,
address_space_id: unsafe { AddressSpaceId::from_raw(asid) },
task_ptr
}
}
pub fn should_switch(handle: &SharedTaskHandle) -> bool {
// todo
false
}
}
pub fn run_until_ready<F, G>(pop_task: F, push_task: G) -> Option<usize>
where
F: Fn() -> TaskResult,
G: Fn(SharedTaskHandle) -> Option<SharedTaskHandle>
{
loop {
let task = pop_task();
if let TaskResult::Task(handle) = task {
let task: Arc<UserTask> = unsafe { Arc::from_raw(handle.task_ptr as *mut _) };
if task.is_sleeping() {
mem::forget(task); // don't free the memory
push_task(handle);
continue
}
mem::forget(task); // don't free the memory
}
match task {
TaskResult::Task(handle) => {
// within the same (kernel) address space
let task: Arc<UserTask> = unsafe { Arc::from_raw(handle.task_ptr as *mut _) };
task.mark_sleep();
// make a waker for our task
let waker = waker_ref(&task);
// poll our future and give it a waker
let mut context = Context::from_waker(&*waker);
let ret = task.future.lock().as_mut().poll(&mut context);
if let Poll::Ready(x) = ret {
return Some(x);
}
else {
mem::forget(task); // don't free the task's memory; it stays live in memory and continues to be used
push_task(handle);
}
},
TaskResult::ShouldYield => {
//todo
// crate::trap::switch_to_user()
},
TaskResult::Finished => return None
}
}
}


@@ -1,6 +1,5 @@
// todo: reorganize
use crate::{hart::KernelHartInfo, memory::VirtualAddress};
// provided by the user library
@@ -8,32 +7,29 @@ use crate::{hart::KernelHartInfo, memory::VirtualAddress};
///
/// Temporary design for now: user-mode tasks are hard-coded in the kernel
use super::process::Process;
use alloc::sync::Arc;
use spin::Mutex;
use core::{ops::Range, pin::Pin};
use core::pin::Pin;
use alloc::boxed::Box;
use core::future::Future;
use core::sync::atomic::{AtomicUsize, Ordering};
use super::SharedTaskHandle;
use super::shared::{SharedTaskHandle, AddressSpaceId};
/// Temporary user-mode task implementation
pub struct UserTask {
/// Task ID
pub id: UserTaskId,
/// Process the task belongs to
pub process: Arc<Process>,
/// Address space the task belongs to
pub asid: AddressSpaceId,
/// Mutable part of the task info
pub inner: Mutex<UserTaskInner>,
/// The task's future
pub future: Mutex<Pin<Box<dyn Future<Output = ()> + 'static + Send + Sync>>> // an UnsafeCell might be a better fit than a Mutex
pub future: Mutex<Pin<Box<dyn Future<Output = usize> + 'static + Send + Sync>>> // an UnsafeCell might be a better fit than a Mutex
}
/// Mutable part of the task info
pub struct UserTaskInner {
/// Task stack (user mode)
pub stack: Option<Range<VirtualAddress>>,
/// Whether the task is sleeping
pub sleeping: bool,
/// Whether the task has finished
@@ -60,10 +56,8 @@ impl UserTaskId {
impl UserTask {
/// Creates a user-mode task
pub fn new(
future: impl Future<Output = ()> + 'static + Send + Sync,
process: Arc<Process>
future: impl Future<Output = usize> + 'static + Send + Sync,
) -> Arc<UserTask> {
// get a new user-task ID
let id = UserTaskId::generate();
@@ -71,9 +65,9 @@ impl UserTask {
Arc::new(
UserTask {
id,
process,
// todo: address-space ID
asid: unsafe { AddressSpaceId::from_raw(0) },
inner: Mutex::new(UserTaskInner {
stack: None,
sleeping: false,
finished: false,
}),
@@ -82,20 +76,45 @@ impl UserTask {
)
}
/// Assigns a stack to the user-mode task
pub fn set_user_stack(&mut self, stack: Range<VirtualAddress>) {
self.inner.lock().stack = Some(stack);
}
/// Converts into a shared task handle
/// Danger: this creates an unbounded lifetime
pub unsafe fn shared_task_handle(self: Arc<Self>) -> SharedTaskHandle {
SharedTaskHandle {
hart_id: KernelHartInfo::hart_id(),
address_space_id: self.process.address_space_id(),
hart_id: 0,
// todo: address-space ID
address_space_id: self.asid,
task_ptr: Arc::into_raw(self) as usize
}
}
}
impl UserTask {
fn mark_ready(&self) {
self.inner.lock().sleeping = false;
}
pub(crate) fn is_sleeping(&self) -> bool {
self.inner.lock().sleeping
}
pub(crate) fn mark_sleep(&self) {
self.inner.lock().sleeping = true;
}
}
impl woke::Woke for UserTask {
fn wake_by_ref(task: &Arc<Self>) {
task.mark_ready();
}
}
/// Result returned by the shared scheduler
#[derive(Debug)]
pub enum TaskResult {
/// A specific task should be executed immediately
Task(SharedTaskHandle),
/// A task from another address space wants to run; yield the time slice
ShouldYield,
/// The queue is empty and all tasks have finished
Finished,
}