Merge pull request #6 from luojia65/main

merge from luojia65
This commit is contained in:
hustccc 2021-03-08 16:11:48 +08:00 committed by GitHub
commit 9f60f8b5f7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 117 additions and 59 deletions

View File

@@ -1,8 +1,8 @@
//! Functions related to hardware threads (harts)
use core::ops::Add;
use alloc::collections::LinkedList;
use alloc::boxed::Box;
use alloc::sync::Arc;
use crate::process::Process;
use crate::memory::AddressSpaceId;
/// Write a pointer into the context pointer
@@ -28,7 +28,11 @@ pub fn read_tp() -> usize {
// In the kernel, tp points to a structure that records the current hart id and the address spaces allocated so far
pub struct KernelHartInfo {
hart_id: usize,
address_space_id: AddressSpaceId,
current_address_space_id: AddressSpaceId,
current_process: Option<Arc<Process>>,
hart_max_asid: AddressSpaceId,
// recycling pool of freed ids; the largest id allocated so far
asid_alloc: (LinkedList<usize>, usize),
}
impl KernelHartInfo {
@@ -36,7 +40,10 @@ impl KernelHartInfo {
pub unsafe fn load_hart(hart_id: usize) {
let hart_info = Box::new(KernelHartInfo {
hart_id,
address_space_id: AddressSpaceId::kernel(),
current_address_space_id: AddressSpaceId::kernel(),
current_process: None,
hart_max_asid: crate::memory::max_asid(),
asid_alloc: (LinkedList::new(), 0), // 0 is reserved for the kernel, the rest go to applications
});
let tp = Box::into_raw(hart_info) as usize; // todo: this leaks memory; it should be handled in drop
write_tp(tp)
@@ -51,26 +58,63 @@ impl KernelHartInfo {
/// Get the id of the current hart; must be used after load_hart
pub fn hart_id() -> usize {
let addr = read_tp();
let bx: Box<KernelHartInfo> = unsafe { Box::from_raw(addr as *mut _) };
let ans = bx.hart_id;
drop(Box::into_raw(bx));
ans
use_tp_box(|b| b.hart_id)
}
pub unsafe fn load_address_space_id(asid: AddressSpaceId) {
let addr = read_tp();
let mut bx: Box<KernelHartInfo> = Box::from_raw(addr as *mut _);
bx.address_space_id = asid;
drop(Box::into_raw(bx)); // keep the memory behind the Box from being freed
use_tp_box(|b| b.current_address_space_id = asid);
}
/// Get the current address space id
pub fn current_address_space_id() -> AddressSpaceId {
let addr = read_tp();
let bx: Box<KernelHartInfo> = unsafe { Box::from_raw(addr as *mut _) };
let ans = bx.address_space_id;
drop(Box::into_raw(bx));
ans
use_tp_box(|b| b.current_address_space_id)
}
pub unsafe fn load_process(process: Arc<Process>) {
use_tp_box(|b| b.current_process = Some(process.clone()));
}
pub fn current_process() -> Option<Arc<Process>> {
use_tp_box(|b| b.current_process.clone())
}
/// Allocate an address space id
pub fn alloc_address_space_id() -> Option<AddressSpaceId> {
use_tp_box(|b| {
let (free, max) = &mut b.asid_alloc;
if let Some(_) = free.front() { // if the free list has entries, hand one back
return free.pop_front().map(|idx| unsafe { AddressSpaceId::from_raw(idx) })
}
// if the free list is empty
if *max < b.hart_max_asid.into_inner() {
let ans = *max;
*max += 1;
Some(unsafe { AddressSpaceId::from_raw(ans) })
} else {
None
}
})
}
/// Free an address space id
pub fn free_address_space_id(asid: AddressSpaceId) {
use_tp_box(|b| {
let (free, max) = &mut b.asid_alloc;
if asid.into_inner() == *max && *max > 0 {
*max -= 1;
return;
} else {
free.push_back(asid.into_inner())
}
});
}
}
#[inline]
fn use_tp_box<F: Fn(&mut Box<KernelHartInfo>) -> T, T>(f: F) -> T {
let addr = read_tp();
let mut bx: Box<KernelHartInfo> = unsafe { Box::from_raw(addr as *mut _) };
let ans = f(&mut bx);
drop(Box::into_raw(bx)); // keep the memory behind the Box from being freed
ans
}
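Taken together, a hart would use these functions roughly as follows (a minimal sketch built only from the API above; the literal 0 stands in for a real hart id):

// Sketch: per-hart setup and ASID bookkeeping using only the functions defined above.
unsafe { KernelHartInfo::load_hart(0) };             // hang the per-hart struct off tp
let asid = KernelHartInfo::alloc_address_space_id() // asid 0 stays with the kernel
    .expect("no address space id left on this hart");
// ... build a MemorySet / Process under `asid` here ...
KernelHartInfo::free_address_space_id(asid);         // return the id to the recycle pool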

View File

@@ -49,6 +49,7 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
}
println!("heap test passed");
println!("Max asid = {:?}", memory::max_asid());
let remap = memory::MemorySet::new_kernel().unwrap();
remap.activate();
println!("kernel remapped");
@@ -74,17 +75,16 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
// executor.run_until_idle();
println!("Max asid = {:?}", memory::riscv_max_asid());
// Before starting the program, the kernel's current hart info must be loaded into the tp register
unsafe { hart::KernelHartInfo::load_hart(hart_id) };
// From this point on address spaces can be allocated; before it only the kernel's address space is usable
println!("Current hart: {}", hart::KernelHartInfo::hart_id());
// todo: something needs to write into tp here; currently this errors out
let process = process::Process::new_kernel().expect("create process 1");
// let stack_handle = process.alloc_stack().expect("alloc initial stack");
let task_1 = process::KernelTask::new(task_1(), process.clone());
let task_2 = process::KernelTask::new(task_2(), process.clone());
let task_3 = process::KernelTask::new(async { task_3().await }, process);
@@ -103,9 +103,7 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
|| unsafe { process::shared_pop_task(shared_scheduler) },
|handle| unsafe { process::shared_add_task(shared_scheduler, handle) }
);
unsafe {
process::shared_add_task(shared_scheduler, task_3.shared_task_handle());
}
process::Executor::block_on(|| unsafe { process::shared_pop_task(shared_scheduler)});
// Unload the current hart before shutting down. Memory is cleared after shutdown so this is not strictly necessary, but it is kept for future hot-plugging and hot-unplugging of harts
@@ -115,9 +113,16 @@ pub extern "C" fn rust_main(hart_id: usize) -> ! {
}
async fn task_1() {
// let new_task = process::Task::new_kernel(task_3(), process);
// let shared_scheduler = process::shared_scheduler();
// process::shared_add_task(shared_scheduler, handle);
unsafe {
// create a new task
// In user space this should go through a system call that obtains a resource-allocation token once; the token replaces the "process" struct and is reused to acquire resources
let process = hart::KernelHartInfo::current_process().unwrap();
// build the new task
let new_task = process::KernelTask::new(task_3(), process);
// add it to the scheduler
let shared_scheduler = process::shared_scheduler();
process::shared_add_task(shared_scheduler, new_task.shared_task_handle());
}
println!("hello world from 1!");
}
@@ -125,9 +130,8 @@ async fn task_2() {
println!("hello world from 2!")
}
fn task_3() -> impl core::future::Future<Output = ()> {
async fn task_3() {
println!("hello world from 3!");
TestFuture::new_ready()
}
pub(crate) struct TestFuture {

View File

@@ -1,8 +1,4 @@
use crate::memory::{
frame_alloc, config::PAGE_SIZE,
PhysicalPageNumber, VirtualAddress, VirtualPageNumber,
frame::FrameTracker
};
use crate::memory::{AddressSpaceId, PhysicalPageNumber, VirtualAddress, VirtualPageNumber, config::PAGE_SIZE, frame::FrameTracker, frame_alloc};
use super::{Flags, MapType, Segment, page_table::{PageTable, PageTableTracker}, page_table_entry::PageTableEntry};
use alloc::{collections::VecDeque, vec::Vec};
use core::ops::Range;
@@ -20,7 +16,7 @@ pub struct Mapping {
}
impl Mapping {
/// Allocate a mapping with a root node
/// Allocate a mapping with a root node, including allocating an address space id
pub fn new_alloc() -> Option<Mapping> {
let root_table = PageTableTracker::new_zeroed(frame_alloc()?);
let root_ppn = root_table.page_number();
@@ -135,23 +131,26 @@ impl Mapping {
Some(allocated_pairs) // todo!
}
/// Save the current mapping into the satp register
pub fn activate(&self) {
pub fn activate(&self, asid: AddressSpaceId) {
// use riscv::{register::satp::{self, Mode}, asm};
// unsafe {
// // save into the satp register
// satp::set(Mode::Sv39, 0 /* asid */, self.root_ppn.into());
// satp::set(Mode::Sv39, 0 /* asid */, self.root_ppn.into());
// // flush the page-table cache
// asm::sfence_vma_all();
// }
// the low 27 bits of satp hold the page number and the top 4 bits the mode; 8 means Sv39
// bits 0..=43 of satp hold the page number, bits 44..=59 the address space id, and the top 4 bits the mode; 8 means Sv39
let root_ppn: usize = self.root_ppn.into();
let new_satp = root_ppn | (8 << 60);
unsafe {
let asid = asid.into_inner();
let new_satp = root_ppn | (asid << 44) | (8 << 60);
unsafe { asm!(
// write the value of new_satp into the satp register
llvm_asm!("csrw satp, $0" :: "r"(new_satp) :: "volatile");
// flush the TLB
llvm_asm!("sfence.vma" :::: "volatile");
}
"csrw satp, {satp}",
// flush the page table. rs1 = x0 and rs2 = asid, so all addresses belonging to this address space are flushed
"sfence.vma x0, {asid}",
satp = in(reg) new_satp,
asid = in(reg) asid
) };
}
}
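As a quick sanity check of the layout described above, here is an illustrative computation of the satp value (the numbers are made up, not taken from the kernel):

// Illustrative only: PPN in bits 0..=43, ASID in bits 44..=59, mode in bits 60..=63.
let root_ppn: usize = 0x8_0200;                     // example root page number
let asid: usize = 1;                                // example address space id
let new_satp = root_ppn | (asid << 44) | (8 << 60); // 8 selects Sv39
assert_eq!(new_satp >> 60, 8);                      // mode field
assert_eq!((new_satp >> 44) & 0xffff, asid);        // ASID field
assert_eq!(new_satp & ((1 << 44) - 1), root_ppn);   // PPN field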

View File

@@ -1,17 +1,19 @@
use crate::memory::config::{FREE_MEMORY_START, MEMORY_END_ADDRESS, PAGE_SIZE};
use crate::memory::{Mapping, MapType, Segment, Flags, VirtualAddress, VirtualPageNumber, FrameTracker};
use crate::memory::{Mapping, MapType, Segment, Flags, VirtualAddress, VirtualPageNumber, FrameTracker, AddressSpaceId};
use alloc::vec::Vec;
use core::ops::Range;
/// All memory-related information in one context
/// All memory-related information in one address space
#[derive(Debug)]
pub struct MemorySet {
/// The page table and mappings of this context
/// The page table and mappings of this address space
pub mapping: Mapping,
/// Each segment
pub segments: Vec<Segment>,
/// Mapping information for all allocated physical pages
pub allocated_pairs: Vec<(VirtualPageNumber, FrameTracker)>,
/// The address space id of this mapping
pub address_space_id: AddressSpaceId
}
impl MemorySet {
@@ -93,7 +95,8 @@ impl MemorySet {
for segment in segments.iter() {
mapping.map_segment(segment, None)?;
}
Some(MemorySet { mapping, segments, allocated_pairs })
let address_space_id = AddressSpaceId::kernel();
Some(MemorySet { mapping, segments, allocated_pairs, address_space_id })
}
/// Check whether a memory range overlaps any existing one
pub fn overlap_with(&self, range: Range<VirtualPageNumber>) -> bool {
@@ -153,7 +156,8 @@ impl MemorySet {
///
/// If the current page table is already this one, it is not replaced, but the TLB is still flushed.
pub fn activate(&self) {
self.mapping.activate()
println!("Activating memory set in asid {:?}", self.address_space_id);
self.mapping.activate(self.address_space_id)
}
}

View File

@@ -21,6 +21,12 @@ impl AddressSpaceId {
pub fn kernel() -> AddressSpaceId {
AddressSpaceId(0)
}
pub(crate) unsafe fn from_raw(asid: usize) -> AddressSpaceId {
AddressSpaceId(asid as u16)
}
pub(crate) fn into_inner(self) -> usize {
self.0 as usize
}
}
use alloc::vec::Vec;
@@ -30,7 +36,7 @@ lazy_static::lazy_static! {
spin::Mutex::new((Vec::new(), 0)); // the remaining space
}
pub fn riscv_max_asid() -> AddressSpaceId {
pub fn max_asid() -> AddressSpaceId {
#[cfg(target_pointer_width = "64")]
let mut val: usize = ((1 << 16) - 1) << 44;
#[cfg(target_pointer_width = "32")]
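The 32-bit arm of this hunk is cut off above, but the mask ((1 << 16) - 1) << 44 matches the standard RISC-V way of discovering how many ASID bits the hardware implements: write all-ones into satp's ASID field and read back which bits stick. A rough sketch of that probe follows, as an assumption about the technique rather than the exact code in this file:

// Hypothetical probe_max_asid: returns the largest ASID value the hardware supports
// (0 if ASIDs are not implemented). Assumes satp may safely be modified briefly here.
fn probe_max_asid() -> usize {
    let mask: usize = ((1 << 16) - 1) << 44; // all-ones in satp's ASID field
    let old: usize;
    let probed: usize;
    unsafe { asm!(
        "csrrs {old}, satp, {mask}",   // set the ASID bits on top of the current satp
        "csrr  {probed}, satp",        // read back which bits actually stuck
        "csrw  satp, {old}",           // restore the original satp value
        old = out(reg) old,
        probed = out(reg) probed,
        mask = in(reg) mask,
    ) };
    (probed >> 44) & 0xffff
}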

View File

@@ -3,6 +3,7 @@ use alloc::sync::Arc;
use core::ops::Range;
use spin::Mutex;
use crate::memory::{AddressSpaceId, Flags, MemorySet, STACK_SIZE, VirtualAddress};
use crate::hart::KernelHartInfo;
/// All information about a process
#[derive(Debug)]
@@ -20,8 +21,6 @@ pub struct Process {
pub struct ProcessInner {
/// The memory mapping shared by all tasks in the process
memory_set: MemorySet,
/// The address space id of the process
address_space_id: AddressSpaceId,
}
impl Process {
@@ -29,16 +28,18 @@ impl Process {
///
/// Returns None if memory allocation fails
pub fn new_kernel() -> Option<Arc<Self>> {
let address_space_id = AddressSpaceId::kernel();
unsafe { crate::hart::KernelHartInfo::load_address_space_id(address_space_id) };
Some(Arc::new(Process {
let process = Arc::new(Process {
id: next_process_id(),
is_user: false,
inner: Mutex::new(ProcessInner {
memory_set: MemorySet::new_kernel()?,
address_space_id,
})
}))
});
unsafe {
KernelHartInfo::load_address_space_id(process.address_space_id());
KernelHartInfo::load_process(process.clone());
};
Some(process)
}
// /// Get the process id
// pub fn process_id(&self) -> ProcessId {
@@ -46,7 +47,7 @@ impl Process {
// }
/// Get the address space id of the process
pub fn address_space_id(&self) -> AddressSpaceId {
self.inner.lock().address_space_id
self.inner.lock().memory_set.address_space_id
}
/// Allocate a new task stack in this process's address space
pub fn alloc_stack(&self) -> Option<Range<VirtualAddress>> {