Compare commits

...

8 Commits

Author SHA1 Message Date
Zhiyuan Shao f287fda5a6 init commit of lab3_3 2021-08-18 10:54:39 +08:00
Zhiyuan Shao 22309fdb73 init commit of lab3_2 2021-08-18 10:53:32 +08:00
Zhiyuan Shao 24cbe04af7 init commit of lab3_1 2021-08-18 10:52:14 +08:00
Zhiyuan Shao 895f4e9a6a init commit of lab2_3 2021-08-18 10:47:26 +08:00
Zhiyuan Shao 2fd2f52c73 init commit of lab2_2 2021-08-18 10:45:52 +08:00
Zhiyuan Shao 075681b957 init commit of lab2_1 2021-08-18 10:44:36 +08:00
Zhiyuan Shao 6df8c85479 init commit of lab1_3 2021-08-18 10:38:56 +08:00
Zhiyuan Shao 8c64512cab init commit of lab1_2 2021-08-18 10:36:55 +08:00
27 changed files with 1144 additions and 75 deletions

View File

@ -63,14 +63,14 @@ SPIKE_INF_LIB := $(OBJ_DIR)/spike_interface.a
#--------------------- user -----------------------
USER_LDS := user/user.lds
USER_CPPS := user/*.c
USER_CPPS := $(wildcard $(USER_CPPS))
USER_OBJS := $(addprefix $(OBJ_DIR)/, $(patsubst %.c,%.o,$(USER_CPPS)))
USER_TARGET := $(OBJ_DIR)/app_helloworld
USER_TARGET := $(OBJ_DIR)/app_two_long_loops
#------------------------targets------------------------
$(OBJ_DIR):
@-mkdir -p $(OBJ_DIR)
@ -102,9 +102,9 @@ $(KERNEL_TARGET): $(OBJ_DIR) $(UTIL_LIB) $(SPIKE_INF_LIB) $(KERNEL_OBJS) $(KERNE
@$(COMPILE) $(KERNEL_OBJS) $(UTIL_LIB) $(SPIKE_INF_LIB) -o $@ -T $(KERNEL_LDS)
@echo "PKE core has been built into" \"$@\"
$(USER_TARGET): $(OBJ_DIR) $(UTIL_LIB) $(USER_OBJS) $(USER_LDS)
$(USER_TARGET): $(OBJ_DIR) $(UTIL_LIB) $(USER_OBJS)
@echo "linking" $@ ...
@$(COMPILE) $(USER_OBJS) $(UTIL_LIB) -o $@ -T $(USER_LDS)
@$(COMPILE) --entry=main $(USER_OBJS) $(UTIL_LIB) -o $@
@echo "User app has been built into" \"$@\"
-include $(wildcard $(OBJ_DIR)/*/*.d)

View File

@ -4,17 +4,13 @@
// we use only one HART (cpu) in fundamental experiments
#define NCPU 1
#define DRAM_BASE 0x80000000
//interval of timer interrupt
#define TIMER_INTERVAL 1000000
/* we use fixed physical (also logical) addresses for the stacks and trap frames as in
Bare memory-mapping mode */
// user stack top
#define USER_STACK 0x81100000
// the maximum memory space that PKE is allowed to manage
#define PKE_MAX_ALLOWABLE_RAM (128 * 1024 * 1024)
// the stack used by PKE kernel when a syscall happens
#define USER_KSTACK 0x81200000
// the trap frame used to assemble the user "process"
#define USER_TRAP_FRAME 0x81300000
// the ending physical address that PKE observes
#define PHYS_TOP (DRAM_BASE + PKE_MAX_ALLOWABLE_RAM)
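// (with the defaults above, PHYS_TOP = 0x80000000 + 0x8000000 = 0x88000000)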
#endif

View File

@ -6,6 +6,8 @@
#include "elf.h"
#include "string.h"
#include "riscv.h"
#include "vmm.h"
#include "pmm.h"
#include "spike_interface/spike_utils.h"
typedef struct elf_info_t {
@ -17,8 +19,17 @@ typedef struct elf_info_t {
// the implementation of the allocator: allocates memory space for later segment loading
//
static void *elf_alloc_mb(elf_ctx *ctx, uint64 elf_pa, uint64 elf_va, uint64 size) {
// directly returns the virtual address as we are in the Bare mode in lab1
return (void *)elf_va;
elf_info *msg = (elf_info *)ctx->info;
// We assume that the size of a program segment is smaller than a page.
kassert(size < PGSIZE);
void *pa = alloc_page();
if (pa == 0) panic("uvmalloc mem alloc failed\n");
memset((void *)pa, 0, PGSIZE);
user_vm_map((pagetable_t)msg->p->pagetable, elf_va, PGSIZE, (uint64)pa,
prot_to_type(PROT_WRITE | PROT_READ | PROT_EXEC, 1));
return pa;
}
//
@ -46,7 +57,7 @@ elf_status elf_init(elf_ctx *ctx, void *info) {
}
//
// load the elf segments to memory regions as we are in Bare mode in lab1
// load the elf segments to memory regions
//
elf_status elf_load(elf_ctx *ctx) {
elf_prog_header ph_addr;
@ -66,6 +77,24 @@ elf_status elf_load(elf_ctx *ctx) {
// actual loading
if (elf_fpread(ctx, dest, ph_addr.memsz, ph_addr.off) != ph_addr.memsz)
return EL_EIO;
// record the vm region in proc->mapped_info
process *p = ((elf_info *)ctx->info)->p;
int j;
for( j=0; j<PGSIZE/sizeof(mapped_region); j++ )
if( p->mapped_info[j].va == 0x0 ) break;
p->mapped_info[j].va = ph_addr.vaddr;
p->mapped_info[j].npages = 1;
if( ph_addr.flags == (SEGMENT_READABLE|SEGMENT_EXECUTABLE) ){
p->mapped_info[j].seg_type = CODE_SEGMENT;
sprint( "CODE_SEGMENT added at mapped info offset:%d\n", j );
}else if ( ph_addr.flags == (SEGMENT_READABLE|SEGMENT_WRITABLE) ){
p->mapped_info[j].seg_type = DATA_SEGMENT;
sprint( "DATA_SEGMENT added at mapped info offset:%d\n", j );
}else
panic( "unknown program segment encountered, segment flag:%d.\n", ph_addr.flags );
p->total_mapped_region ++;
}
return EL_OK;

View File

@ -25,6 +25,11 @@ typedef struct elf_header_t {
uint16 shstrndx; /* Section header string table index */
} elf_header;
// segment types, attributes of elf_prog_header_t.flags
#define SEGMENT_READABLE 0x4
#define SEGMENT_EXECUTABLE 0x1
#define SEGMENT_WRITABLE 0x2
// Program segment header.
typedef struct elf_prog_header_t {
uint32 type; /* Segment type */

View File

@ -6,22 +6,42 @@
#include "string.h"
#include "elf.h"
#include "process.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "memlayout.h"
#include "spike_interface/spike_utils.h"
process user_app;
//
// trap_sec_start points to the beginning of S-mode trap segment (i.e., the entry point of
// S-mode trap vector).
//
extern char trap_sec_start[];
//
// turn on paging.
//
void enable_paging() {
// write the pointer to kernel page (table) directory into the CSR of "satp".
write_csr(satp, MAKE_SATP(g_kernel_pagetable));
// refresh tlb to invalidate its content.
flush_tlb();
}
//
// load the elf, and construct a "process" (with only a trapframe).
// load_bincode_from_host_elf is defined in elf.c
//
void load_user_program(process *proc) {
proc->trapframe = (trapframe *)USER_TRAP_FRAME;
memset(proc->trapframe, 0, sizeof(trapframe));
proc->kstack = USER_KSTACK;
proc->trapframe->regs.sp = USER_STACK;
process* load_user_program( ) {
process* proc;
proc = alloc_process();
sprint("User application is loading.\n");
load_bincode_from_host_elf(proc);
return proc;
}
//
@ -29,15 +49,26 @@ void load_user_program(process *proc) {
//
int s_start(void) {
sprint("Enter supervisor mode...\n");
// Note: we use direct (i.e., Bare mode) for memory mapping in lab1.
// which means: Virtual Address = Physical Address
// in the beginning, we use Bare mode (direct) memory mapping as in lab1,
// but now switch to paging mode in lab2.
write_csr(satp, 0);
// the application code (elf) is first loaded into memory, and then put into execution
load_user_program(&user_app);
// init physical memory manager
pmm_init();
// build the kernel page table
kern_vm_init();
// now, switch to paging mode by turning on paging (SV39)
enable_paging();
sprint("kernel page table is on \n");
init_proc_pool();
// the application code (elf) is first loaded into memory, and then put into execution
sprint("Switch to user mode...\n");
switch_to(&user_app);
insert_to_ready_queue( load_user_program() );
schedule();
return 0;
}

View File

@ -16,11 +16,15 @@ __attribute__((aligned(16))) char stack0[4096 * NCPU];
// sstart is the supervisor state entry point
extern void s_start(); // defined in kernel/kernel.c
// M-mode trap entry point
extern void mtrapvec();
// htif is defined in kernel/machine/spike_htif.c, marks the availability of HTIF
extern uint64 htif;
// g_mem_size is defined in kernel/machine/spike_memory.c, size of the emulated memory
extern uint64 g_mem_size;
// g_itrframe is used for saving registers when an interrupt happens in M mode
struct riscv_regs g_itrframe;
//
// get the information of HTIF (calling interface) and the emulated memory by
@ -62,6 +66,17 @@ static void delegate_traps() {
assert(read_csr(medeleg) == exceptions);
}
//
// enabling timer interrupt (irq) in Machine mode
//
void timerinit(uintptr_t hartid) {
// fire timer irq after TIMER_INTERVAL from now.
*(uint64*)CLINT_MTIMECMP(hartid) = *(uint64*)CLINT_MTIME + TIMER_INTERVAL;
// enable machine-mode timer irq in MIE (Machine Interrupt Enable) csr.
write_csr(mie, read_csr(mie) | MIE_MTIE);
}
//
// m_start: machine mode C entry point.
//
@ -73,14 +88,26 @@ void m_start(uintptr_t hartid, uintptr_t dtb) {
// init HTIF (Host-Target InterFace) and memory by using the Device Table Blob (DTB)
init_dtb(dtb);
// save the address of frame for interrupt in M mode to csr "mscratch".
write_csr(mscratch, &g_itrframe);
// set previous privilege mode to S (Supervisor), and will enter S mode after 'mret'
write_csr(mstatus, ((read_csr(mstatus) & ~MSTATUS_MPP_MASK) | MSTATUS_MPP_S));
// set M Exception Program Counter to sstart, for mret (requires gcc -mcmodel=medany)
write_csr(mepc, (uint64)s_start);
// setup trap handling vector
write_csr(mtvec, (uint64)mtrapvec);
// enable machine-mode interrupts.
write_csr(mstatus, read_csr(mstatus) | MSTATUS_MIE);
// delegate all interrupts and exceptions to supervisor mode.
delegate_traps();
write_csr(sie, read_csr(sie) | SIE_SEIE | SIE_STIE | SIE_SSIE);
timerinit(hartid);
// switch to supervisor mode and jump to s_start(), i.e., set pc to mepc
asm volatile("mret");

kernel/machine/mtrap.c Normal file
View File

@ -0,0 +1,63 @@
#include "kernel/riscv.h"
#include "kernel/process.h"
#include "spike_interface/spike_utils.h"
static void handle_instruction_access_fault() { panic("Instruction access fault!"); }
static void handle_load_access_fault() { panic("Load access fault!"); }
static void handle_store_access_fault() { panic("Store/AMO access fault!"); }
static void handle_illegal_instruction() { panic("Illegal instruction!"); }
static void handle_misaligned_load() { panic("Misaligned Load!"); }
static void handle_misaligned_store() { panic("Misaligned AMO!"); }
static void handle_timer() {
int cpuid = 0;
// arm the timer to fire again, TIMER_INTERVAL from now
*(uint64*)CLINT_MTIMECMP(cpuid) = *(uint64*)CLINT_MTIMECMP(cpuid) + TIMER_INTERVAL;
// setup a soft interrupt in sip (S-mode Interrupt Pending) to be handled in S-mode
write_csr(sip, SIP_SSIP);
}
//
// handle_mtrap calls corresponding functions to handle an exception of a given type.
//
void handle_mtrap() {
uint64 mcause = read_csr(mcause);
switch (mcause) {
case CAUSE_MTIMER:
handle_timer();
break;
case CAUSE_FETCH_ACCESS:
handle_instruction_access_fault();
break;
case CAUSE_LOAD_ACCESS:
handle_load_access_fault();
break;
case CAUSE_STORE_ACCESS:
handle_store_access_fault();
break;
case CAUSE_ILLEGAL_INSTRUCTION:
// TODO (lab1_2): call handle_illegal_instruction to implement illegal instruction
// interception, and finish lab1_2.
panic( "call handle_illegal_instruction to accomplish illegal instruction interception for lab1_2.\n" );
break;
case CAUSE_MISALIGNED_LOAD:
handle_misaligned_load();
break;
case CAUSE_MISALIGNED_STORE:
handle_misaligned_store();
break;
default:
sprint("machine trap(): unexpected mscause %p\n", mcause);
sprint(" mepc=%p mtval=%p\n", read_csr(mepc), read_csr(mtval));
panic( "unexpected exception happened in M-mode.\n" );
break;
}
}
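
For reference, the lab1_2 exercise above reduces to routing the trap to the handler the hint names. A minimal sketch of the finished case, replacing the panic, might be:

    case CAUSE_ILLEGAL_INSTRUCTION:
      handle_illegal_instruction();
      break;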

View File

@ -0,0 +1,38 @@
#include "util/load_store.S"
#
# M-mode trap entry point
#
.globl mtrapvec
.align 4
mtrapvec:
# swap a0 and mscratch
# so that a0 points to interrupt frame
csrrw a0, mscratch, a0
# save the registers in interrupt frame
addi t6, a0, 0
store_all_registers
# save the user a0 in itrframe->a0
csrr t0, mscratch
sd t0, 72(a0)
# use stack0 for sp
la sp, stack0
li a3, 4096
csrr a4, mhartid
addi a4, a4, 1
mul a3, a3, a4
add sp, sp, a3
# save the address of interrupt frame in the csr "mscratch"
csrw mscratch, a0
call handle_mtrap
# restore all registers
csrr t6, mscratch
restore_all_registers
mret

kernel/memlayout.h Normal file
View File

@ -0,0 +1,20 @@
#ifndef _MEMLAYOUT_H
#define _MEMLAYOUT_H
#include "riscv.h"
// RISC-V machine places its physical memory above DRAM_BASE
#define DRAM_BASE 0x80000000
// the beginning virtual address of PKE kernel
#define KERN_BASE 0x80000000
// default stack size
#define STACK_SIZE 4096
// virtual address of stack top of user process
#define USER_STACK_TOP 0x7ffff000
// simple heap bottom, virtual address starts from 4MB
#define USER_FREE_ADDRESS_START (0x00000000 + PGSIZE * 1024)
#endif

kernel/pmm.c Normal file
View File

@ -0,0 +1,87 @@
#include "pmm.h"
#include "util/functions.h"
#include "riscv.h"
#include "config.h"
#include "util/string.h"
#include "memlayout.h"
#include "spike_interface/spike_utils.h"
// _end is defined in kernel/kernel.lds, it marks the ending (virtual) address of PKE kernel
extern char _end[];
// g_mem_size is defined in spike_interface/spike_memory.c, it indicates the size of our
// (emulated) spike machine.
extern uint64 g_mem_size;
static uint64 free_mem_start_addr; //beginning address of free memory
static uint64 free_mem_end_addr; //end address of free memory (not included)
typedef struct node {
struct node *next;
} list_node;
// g_free_mem_list is the head of the list of free physical memory pages
static list_node g_free_mem_list;
//
// actually creates the freepage list. each page occupies 4KB (PGSIZE)
//
static void create_freepage_list(uint64 start, uint64 end) {
g_free_mem_list.next = 0;
for (uint64 p = ROUNDUP(start, PGSIZE); p + PGSIZE <= end; p += PGSIZE)
free_page( (void *)p );
}
//
// place a physical page at *pa to the free list of g_free_mem_list (to reclaim the page)
//
void free_page(void *pa) {
if (((uint64)pa % PGSIZE) != 0 || (uint64)pa < free_mem_start_addr || (uint64)pa >= free_mem_end_addr)
panic("free_page 0x%lx \n", pa);
// insert a physical page to g_free_mem_list
list_node *n = (list_node *)pa;
n->next = g_free_mem_list.next;
g_free_mem_list.next = n;
}
//
// takes the first free page from g_free_mem_list, and returns (allocates) it.
// Allocates only ONE page!
//
void *alloc_page(void) {
list_node *n = g_free_mem_list.next;
if (n) g_free_mem_list.next = n->next;
return (void *)n;
}
//
// pmm_init() establishes the list of free physical pages according to available
// physical memory space.
//
void pmm_init() {
// start of kernel program segment
uint64 g_kernel_start = KERN_BASE;
uint64 g_kernel_end = (uint64)&_end;
uint64 pke_kernel_size = g_kernel_end - g_kernel_start;
sprint("PKE kernel start 0x%lx, PKE kernel end: 0x%lx, PKE kernel size: 0x%lx .\n",
g_kernel_start, g_kernel_end, pke_kernel_size);
// free memory starts from the end of PKE kernel and must be page-aligned
free_mem_start_addr = ROUNDUP(g_kernel_end, PGSIZE);
// recompute g_mem_size to limit the physical memory space that PKE kernel
// needs to manage
g_mem_size = MIN(PKE_MAX_ALLOWABLE_RAM, g_mem_size);
if( g_mem_size < pke_kernel_size )
panic( "Error when recomputing physical memory size (g_mem_size).\n" );
free_mem_end_addr = g_mem_size + DRAM_BASE;
sprint("free physical memory address: [0x%lx, 0x%lx] \n", free_mem_start_addr,
free_mem_end_addr - 1);
sprint("kernel memory manager is initializing ...\n");
// create the list of free pages
create_freepage_list(free_mem_start_addr, free_mem_end_addr);
}

kernel/pmm.h Normal file
View File

@ -0,0 +1,11 @@
#ifndef _PMM_H_
#define _PMM_H_
// Initialize physical memory manager
void pmm_init();
// Allocate a free physical page
void* alloc_page();
// Free an allocated page
void free_page(void* pa);
#endif

View File

@ -12,16 +12,31 @@
#include "process.h"
#include "elf.h"
#include "string.h"
#include "vmm.h"
#include "pmm.h"
#include "memlayout.h"
#include "sched.h"
#include "spike_interface/spike_utils.h"
//Two functions defined in kernel/usertrap.S
extern char smode_trap_vector[];
extern void return_to_user(trapframe*);
extern void return_to_user(trapframe *, uint64 satp);
//
// trap_sec_start points to the beginning of S-mode trap segment (i.e., the entry point
// of S-mode trap vector).
//
extern char trap_sec_start[];
// current points to the currently running user-mode application.
process* current = NULL;
// process pool
process procs[NPROC];
// start virtual address of our simple heap.
uint64 g_ufree_page = USER_FREE_ADDRESS_START;
//
// switch to a user-mode process
//
@ -32,7 +47,8 @@ void switch_to(process* proc) {
write_csr(stvec, (uint64)smode_trap_vector);
// set up trapframe values that smode_trap_vector will need when
// the process next re-enters the kernel.
proc->trapframe->kernel_sp = proc->kstack; // process's kernel stack
proc->trapframe->kernel_satp = read_csr(satp); // kernel page table
proc->trapframe->kernel_trap = (uint64)smode_trap_handler;
// set up the registers that strap_vector.S's sret will use
@ -48,6 +64,148 @@ void switch_to(process* proc) {
// set S Exception Program Counter to the saved user pc.
write_csr(sepc, proc->trapframe->epc);
//make user page table
uint64 user_satp = MAKE_SATP(proc->pagetable);
// switch to user mode with sret.
return_to_user(proc->trapframe);
return_to_user(proc->trapframe, user_satp);
}
//
// initialize process pool (the procs[] array)
//
void init_proc_pool() {
memset( procs, 0, sizeof(struct process)*NPROC );
for (int i = 0; i < NPROC; ++i) {
procs[i].status = FREE;
procs[i].pid = i;
}
}
//
// allocate an empty process, init its vm space. returns the pointer to the process
//
process* alloc_process() {
// locate the first usable process structure
int i;
for( i=0; i<NPROC; i++ )
if( procs[i].status == FREE ) break;
if( i>=NPROC ){
panic( "cannot find any free process structure.\n" );
return 0;
}
// init proc[i]'s vm space
procs[i].trapframe = (trapframe *)alloc_page(); //trapframe, used to save context
memset(procs[i].trapframe, 0, sizeof(trapframe));
// page directory
procs[i].pagetable = (pagetable_t)alloc_page();
memset((void *)procs[i].pagetable, 0, PGSIZE);
procs[i].kstack = (uint64)alloc_page() + PGSIZE; //user kernel stack top
uint64 user_stack = (uint64)alloc_page(); //physical address of user stack bottom
procs[i].trapframe->regs.sp = USER_STACK_TOP; //virtual address of user stack top
// allocates a page to record memory regions (segments)
procs[i].mapped_info = (mapped_region*)alloc_page();
memset( procs[i].mapped_info, 0, PGSIZE );
// map user stack in userspace
user_vm_map((pagetable_t)procs[i].pagetable, USER_STACK_TOP - PGSIZE, PGSIZE,
user_stack, prot_to_type(PROT_WRITE | PROT_READ, 1));
procs[i].mapped_info[0].va = USER_STACK_TOP - PGSIZE;
procs[i].mapped_info[0].npages = 1;
procs[i].mapped_info[0].seg_type = STACK_SEGMENT;
// map trapframe in user space (direct mapping as in kernel space).
user_vm_map((pagetable_t)procs[i].pagetable, (uint64)procs[i].trapframe, PGSIZE,
(uint64)procs[i].trapframe, prot_to_type(PROT_WRITE | PROT_READ, 0));
procs[i].mapped_info[1].va = (uint64)procs[i].trapframe;
procs[i].mapped_info[1].npages = 1;
procs[i].mapped_info[1].seg_type = CONTEXT_SEGMENT;
// map S-mode trap vector section in user space (direct mapping as in kernel space)
// we assume that the size of usertrap.S is smaller than a page.
user_vm_map((pagetable_t)procs[i].pagetable, (uint64)trap_sec_start, PGSIZE,
(uint64)trap_sec_start, prot_to_type(PROT_READ | PROT_EXEC, 0));
procs[i].mapped_info[2].va = (uint64)trap_sec_start;
procs[i].mapped_info[2].npages = 1;
procs[i].mapped_info[2].seg_type = SYSTEM_SEGMENT;
sprint("in alloc_proc. user frame 0x%lx, user stack 0x%lx, user kstack 0x%lx \n",
procs[i].trapframe, procs[i].trapframe->regs.sp, procs[i].kstack);
procs[i].total_mapped_region = 3;
// return after initialization.
return &procs[i];
}
//
// reclaim a process
//
int free_process( process* proc ) {
// we set the status to ZOMBIE, but cannot destruct its vm space immediately,
// since proc can be the current process, and its user kernel stack is still in use!
// for a proxy kernel, though, this memory leak may NOT be a serious issue,
// unlike in a regular OS, which needs to run 24x7.
proc->status = ZOMBIE;
return 0;
}
//
// implements fork syscall in kernel.
// basic idea here is to first allocate an empty process (child), then duplicate the
// context and data segments of parent process to the child, and lastly, map other
// segments (code, system) of the parent to child. the stack segment remains unchanged
// for the child.
//
int do_fork( process* parent)
{
sprint( "will fork a child from parent %d.\n", parent->pid );
process* child = alloc_process();
for( int i=0; i<parent->total_mapped_region; i++ ){
// browse parent's vm space, and copy its trapframe and data segments,
// map its code segment.
switch( parent->mapped_info[i].seg_type ){
case CONTEXT_SEGMENT:
*child->trapframe = *parent->trapframe;
break;
case STACK_SEGMENT:
memcpy( (void*)lookup_pa(child->pagetable, child->mapped_info[0].va),
(void*)lookup_pa(parent->pagetable, parent->mapped_info[i].va), PGSIZE );
break;
case CODE_SEGMENT:
// TODO (lab3_1): implement the mapping of child code segment to parent's
// code segment.
// hint: the virtual address mapping of code segment is tracked in mapped_info
// page of parent's process structure. use the information in mapped_info to
// retrieve the virtual to physical mapping of code segment.
// after having the mapping information, just map the corresponding virtual
// address region of child to the physical pages that actually store the code
// segment of parent process.
// DO NOT COPY THE PHYSICAL PAGES, JUST MAP THEM.
panic( "You need to implement the code segment mapping of child in lab3_1.\n" );
// after mapping, register the vm region (do not delete the code below!)
child->mapped_info[child->total_mapped_region].va = parent->mapped_info[i].va;
child->mapped_info[child->total_mapped_region].npages =
parent->mapped_info[i].npages;
child->mapped_info[child->total_mapped_region].seg_type = CODE_SEGMENT;
child->total_mapped_region++;
break;
}
}
child->status = READY;
child->trapframe->regs.a0 = 0;
child->parent = parent;
insert_to_ready_queue( child );
return child->pid;
}
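
One possible shape of the lab3_1 CODE_SEGMENT case above, assuming the parent's code pages are contiguous from mapped_info[i].va and reusing lookup_pa()/map_pages() from vmm.c (map the parent's physical pages, do not copy them):

    // share the parent's code pages with the child, page by page
    for (int k = 0; k < parent->mapped_info[i].npages; k++) {
      uint64 va = parent->mapped_info[i].va + k * PGSIZE;
      map_pages(child->pagetable, va, PGSIZE,
                lookup_pa(parent->pagetable, va),
                prot_to_type(PROT_READ | PROT_EXEC, 1));
    }

The mapped_info registration lines that follow the panic in the listing would then run unchanged.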

View File

@ -13,18 +13,80 @@ typedef struct trapframe {
/* offset:256 */ uint64 kernel_trap;
// saved user program counter
/* offset:264 */ uint64 epc;
//kernel page table
/* offset:272 */ uint64 kernel_satp;
}trapframe;
// PKE kernel supports at most 32 processes
#define NPROC 32
// possible status of a process
enum proc_status {
FREE, // unused state
READY, // ready state
RUNNING, // currently running
BLOCKED, // waiting for something
ZOMBIE, // terminated but not reclaimed yet
};
// types of a segment
enum segment_type {
CODE_SEGMENT, // ELF segment
DATA_SEGMENT, // ELF segment
STACK_SEGMENT, // runtime segment
CONTEXT_SEGMENT, // trapframe segment
SYSTEM_SEGMENT, // system segment
};
// the VM regions mapped to a user process
typedef struct mapped_region {
uint64 va; // mapped virtual address
uint32 npages; // this mapped_region is unused if npages == 0
uint32 seg_type; // segment type, one of the segment_types
} mapped_region;
// the extremely simple definition of process, used for beginning labs of PKE
typedef struct process {
// pointing to the stack used in trap handling.
uint64 kstack;
// user page table
pagetable_t pagetable;
// trapframe storing the context of a (User mode) process.
trapframe* trapframe;
// points to a page that contains mapped_regions
mapped_region *mapped_info;
// next free mapped region in mapped_info
int total_mapped_region;
// process id
uint64 pid;
// process status
int status;
// parent process
struct process *parent;
// next queue element
struct process *queue_next;
// accounting
int tick_count;
}process;
// switch to run user app
void switch_to(process*);
// initialize process pool (the procs[] array)
void init_proc_pool();
// allocate an empty process, init its vm space. returns the pointer to the process
process* alloc_process();
// reclaim a process (sets its status to ZOMBIE; its vm space is not freed immediately, see process.c)
int free_process( process* proc );
// fork a child from parent
int do_fork(process* parent);
// current running process
extern process* current;
// virtual address of our simple heap
extern uint64 g_ufree_page;
#endif

View File

@ -52,6 +52,18 @@
#define CAUSE_LOAD_PAGE_FAULT 0xd // Load page fault
#define CAUSE_STORE_PAGE_FAULT 0xf // Store/AMO page fault
// irqs (interrupts)
#define CAUSE_MTIMER 0x8000000000000007
#define CAUSE_MTIMER_S_TRAP 0x8000000000000001
//Supervisor interrupt-pending register
#define SIP_SSIP (1L << 1)
// core local interruptor (CLINT), which contains the timer.
#define CLINT 0x2000000L
#define CLINT_MTIMECMP(hartid) (CLINT + 0x4000 + 8 * (hartid))
#define CLINT_MTIME (CLINT + 0xBFF8) // cycles since boot.
// fields of sstatus, the Supervisor mode Status register
#define SSTATUS_SPP (1L << 8) // Previous mode, 1=Supervisor, 0=User
#define SSTATUS_SPIE (1L << 5) // Supervisor Previous Interrupt Enable
@ -135,6 +147,42 @@ static inline uint64 read_tp(void) {
// write tp, the thread pointer, holding hartid (core number), the index into cpus[].
static inline void write_tp(uint64 x) { asm volatile("mv tp, %0" : : "r"(x)); }
static inline void flush_tlb(void) { asm volatile("sfence.vma zero, zero"); }
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
// use riscv's sv39 page table scheme.
#define SATP_SV39 (8L << 60)
#define MAKE_SATP(pagetable) (SATP_SV39 | (((uint64)pagetable) >> 12))
#define PTE_V (1L << 0) // valid
#define PTE_R (1L << 1)
#define PTE_W (1L << 2)
#define PTE_X (1L << 3)
#define PTE_U (1L << 4) // 1 -> user can access
#define PTE_G (1L << 5) // Global
#define PTE_A (1L << 6) // Accessed
#define PTE_D (1L << 7) // Dirty
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
#define PTE2PA(pte) (((pte) >> 10) << 12)
#define PTE_FLAGS(pte) ((pte)&0x3FF)
// extract the three 9-bit page table indices from a virtual address.
#define PXMASK 0x1FF // 9 bits
#define PXSHIFT(level) (PGSHIFT + (9 * (level)))
#define PX(level, va) ((((uint64)(va)) >> PXSHIFT(level)) & PXMASK)
// one beyond the highest possible virtual address.
// MAXVA is actually one bit less than the max allowed by
// Sv39, to avoid having to sign-extend virtual addresses
// that have the high bit set.
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))
typedef uint64 pte_t;
typedef uint64 *pagetable_t; // 512 PTEs
typedef struct riscv_regs {
/* 0 */ uint64 ra;
/* 8 */ uint64 sp;

kernel/sched.c Normal file
View File

@ -0,0 +1,73 @@
/*
* implementing the scheduler
*/
#include "sched.h"
#include "spike_interface/spike_utils.h"
process* ready_queue_head = NULL;
//
// insert a process, proc, into the END of ready queue.
//
void insert_to_ready_queue( process* proc ) {
sprint( "going to insert process %d to ready queue.\n", proc->pid );
// if the queue is empty in the beginning
if( ready_queue_head == NULL ){
proc->status = READY;
proc->queue_next = NULL;
ready_queue_head = proc;
return;
}
// ready queue is not empty
process *p;
// browse the ready queue to see if proc is already in-queue
for( p=ready_queue_head; p->queue_next!=NULL; p=p->queue_next )
if( p == proc ) return; //already in queue
// p points to the last element of the ready queue
if( p==proc ) return;
p->queue_next = proc;
proc->status = READY;
proc->queue_next = NULL;
return;
}
//
// choose a proc from the ready queue, and put it to run.
// note: schedule() does not take care of previous current process. If the current
// process is still runnable, you should place it into the ready queue (by calling
// insert_to_ready_queue), and then call schedule().
//
extern process procs[NPROC];
void schedule() {
if ( !ready_queue_head ){
// by default, if there are no ready processes, and all processes are in the status of
// FREE and ZOMBIE, we should shutdown the emulated RISC-V machine.
int should_shutdown = 1;
for( int i=0; i<NPROC; i++ )
if( (procs[i].status != FREE) && (procs[i].status != ZOMBIE) ){
should_shutdown = 0;
sprint( "ready queue empty, but process %d is not in free/zombie state:%d\n",
i, procs[i].status );
}
if( should_shutdown ){
sprint( "no more ready processes, system shutdown now.\n" );
shutdown( 0 );
}else{
panic( "Not handled: we should let system wait for unfinished processes.\n" );
}
}
current = ready_queue_head;
assert( current->status == READY );
ready_queue_head = ready_queue_head->queue_next;
current->status = RUNNING;
sprint( "going to schedule process %d to run.\n", current->pid );
switch_to( current );
}

kernel/sched.h Normal file
View File

@ -0,0 +1,12 @@
#ifndef _SCHED_H_
#define _SCHED_H_
#include "process.h"
//length of a time slice, in number of ticks
#define TIME_SLICE_LEN 2
void insert_to_ready_queue( process* proc );
void schedule();
#endif

View File

@ -6,6 +6,10 @@
#include "process.h"
#include "strap.h"
#include "syscall.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "util/functions.h"
#include "spike_interface/spike_utils.h"
@ -26,6 +30,52 @@ static void handle_syscall(trapframe *tf) {
}
//
// global variable that stores the recorded "ticks"
static uint64 g_ticks = 0;
void handle_mtimer_trap() {
sprint("Ticks %d\n", g_ticks);
// TODO (lab1_3): increase g_ticks to record this "tick", and then clear the "SIP"
// field in sip register.
// hint: use write_csr to disable the SIP_SSIP bit in sip.
panic( "lab1_3: increase g_ticks by one, and clear SIP field in sip register.\n" );
}
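
A sketch of the two operations the lab1_3 hint asks for, in place of the panic:

    g_ticks++;                                  // record this tick
    write_csr(sip, read_csr(sip) & ~SIP_SSIP);  // clear the SSIP bit raised by handle_timer() in M-mode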
//
// the page fault handler. the parameters:
// sepc: the pc when fault happens;
// stval: the virtual address that causes pagefault when being accessed.
//
void handle_user_page_fault(uint64 mcause, uint64 sepc, uint64 stval) {
sprint("handle_page_fault: %lx\n", stval);
switch (mcause) {
case CAUSE_STORE_PAGE_FAULT:
// TODO (lab2_3): implement the operations that solve the page fault to
// dynamically increase application stack.
// hint: first allocate a new physical page, and then map the new page to the
// virtual address that causes the page fault.
panic( "You need to implement the operations that actually handle the page fault in lab2_3.\n" );
break;
default:
sprint("unknown page fault.\n");
break;
}
}
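
One plausible sketch of the CAUSE_STORE_PAGE_FAULT case, assuming ROUNDDOWN (util/functions.h) and memset are visible in this file: allocate a fresh page and map it at the boundary of the faulting page.

    void *pa = alloc_page();
    memset(pa, 0, PGSIZE);
    user_vm_map((pagetable_t)current->pagetable, ROUNDDOWN(stval, PGSIZE), PGSIZE,
                (uint64)pa, prot_to_type(PROT_WRITE | PROT_READ, 1));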
//
// implements round-robin scheduling
//
void rrsched() {
// TODO (lab3_3): implement round-robin scheduling.
// hint: increase the tick_count member of current process by one; if it is bigger than
// TIME_SLICE_LEN (meaning it has consumed its time slice), change its status into READY,
// place it in the rear of ready queue, and finally schedule next process to run.
panic( "You need to further implement the timer handling in lab3_3.\n" );
}
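
A sketch that follows the rrsched hint literally (whether the comparison against TIME_SLICE_LEN should be > or >= is left to the lab):

    current->tick_count++;
    if (current->tick_count >= TIME_SLICE_LEN) {
      current->tick_count = 0;
      current->status = READY;
      insert_to_ready_queue(current);
      schedule();
    }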
//
// kernel/smode_trap.S will pass control to smode_trap_handler, when a trap happens
// in S-mode.
@ -40,12 +90,27 @@ void smode_trap_handler(void) {
current->trapframe->epc = read_csr(sepc);
// if the cause of trap is syscall from user application
if (read_csr(scause) == CAUSE_USER_ECALL) {
handle_syscall(current->trapframe);
} else {
sprint("smode_trap_handler(): unexpected scause %p\n", read_csr(scause));
sprint(" sepc=%p stval=%p\n", read_csr(sepc), read_csr(stval));
panic( "unexpected exception happened.\n" );
uint64 cause = read_csr(scause);
switch (cause) {
case CAUSE_USER_ECALL:
handle_syscall(current->trapframe);
break;
case CAUSE_MTIMER_S_TRAP:
handle_mtimer_trap();
rrsched();
break;
case CAUSE_STORE_PAGE_FAULT:
case CAUSE_LOAD_PAGE_FAULT:
// the address of missing page is stored in stval
// call handle_user_page_fault to process page faults
handle_user_page_fault(cause, read_csr(sepc), read_csr(stval));
break;
default:
sprint("smode_trap_handler(): unexpected scause %p\n", read_csr(scause));
sprint(" sepc=%p stval=%p\n", read_csr(sepc), read_csr(stval));
panic( "unexpected exception happened.\n" );
break;
}
// continue the execution of current process.

View File

@ -33,6 +33,11 @@ smode_trap_vector:
# load the address of smode_trap_handler() from p->trapframe->kernel_trap
ld t0, 256(a0)
# restore kernel page table from p->trapframe->kernel_satp
ld t1, 272(a0)
csrw satp, t1
sfence.vma zero, zero
# jump to smode_trap_handler() that is defined in kernel/trap.c
jr t0
@ -44,6 +49,13 @@ smode_trap_vector:
#
.globl return_to_user
return_to_user:
# a0: TRAPFRAME
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# save a0 in sscratch, so sscratch points to a trapframe now.
csrw sscratch, a0

View File

@ -10,6 +10,9 @@
#include "string.h"
#include "process.h"
#include "util/functions.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "spike_interface/spike_utils.h"
@ -17,7 +20,11 @@
// implement the SYS_user_print syscall
//
ssize_t sys_user_print(const char* buf, size_t n) {
sprint(buf);
//buf is an address in user space on the user stack,
//so we have to translate it into a physical address (the kernel runs with direct mapping).
assert( current );
char* pa = (char*)user_va_to_pa((pagetable_t)(current->pagetable), (void*)buf);
sprint(pa);
return 0;
}
@ -26,9 +33,52 @@ ssize_t sys_user_print(const char* buf, size_t n) {
//
ssize_t sys_user_exit(uint64 code) {
sprint("User exit with code:%d.\n", code);
// in lab1, PKE considers only one app (one process).
// therefore, shutdown the system when the app calls exit()
shutdown(code);
// in lab3 now, we should reclaim the current process, and reschedule.
free_process( current );
schedule();
return 0;
}
//
// maybe, the simplest implementation of malloc in the world ...
//
uint64 sys_user_allocate_page() {
void* pa = alloc_page();
uint64 va = g_ufree_page;
g_ufree_page += PGSIZE;
user_vm_map((pagetable_t)current->pagetable, va, PGSIZE, (uint64)pa,
prot_to_type(PROT_WRITE | PROT_READ, 1));
return va;
}
//
// reclaim a page, indicated by "va".
//
uint64 sys_user_free_page(uint64 va) {
user_vm_unmap((pagetable_t)current->pagetable, va, PGSIZE, 1);
return 0;
}
//
// kernel entry point of naive_fork
//
ssize_t sys_user_fork() {
sprint("User call fork.\n");
return do_fork( current );
}
//
// kernel entry point of yield
//
ssize_t sys_user_yield() {
// TODO (lab3_2): implement the syscall of yield.
// hint: the functionality of yield is to give up the processor. therefore,
// we should set the status of currently running process to READY, insert it in
// the rear of ready queue, and finally, schedule a READY process to run.
panic( "You need to implement the yield syscall in lab3_2.\n" );
return 0;
}
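
The yield hint above maps almost one-to-one onto scheduler calls that already exist in this diff; a minimal sketch in place of the panic:

    current->status = READY;
    insert_to_ready_queue(current);
    schedule();
    return 0;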
//
@ -41,6 +91,14 @@ long do_syscall(long a0, long a1, long a2, long a3, long a4, long a5, long a6, l
return sys_user_print((const char*)a1, a2);
case SYS_user_exit:
return sys_user_exit(a1);
case SYS_user_allocate_page:
return sys_user_allocate_page();
case SYS_user_free_page:
return sys_user_free_page(a1);
case SYS_user_fork:
return sys_user_fork();
case SYS_user_yield:
return sys_user_yield();
default:
panic("Unknown syscall %ld \n", a0);
}

View File

@ -8,6 +8,10 @@
#define SYS_user_base 64
#define SYS_user_print (SYS_user_base + 0)
#define SYS_user_exit (SYS_user_base + 1)
#define SYS_user_allocate_page (SYS_user_base + 2)
#define SYS_user_free_page (SYS_user_base + 3)
#define SYS_user_fork (SYS_user_base + 4)
#define SYS_user_yield (SYS_user_base + 5)
long do_syscall(long a0, long a1, long a2, long a3, long a4, long a5, long a6, long a7);

kernel/vmm.c Normal file
View File

@ -0,0 +1,206 @@
/*
* virtual address mapping related functions.
*/
#include "vmm.h"
#include "riscv.h"
#include "pmm.h"
#include "util/types.h"
#include "memlayout.h"
#include "util/string.h"
#include "spike_interface/spike_utils.h"
#include "util/functions.h"
/* --- utility functions for virtual address mapping --- */
//
// establish mapping of virtual address [va, va+size] to physical address [pa, pa+size]
// with the permission of "perm".
//
int map_pages(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
uint64 first, last;
pte_t *pte;
for (first = ROUNDDOWN(va, PGSIZE), last = ROUNDDOWN(va + size - 1, PGSIZE);
first <= last; first += PGSIZE, pa += PGSIZE) {
if ((pte = page_walk(page_dir, first, 1)) == 0) return -1;
if (*pte & PTE_V)
panic("map_pages fails on mapping va (0x%lx) to pa (0x%lx)", first, pa);
*pte = PA2PTE(pa) | perm | PTE_V;
}
return 0;
}
//
// convert permission code to permission types of PTE
//
uint64 prot_to_type(int prot, int user) {
uint64 perm = 0;
if (prot & PROT_READ) perm |= PTE_R | PTE_A;
if (prot & PROT_WRITE) perm |= PTE_W | PTE_D;
if (prot & PROT_EXEC) perm |= PTE_X | PTE_A;
if (perm == 0) perm = PTE_R;
if (user) perm |= PTE_U;
return perm;
}
//
// traverse the page table (starting from page_dir) to find the corresponding pte of va.
// returns: PTE (page table entry) pointing to va.
//
pte_t *page_walk(pagetable_t page_dir, uint64 va, int alloc) {
if (va >= MAXVA) panic("page_walk");
// starting from the page directory
pagetable_t pt = page_dir;
// traverse from page directory to page table.
// as we use risc-v sv39 paging scheme, there will be 3 layers: page dir,
// page medium dir, and page table.
for (int level = 2; level > 0; level--) {
// macro "PX" gets the PTE index in page table of current level
// "pte" points to the entry of current level
pte_t *pte = pt + PX(level, va);
// now, we need to know if above pte is valid (established mapping to physical page)
// or not.
if (*pte & PTE_V) { //PTE valid
// physical address of pagetable of next level
pt = (pagetable_t)PTE2PA(*pte);
} else { //PTE invalid (not exist).
// allocate a page (to be the new pagetable), if alloc == 1
if( alloc && ((pt = (pte_t *)alloc_page()) != 0) ){
memset(pt, 0, PGSIZE);
// writes the physical address of newly allocated page to pte, to establish the
// page table tree.
*pte = PA2PTE(pt) | PTE_V;
}else //returns NULL, if alloc == 0, or no more physical page remains
return 0;
}
}
// return a PTE which contains physical address of a page
return pt + PX(0, va);
}
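
As a concrete walk-through of the PX() indices, using USER_STACK_TOP from memlayout.h as the sample address:

    // va = 0x7ffff000 = 2^31 - 2^12, i.e., bits 12..30 are all set
    // PX(2, va) = (va >> 30) & 0x1ff = 1    -> slot in the page directory
    // PX(1, va) = (va >> 21) & 0x1ff = 511  -> slot in the middle-level table
    // PX(0, va) = (va >> 12) & 0x1ff = 511  -> slot of the returned leaf PTE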
//
// look up a virtual page address, return the physical page address or 0 if not mapped.
//
uint64 lookup_pa(pagetable_t pagetable, uint64 va) {
pte_t *pte;
uint64 pa;
if (va >= MAXVA) return 0;
pte = page_walk(pagetable, va, 0);
if (pte == 0 || (*pte & PTE_V) == 0 || ((*pte & PTE_R) == 0 && (*pte & PTE_W) == 0))
return 0;
pa = PTE2PA(*pte);
return pa;
}
/* --- kernel page table part --- */
// _etext is defined in kernel.lds, it points to the address after text and rodata segments.
extern char _etext[];
// pointer to kernel page directory
pagetable_t g_kernel_pagetable;
//
// maps virtual address [va, va+sz] to [pa, pa+sz] (for kernel).
//
void kern_vm_map(pagetable_t page_dir, uint64 va, uint64 pa, uint64 sz, int perm) {
if (map_pages(page_dir, va, sz, pa, perm) != 0) panic("kern_vm_map");
}
//
// kern_vm_init() constructs the kernel page table.
//
void kern_vm_init(void) {
pagetable_t t_page_dir;
// allocate a page (t_page_dir) to be the page directory for kernel
t_page_dir = (pagetable_t)alloc_page();
memset(t_page_dir, 0, PGSIZE);
// map virtual address [KERN_BASE, _etext] to physical address [DRAM_BASE, DRAM_BASE+(_etext - KERN_BASE)],
// to maintain (direct) text section kernel address mapping.
kern_vm_map(t_page_dir, KERN_BASE, DRAM_BASE, (uint64)_etext - KERN_BASE,
prot_to_type(PROT_READ | PROT_EXEC, 0));
sprint("KERN_BASE 0x%lx\n", lookup_pa(t_page_dir, KERN_BASE));
// also (direct) map the remaining address space, to make it accessible from the kernel.
// this is important when kernel needs to access the memory content of user's app
// without copying pages between kernel and user spaces.
kern_vm_map(t_page_dir, (uint64)_etext, (uint64)_etext, PHYS_TOP - (uint64)_etext,
prot_to_type(PROT_READ | PROT_WRITE, 0));
sprint("physical address of _etext is: 0x%lx\n", lookup_pa(t_page_dir, (uint64)_etext));
g_kernel_pagetable = t_page_dir;
}
/* --- user page table part --- */
//
// convert and return the corresponding physical address of a virtual address (va) of
// application.
//
void *user_va_to_pa(pagetable_t page_dir, void *va) {
// TODO (lab2_1): implement user_va_to_pa to convert a given user virtual address "va"
// to its corresponding physical address, i.e., "pa". To do it, we need to walk
// through the page table, starting from its directory "page_dir", to locate the PTE
// that maps "va". If found, returns the "pa" by using:
// pa = PHYS_ADDR(PTE) + (va & ((1 << PGSHIFT) - 1))
// Here, PHYS_ADDR() means retrieving the page's starting address (4KB aligned), and
// (va & ((1 << PGSHIFT) - 1)) computes the offset of "va" within its page.
// Also, it is possible that "va" is not mapped at all. in such case, we can find
// invalid PTE, and should return NULL.
panic( "You have to implement user_va_to_pa (convert user va to pa) to print messages in lab2_1.\n" );
}
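
A sketch of the lab2_1 conversion using helpers defined in this same file (page_walk with alloc == 0, then PTE2PA plus the in-page offset):

    pte_t *pte = page_walk(page_dir, (uint64)va, 0);
    if (pte == 0 || (*pte & PTE_V) == 0) return NULL;
    return (void *)(PTE2PA(*pte) + ((uint64)va & ((1UL << PGSHIFT) - 1)));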
//
// maps virtual address [va, va+sz] to [pa, pa+sz] (for user application).
//
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
if (map_pages(page_dir, va, size, pa, perm) != 0) {
panic("fail to user_vm_map .\n");
}
}
//
// unmap virtual address [va, va+size] from the user app.
// reclaim the physical pages if free!=0
//
void user_vm_unmap(pagetable_t page_dir, uint64 va, uint64 size, int free) {
// TODO (lab2_2): implement user_vm_unmap to disable the mapping of the virtual pages
// in [va, va+size], and free the corresponding physical pages used by the virtual
// addresses if free is not zero.
// basic idea here is to first locate the PTEs of the virtual pages, and then reclaim
// (use free_page() defined in pmm.c) the physical pages. lastly, invalidate the PTEs.
// as naive_free reclaims only one page at a time, you only need to consider one page
// to make user/app_naive_malloc produce the correct behavior.
panic( "You have to implement user_vm_unmap to free pages using naive_free in lab2_2.\n" );
}
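
A one-page sketch of the lab2_2 TODO, which is enough for naive_free since it reclaims a single page at a time:

    pte_t *pte = page_walk(page_dir, va, 0);
    if (pte == 0 || (*pte & PTE_V) == 0) panic("user_vm_unmap fails on va (0x%lx)", va);
    if (free) free_page((void *)PTE2PA(*pte));
    *pte = 0;  // invalidate the PTE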
//
// debug function, print the vm space of a process.
//
void print_proc_vmspace(process* proc) {
sprint( "======\tbelow is the vm space of process%d\t========\n", proc->pid );
for( int i=0; i<proc->total_mapped_region; i++ ){
sprint( "-va:%lx, npage:%d, ", proc->mapped_info[i].va, proc->mapped_info[i].npages);
switch(proc->mapped_info[i].seg_type){
case CODE_SEGMENT: sprint( "type: CODE SEGMENT" ); break;
case DATA_SEGMENT: sprint( "type: DATA SEGMENT" ); break;
case STACK_SEGMENT: sprint( "type: STACK SEGMENT" ); break;
case CONTEXT_SEGMENT: sprint( "type: TRAPFRAME SEGMENT" ); break;
case SYSTEM_SEGMENT: sprint( "type: SYSTEM SEGMENT (S-mode trap vector)" ); break;
}
sprint( ", mapped to pa:%lx\n", lookup_pa(proc->pagetable, proc->mapped_info[i].va) );
}
}

kernel/vmm.h Normal file
View File

@ -0,0 +1,36 @@
#ifndef _VMM_H_
#define _VMM_H_
#include "riscv.h"
#include "process.h"
/* --- utility functions for virtual address mapping --- */
int map_pages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm);
// permission codes.
enum VMPermision {
PROT_NONE = 0,
PROT_READ = 1,
PROT_WRITE = 2,
PROT_EXEC = 4,
};
uint64 prot_to_type(int prot, int user);
pte_t *page_walk(pagetable_t pagetable, uint64 va, int alloc);
uint64 lookup_pa(pagetable_t pagetable, uint64 va);
/* --- kernel page table --- */
// pointer to kernel page directory
extern pagetable_t g_kernel_pagetable;
void kern_vm_map(pagetable_t page_dir, uint64 va, uint64 pa, uint64 sz, int perm);
// Initialize the kernel pagetable
void kern_vm_init(void);
/* --- user page table --- */
void *user_va_to_pa(pagetable_t page_dir, void *va);
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm);
void user_vm_unmap(pagetable_t page_dir, uint64 va, uint64 size, int free);
void print_proc_vmspace(process* proc);
#endif

View File

@ -1,17 +0,0 @@
/*
* Below is the given application for lab1_1.
*
* You can build this app (as well as our PKE OS kernel) by command:
* $ make
*
* Or run this app (with the support from PKE OS kernel) by command:
* $ make run
*/
#include "user_lib.h"
int main(void) {
printu("Hello world!\n");
exit(0);
}

user/app_two_long_loops.c Normal file
View File

@ -0,0 +1,28 @@
/*
* The application of lab3_3.
* parent and child processes never give up their processor during execution.
*/
#include "user/user_lib.h"
#include "util/types.h"
int main(void) {
uint64 pid = fork();
uint64 rounds = 100000000;
uint64 interval = 10000000;
uint64 a = 0;
if (pid == 0) {
printu("Child: Hello world! \n");
for (uint64 i = 0; i < rounds; ++i) {
if (i % interval == 0) printu("Child running %ld \n", i);
}
} else {
printu("Parent: Hello world! \n");
for (uint64 i = 0; i < rounds; ++i) {
if (i % interval == 0) printu("Parent running %ld \n", i);
}
}
exit(0);
return 0;
}

View File

@ -1,14 +0,0 @@
OUTPUT_ARCH( "riscv" )
ENTRY(main)
SECTIONS
{
. = 0x81000000;
. = ALIGN(0x1000);
.text : { *(.text) }
. = ALIGN(16);
.data : { *(.data) }
. = ALIGN(16);
.bss : { *(.bss) }
}

View File

@ -10,7 +10,7 @@
#include "util/snprintf.h"
#include "kernel/syscall.h"
int do_user_call(uint64 sysnum, uint64 a1, uint64 a2, uint64 a3, uint64 a4, uint64 a5, uint64 a6,
uint64 do_user_call(uint64 sysnum, uint64 a1, uint64 a2, uint64 a3, uint64 a4, uint64 a5, uint64 a6,
uint64 a7) {
uint64 ret;
@ -49,3 +49,30 @@ int printu(const char* s, ...) {
int exit(int code) {
return do_user_call(SYS_user_exit, code, 0, 0, 0, 0, 0, 0);
}
//
// lib call to naive_malloc
//
void* naive_malloc() {
return (void*)do_user_call(SYS_user_allocate_page, 0, 0, 0, 0, 0, 0, 0);
}
//
// lib call to naive_free
//
void naive_free(void* va) {
do_user_call(SYS_user_free_page, (uint64)va, 0, 0, 0, 0, 0, 0);
}
//
// lib call to naive_fork
//
int fork() {
return do_user_call(SYS_user_fork, 0, 0, 0, 0, 0, 0, 0);
}
//
// lib call to yield
//
void yield() {
do_user_call(SYS_user_yield, 0, 0, 0, 0, 0, 0, 0);
}

View File

@ -4,3 +4,7 @@
int printu(const char *s, ...);
int exit(int code);
void* naive_malloc();
void naive_free(void* va);
int fork();
void yield();