Finish user mode execute

This commit is contained in:
Yuke Peng
2022-08-26 08:59:20 -07:00
parent 2caa3d9df5
commit c9b5a6a14a
17 changed files with 435 additions and 159 deletions

View File

@ -1,5 +1,9 @@
//! CPU.
use x86_64::registers::model_specific::FsBase;
use crate::trap::{CalleeRegs, CallerRegs, SyscallFrame, TrapFrame};
/// Defines a CPU-local variable.
#[macro_export]
macro_rules! cpu_local {
@ -19,7 +23,7 @@ pub fn this_cpu() -> u32 {
}
/// Cpu context, including both general-purpose registers and floating-point registers.
#[derive(Clone, Default)]
#[derive(Clone, Default, Copy)]
#[repr(C)]
pub struct CpuContext {
pub gp_regs: GpRegs,
@ -51,8 +55,118 @@ pub struct GpRegs {
pub rflag: u64,
}
impl From<SyscallFrame> for CpuContext {
fn from(syscall: SyscallFrame) -> Self {
Self {
gp_regs: GpRegs {
r8: syscall.caller.r8 as u64,
r9: syscall.caller.r9 as u64,
r10: syscall.caller.r10 as u64,
r11: syscall.caller.r11 as u64,
r12: syscall.callee.r12 as u64,
r13: syscall.callee.r13 as u64,
r14: syscall.callee.r14 as u64,
r15: syscall.callee.r15 as u64,
rdi: syscall.caller.rdi as u64,
rsi: syscall.caller.rsi as u64,
rbp: syscall.callee.rbp as u64,
rbx: syscall.callee.rbx as u64,
rdx: syscall.caller.rdx as u64,
rax: syscall.caller.rax as u64,
rcx: syscall.caller.rcx as u64,
rsp: syscall.callee.rsp as u64,
rip: 0, // the syscall instruction leaves the user RIP in RCX
rflag: 0, // ... and the user RFLAGS in R11
},
fs_base: 0,
fp_regs: FpRegs::default(),
}
}
}
impl From<CpuContext> for SyscallFrame {
fn from(ctx: CpuContext) -> Self {
Self {
caller: CallerRegs {
rax: ctx.gp_regs.rax as usize,
rcx: ctx.gp_regs.rcx as usize,
rdx: ctx.gp_regs.rdx as usize,
rsi: ctx.gp_regs.rsi as usize,
rdi: ctx.gp_regs.rdi as usize,
r8: ctx.gp_regs.r8 as usize,
r9: ctx.gp_regs.r9 as usize,
r10: ctx.gp_regs.r10 as usize,
r11: ctx.gp_regs.r11 as usize,
},
callee: CalleeRegs {
rsp: ctx.gp_regs.rsp as usize,
rbx: ctx.gp_regs.rbx as usize,
rbp: ctx.gp_regs.rbp as usize,
r12: ctx.gp_regs.r12 as usize,
r13: ctx.gp_regs.r13 as usize,
r14: ctx.gp_regs.r14 as usize,
r15: ctx.gp_regs.r15 as usize,
},
}
}
}
impl From<TrapFrame> for CpuContext {
fn from(trap: TrapFrame) -> Self {
Self {
gp_regs: GpRegs {
r8: trap.regs.r8 as u64,
r9: trap.regs.r9 as u64,
r10: trap.regs.r10 as u64,
r11: trap.regs.r11 as u64,
// CpuContext has no dedicated fields for the trap id, error code,
// CS, and SS, so they are stashed in the callee-saved slots r12-r15
// and recovered by the reverse conversion below.
r12: trap.id as u64,
r13: trap.err as u64,
r14: trap.cs as u64,
r15: trap.ss as u64,
rdi: trap.regs.rdi as u64,
rsi: trap.regs.rsi as u64,
rbp: 0,
rbx: 0,
rdx: trap.regs.rdx as u64,
rax: trap.regs.rax as u64,
rcx: trap.regs.rcx as u64,
rsp: trap.rsp as u64,
rip: trap.rip as u64,
rflag: trap.rflags as u64,
},
fs_base: 0,
fp_regs: FpRegs::default(),
}
}
}
impl From<CpuContext> for TrapFrame {
fn from(ctx: CpuContext) -> Self {
Self {
regs: CallerRegs {
rax: ctx.gp_regs.rax as usize,
rcx: ctx.gp_regs.rcx as usize,
rdx: ctx.gp_regs.rdx as usize,
rsi: ctx.gp_regs.rsi as usize,
rdi: ctx.gp_regs.rdi as usize,
r8: ctx.gp_regs.r8 as usize,
r9: ctx.gp_regs.r9 as usize,
r10: ctx.gp_regs.r10 as usize,
r11: ctx.gp_regs.r11 as usize,
},
id: ctx.gp_regs.r12 as usize,
err: ctx.gp_regs.r13 as usize,
rip: ctx.gp_regs.rip as usize,
cs: ctx.gp_regs.r14 as usize,
rflags: ctx.gp_regs.rflag as usize,
rsp: ctx.gp_regs.rsp as usize,
ss: ctx.gp_regs.r15 as usize,
}
}
}
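Rust only needs `From` implemented for each direction of a conversion pair: the standard library's blanket impl derives the matching `Into`, which is what call sites like `self.context.into()` rely on. A minimal, self-contained sketch of that rule (plain Rust, hypothetical stand-in types, not the kernel's):

// Hypothetical stand-ins for SyscallFrame/CpuContext.
struct Frame { rax: usize }
struct Ctx { rax: u64 }

impl From<Frame> for Ctx {
    fn from(f: Frame) -> Self {
        Ctx { rax: f.rax as u64 }
    }
}

impl From<Ctx> for Frame {
    fn from(c: Ctx) -> Self {
        Frame { rax: c.rax as usize }
    }
}

fn main() {
    let ctx: Ctx = Frame { rax: 42 }.into(); // Into comes for free from From
    let frame: Frame = ctx.into();
    assert_eq!(frame.rax, 42);
}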
/// The floating-point state of CPU.
#[derive(Clone)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct FpRegs {
//buf: Aligned<A16, [u8; 512]>,

View File

@ -10,6 +10,8 @@
#![feature(alloc_error_handler)]
#![feature(core_intrinsics)]
#![feature(new_uninit)]
#![feature(link_llvm_intrinsics)]
extern crate alloc;
pub mod cell;
@ -44,7 +46,6 @@ pub fn init(boot_info: &'static mut BootInfo) {
let siz = boot_info.framebuffer.as_ref().unwrap() as *const FrameBuffer as usize;
device::init(boot_info.framebuffer.as_mut().unwrap());
device::framebuffer::WRITER.lock().as_mut().unwrap().clear();
println!("{:x}", siz);
trap::init();
let mut memory_init = false;
// memory
@ -65,17 +66,6 @@ pub fn init(boot_info: &'static mut BootInfo) {
panic!("memory init failed");
}
// breakpoint
let breakpoint_irq: Arc<&IrqLine>;
unsafe {
breakpoint_irq = IrqLine::acquire(3);
}
let a = breakpoint_irq.on_active(breakpoint_handler);
x86_64::instructions::interrupts::int3(); // breakpoint
}
fn breakpoint_handler(interrupt_information: TrapFrame) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", interrupt_information);
}
#[inline(always)]

View File

@ -7,19 +7,24 @@ use crate::{
*,
};
use alloc::collections::{btree_map::Entry, BTreeMap};
use alloc::vec;
use core::fmt;
pub struct MapArea {
/// flags
pub flags: PTFlags,
/// start virtual address
pub start_va: VirtAddr,
/// the size of this area
pub size: usize,
/// the mapping from virtual addresses to frames
pub mapper: BTreeMap<VirtAddr, VmFrame>,
}
pub struct MemorySet {
pub pt: PageTable,
/// all the map area
area: Option<MapArea>,
/// all the map areas, sorted by start virtual address
areas: BTreeMap<VirtAddr, MapArea>,
}
impl MapArea {
@ -27,6 +32,16 @@ impl MapArea {
self.mapper.len()
}
pub fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = PhysFrame::alloc().unwrap();
new.as_slice().copy_from_slice(old.physical_frame.exclusive_access().as_slice());
mapper.insert(va, unsafe { VmFrame::new(new) });
}
Self { start_va: self.start_va, size: self.size, flags: self.flags, mapper }
}
/// This function will map the virtual address to the given physical frames
pub fn new(
start_va: VirtAddr,
@ -42,6 +57,8 @@ impl MapArea {
let mut map_area = Self {
flags,
start_va,
size,
mapper: BTreeMap::new(),
};
let mut current_va = start_va.clone();
@ -82,136 +99,153 @@ impl MapArea {
self.mapper.remove(&va)
}
pub fn write_data(&mut self, offset: usize, data: &[u8]) {
let mut start = offset;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter_mut() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &data[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
return;
}
}
}
}
pub fn read_data(&self, offset: usize, data: &mut [u8]) {
let mut start = offset;
pub fn write_data(&mut self, addr: usize, data: &[u8]) {
let mut current_start_address = addr;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &mut data[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
src.copy_from_slice(dst);
if current_start_address >= va.0 && current_start_address < va.0 + PAGE_SIZE {
let offset = current_start_address - va.0;
let copy_len = (va.0 + PAGE_SIZE - current_start_address).min(remain);
let src = &data[processed..processed + copy_len];
// copy into [offset, offset + copy_len) within the page
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[offset..offset + copy_len];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
return;
}
current_start_address = va.0 + PAGE_SIZE;
}
}
}
pub fn read_data(&self, addr: usize, data: &mut [u8]) {
let mut start = addr;
let mut remain = data.len();
let mut processed = 0;
for (va, pa) in self.mapper.iter() {
if start >= va.0 && start < va.0 + PAGE_SIZE {
let offset = start - va.0;
let copy_len = (va.0 + PAGE_SIZE - start).min(remain);
let dst = &mut data[processed..processed + copy_len];
let src = &pa.start_pa().kvaddr().get_bytes_array()[offset..offset + copy_len];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
if remain == 0 {
return;
}
start = va.0 + PAGE_SIZE;
}
}
}
}
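write_data and read_data walk the area's pages in address order: find the page containing the cursor, copy up to the page boundary, then advance the cursor to the next page. A self-contained sketch of the same walk (plain Rust, hypothetical 4-byte pages standing in for VmFrame):

use std::collections::BTreeMap;

const PAGE_SIZE: usize = 4; // tiny pages keep the example checkable by hand

// Mirrors MapArea::write_data: per page, compute the in-page offset and
// the per-page copy length, copy, then move to the next page.
fn write_data(pages: &mut BTreeMap<usize, [u8; PAGE_SIZE]>, addr: usize, data: &[u8]) {
    let mut cur = addr;
    let mut processed = 0;
    let mut remain = data.len();
    for (&va, page) in pages.iter_mut() {
        if cur >= va && cur < va + PAGE_SIZE {
            let offset = cur - va;
            let copy_len = (va + PAGE_SIZE - cur).min(remain);
            page[offset..offset + copy_len]
                .copy_from_slice(&data[processed..processed + copy_len]);
            processed += copy_len;
            remain -= copy_len;
            if remain == 0 {
                return;
            }
            cur = va + PAGE_SIZE;
        }
    }
}

fn main() {
    let mut pages = BTreeMap::new();
    pages.insert(0, [0u8; PAGE_SIZE]);
    pages.insert(4, [0u8; PAGE_SIZE]);
    write_data(&mut pages, 2, b"abcd"); // spans the page boundary at 4
    assert_eq!(&pages[&0][2..], b"ab");
    assert_eq!(&pages[&4][..2], b"cd");
}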
impl Clone for MapArea {
fn clone(&self) -> Self {
let mut mapper = BTreeMap::new();
for (&va, old) in &self.mapper {
let new = VmFrame::alloc().unwrap();
new.physical_frame
.exclusive_access()
.as_slice()
.copy_from_slice(old.physical_frame.exclusive_access().as_slice());
mapper.insert(va, new);
}
Self {
flags: self.flags,
mapper,
}
}
}
// impl Clone for MapArea {
// fn clone(&self) -> Self {
// let mut mapper = BTreeMap::new();
// for (&va, old) in &self.mapper {
// let new = VmFrame::alloc().unwrap();
// new.physical_frame
// .exclusive_access()
// .as_slice()
// .copy_from_slice(old.physical_frame.exclusive_access().as_slice());
// mapper.insert(va, new);
// }
// Self {
// flags: self.flags,
// mapper,
// }
// }
// }
impl MemorySet {
pub fn new(area: MapArea) -> Self {
let mut pt = PageTable::new();
pt.map_area(&area);
Self {
pt: pt,
area: Some(area),
pub fn map(&mut self, area: MapArea) {
if area.size > 0 {
// TODO: check overlap
if let Entry::Vacant(e) = self.areas.entry(area.start_va) {
self.pt.map_area(e.insert(area));
} else {
panic!("MemorySet::map: MapArea starts from {:#x?} is existed!", area.start_va);
}
}
}
pub fn zero() -> Self {
pub fn new() -> Self {
Self {
pt: PageTable::new(),
area: None,
areas: BTreeMap::new(),
}
}
pub fn map_area(&mut self, area: MapArea) {
self.pt.map_area(&area);
self.area = Some(area);
}
pub fn unmap(&mut self, va: VirtAddr) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.take().unwrap().unmap(va);
if let Some(area) = self.areas.remove(&va) {
self.pt.unmap_area(&area);
Ok(())
} else {
Err(Error::PageFault)
}
}
pub fn clear(&mut self) {
self.pt.unmap_area(&self.area.take().unwrap());
self.area = None;
for area in self.areas.values_mut() {
self.pt.unmap_area(area);
}
self.areas.clear();
}
pub fn write_bytes(&mut self, offset: usize, data: &[u8]) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.take().unwrap().write_data(offset, data);
Ok(())
pub fn write_bytes(&mut self, addr: usize, data: &[u8]) -> Result<()> {
let mut current_addr = addr;
let mut remain = data.len();
let mut started = false;
for (va, area) in self.areas.iter_mut() {
if current_addr >= va.0 && current_addr < va.0 + area.size {
if !area.flags.contains(PTFlags::WRITABLE) {
return Err(Error::PageFault);
}
started = true;
// skip the part of `data` already written to earlier areas
area.write_data(current_addr, &data[data.len() - remain..]);
remain -= (va.0 + area.size - current_addr).min(remain);
if remain == 0 {
return Ok(());
}
current_addr = va.0 + area.size;
} else if started {
// areas are sorted by start address; a gap means an unmapped hole
return Err(Error::PageFault);
}
}
Err(Error::PageFault)
}
pub fn read_bytes(&self, offset: usize, data: &mut [u8]) -> Result<()> {
if self.area.is_none() {
Err(Error::InvalidArgs)
} else {
self.area.as_ref().unwrap().read_data(offset, data);
Ok(())
pub fn read_bytes(&self, addr: usize, data: &mut [u8]) -> Result<()> {
let mut current_addr = addr;
let mut remain = data.len();
let mut started = false;
for (va, area) in self.areas.iter() {
if current_addr >= va.0 && current_addr < va.0 + area.size {
started = true;
// skip the part of `data` already filled from earlier areas
let processed = data.len() - remain;
area.read_data(current_addr, &mut data[processed..]);
remain -= (va.0 + area.size - current_addr).min(remain);
if remain == 0 {
return Ok(());
}
current_addr = va.0 + area.size;
} else if started {
// areas are sorted by start address; a gap means an unmapped hole
return Err(Error::PageFault);
}
}
Err(Error::PageFault)
}
}
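write_bytes and read_bytes extend the same walk across whole areas, and the `started` flag turns a gap between areas, once the range has begun, into a page fault. A self-contained sketch of that coverage check (plain Rust over a hypothetical start -> size map):

use std::collections::BTreeMap;

// Returns true if [addr, addr + len) is fully covered by the
// (start -> size) areas, which the BTreeMap keeps sorted by start.
fn covers(areas: &BTreeMap<usize, usize>, addr: usize, len: usize) -> bool {
    let mut cur = addr;
    let mut remain = len;
    let mut started = false;
    for (&va, &size) in areas.iter() {
        if cur >= va && cur < va + size {
            started = true;
            let n = (va + size - cur).min(remain);
            remain -= n;
            if remain == 0 {
                return true;
            }
            cur = va + size;
        } else if started {
            return false; // a hole after the range began
        }
    }
    false
}

fn main() {
    let mut areas = BTreeMap::new();
    areas.insert(0x1000, 0x1000);
    areas.insert(0x2000, 0x1000); // contiguous with the first
    areas.insert(0x5000, 0x1000); // a hole lies before this one
    assert!(covers(&areas, 0x1800, 0x1000)); // spans the contiguous pair
    assert!(!covers(&areas, 0x2800, 0x3000)); // runs into the hole
}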
impl Clone for MemorySet {
fn clone(&self) -> Self {
println!("clone memory set");
if self.area.is_none() {
Self::zero()
} else {
Self::new(self.area.clone().unwrap())
let mut ms = Self::new();
for area in self.areas.values() {
ms.map(area.clone());
}
ms
}
}
}
impl Drop for MemorySet {
fn drop(&mut self) {
self.clear();
@ -230,7 +264,7 @@ impl fmt::Debug for MapArea {
impl fmt::Debug for MemorySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemorySet")
.field("areas", &self.area)
.field("areas", &self.areas)
.field("page_table_root", &self.pt.root_pa)
.finish()
}

View File

@ -1,5 +1,4 @@
use super::{memory_set::MapArea, *};
use crate::cell::Cell;
use crate::{
config::{ENTRY_COUNT, KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET},
vm::VmFrame,
@ -9,9 +8,6 @@ use alloc::{collections::BTreeMap, vec, vec::Vec};
use core::fmt;
use lazy_static::lazy_static;
static KERNEL_PTE: Cell<PageTableEntry> = zero();
static PHYS_PTE: Cell<PageTableEntry> = zero();
lazy_static! {
pub static ref ALL_MAPPED_PTE: UPSafeCell<BTreeMap<usize, PageTableEntry>> =
unsafe { UPSafeCell::new(BTreeMap::new()) };
@ -65,7 +61,6 @@ impl PageTable {
for (index, pte) in map_pte.iter() {
p4[*index] = *pte;
}
println!("start_pa:{:x}", root_frame.start_pa());
Self {
root_pa: root_frame.start_pa(),
tables: vec![root_frame],
@ -81,8 +76,6 @@ impl PageTable {
println!("index:{:?},PTE:{:?}", i, a);
}
}
println!("kernel_pte:{:?}", p4[p4_index(VirtAddr(KERNEL_OFFSET))]);
println!("PHYS_PTE:{:?}", p4[p4_index(VirtAddr(PHYS_OFFSET))]);
}
pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) {
@ -210,15 +203,8 @@ pub(crate) fn init() {
for i in 0..512 {
if !p4[i].flags().is_empty() {
map_pte.insert(i, p4[i]);
// println!("i:{:x},{:?}",i,p4[i]);
}
}
// print how it use p4[0]
// *KERNEL_PTE.get() = p4[p4_index(VirtAddr(KERNEL_OFFSET))];
// *PHYS_PTE.get() = p4[p4_index(VirtAddr(PHYS_OFFSET))];
// println!("kernel_pte:{:?}", *KERNEL_PTE.get());
// println!("PHYS_PTE:{:?}", *PHYS_PTE.get());
// Cancel mapping in lowest addresses.
// p4[0].0 = 0;
}

View File

@ -1,4 +1,4 @@
use core::cell::{RefCell, RefMut};
use core::cell::{Ref, RefCell, RefMut};
#[derive(Debug)]
/// Wrap a static data structure inside it so that we are
@ -28,4 +28,9 @@ impl<T> UPSafeCell<T> {
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
/// Panics if the data has been mutably borrowed.
pub fn get_ref(&self) -> Ref<'_, T> {
self.inner.borrow()
}
}
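get_ref forwards to RefCell::borrow, which panics only while a mutable borrow is live; any number of shared borrows may coexist. A short self-contained sketch of those rules (plain Rust):

use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(0u32);
    {
        let a = cell.borrow();
        let b = cell.borrow(); // shared borrows may coexist
        assert_eq!(*a + *b, 0);
    }
    {
        let mut m = cell.borrow_mut();
        *m += 1;
        // calling cell.borrow() here would panic: a mutable borrow is live
    }
    assert_eq!(*cell.borrow(), 1);
}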

View File

@ -5,5 +5,9 @@ mod scheduler;
#[allow(clippy::module_inception)]
mod task;
pub use self::processor::get_idle_task_cx_ptr;
pub use self::scheduler::{set_scheduler, Scheduler};
pub use self::task::context_switch;
pub use self::task::TaskContext;
pub use self::task::SWITCH_TO_USER_SPACE_TASK;
pub use self::task::{Task, TaskStatus};

View File

@ -45,6 +45,10 @@ pub fn current_task() -> Option<Arc<Task>> {
PROCESSOR.exclusive_access().current()
}
pub fn get_idle_task_cx_ptr() -> *mut TaskContext {
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()
}
/// Call this function to switch to another task using the GLOBAL_SCHEDULER.
///
/// If the current task is none, the default task context is used and this function will not return.
@ -55,7 +59,7 @@ pub fn current_task() -> Option<Arc<Task>> {
pub fn schedule() {
let next_task = fetch_task().expect("no more task found");
let current_task_option = current_task();
let next_task_cx_ptr = &next_task.inner_exclusive_access().ctx as *const TaskContext;
let next_task_cx_ptr = &next_task.inner_ctx() as *const TaskContext;
let current_task: Arc<Task>;
let current_task_cx_ptr = if current_task_option.is_none() {
PROCESSOR.exclusive_access().get_idle_task_cx_ptr()

View File

@ -35,7 +35,6 @@ impl GlobalScheduler {
/// Enqueues a task using the scheduler.
/// Requires that the scheduler is not none.
pub fn enqueue(&mut self, task: Arc<Task>) {
println!("{:?}", self.scheduler.is_none());
self.scheduler.unwrap().enqueue(task)
}
}

View File

@ -1,12 +1,15 @@
use core::cell::RefMut;
use core::mem::size_of;
use lazy_static::lazy_static;
use crate::cell::Cell;
use crate::mm::PhysFrame;
use crate::trap::{CalleeRegs, SyscallFrame};
use crate::user::UserSpace;
use crate::trap::{CalleeRegs, SyscallFrame, TrapFrame};
use crate::user::{syscall_switch_to_user_space, trap_switch_to_user_space, UserSpace};
use crate::{prelude::*, UPSafeCell};
use super::processor::{current_task, schedule};
use super::processor::{current_task, schedule, PROCESSOR};
use super::scheduler::add_task;
core::arch::global_asm!(include_str!("switch.S"));
@ -21,6 +24,58 @@ extern "C" {
pub fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext);
}
pub fn context_switch_to_user_space() {
let task = Task::current();
let switch_space_task = SWITCH_TO_USER_SPACE_TASK.get();
if task.inner_exclusive_access().is_from_trap {
*switch_space_task.trap_frame() = *task.trap_frame();
unsafe {
trap_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.trap_frame(),
);
}
} else {
*switch_space_task.syscall_frame() = *task.syscall_frame();
unsafe {
syscall_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.syscall_frame(),
);
}
}
}
lazy_static! {
/// This task is meant to switch to user space and then switch back in `UserMode::execute`.
///
/// When context-switching to this task, there is no need to set the current task.
pub static ref SWITCH_TO_USER_SPACE_TASK: Cell<Task> = unsafe {
Cell::new({
let task = Task {
func: Box::new(context_switch_to_user_space),
data: Box::new(None::<u8>),
user_space: None,
task_inner: unsafe {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
})
},
exit_code: usize::MAX,
kstack: KernelStack::new(),
};
task.task_inner.exclusive_access().task_status = TaskStatus::Runnable;
task.task_inner.exclusive_access().ctx.rip = context_switch_to_user_space as usize;
task.task_inner.exclusive_access().ctx.regs.rsp = task.kstack.frame.end_pa().kvaddr().0
as usize
- size_of::<usize>()
- size_of::<SyscallFrame>();
task
})};
}
pub struct KernelStack {
frame: PhysFrame,
}
@ -40,13 +95,15 @@ pub struct Task {
user_space: Option<Arc<UserSpace>>,
task_inner: UPSafeCell<TaskInner>,
exit_code: usize,
/// kernel stack, note that the top is SyscallFrame
/// kernel stack; note that a SyscallFrame/TrapFrame sits at the top
kstack: KernelStack,
}
pub struct TaskInner {
pub task_status: TaskStatus,
pub ctx: TaskContext,
/// whether the task entered the kernel from a trap; if so, read the TrapFrame instead of the SyscallFrame
pub is_from_trap: bool,
}
impl Task {
@ -60,6 +117,11 @@ impl Task {
self.task_inner.exclusive_access()
}
/// get a copy of the task context
pub fn inner_ctx(&self) -> TaskContext {
self.task_inner.exclusive_access().ctx
}
/// Yields execution so that another task may be scheduled.
///
/// Note that this method cannot be simply named "yield" as the name is
@ -97,6 +159,7 @@ impl Task {
UPSafeCell::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
})
},
exit_code: 0,
@ -129,6 +192,13 @@ impl Task {
}
}
pub fn trap_frame(&self) -> &mut TrapFrame {
// the TrapFrame sits at the very top of the kernel stack
unsafe {
&mut *(self.kstack.frame.end_pa().kvaddr().get_mut::<TrapFrame>() as *mut TrapFrame)
.sub(1)
}
}
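Both syscall_frame and trap_frame locate the frame at the very top of the kernel stack: take the one-past-the-end address and step back one whole frame. A self-contained sketch of that pointer arithmetic (plain Rust, a hypothetical Frame type and an aligned buffer standing in for the kernel stack):

#[repr(C)]
struct Frame {
    rip: usize,
    rsp: usize,
}

fn main() {
    // 4 KiB, 8-byte-aligned buffer standing in for the kernel stack page.
    let mut stack = [0u64; 512];
    let end = stack.as_mut_ptr_range().end as *mut Frame;
    // Step back one whole Frame from the top, as trap_frame does with .sub(1).
    let frame = unsafe { &mut *end.sub(1) };
    frame.rip = 0xdead_beef;
    frame.rsp = 0x1000;
    assert_eq!((frame.rip, frame.rsp), (0xdead_beef, 0x1000));
}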
/// Returns the task status.
pub fn status(&self) -> TaskStatus {
self.task_inner.exclusive_access().task_status

View File

@ -1,12 +1,21 @@
use crate::task::{
context_switch, get_idle_task_cx_ptr, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK,
};
use super::{irq::IRQ_LIST, *};
#[no_mangle]
pub extern "C" fn syscall_handler(f: &'static mut SyscallFrame) -> isize {
let r = &f.caller;
println!("{:?}", f);
// let ret = syscall::syscall(r.rax, [r.rdi, r.rsi, r.rdx]);
// current_check_signal();
// ret
let current = Task::current();
current.inner_exclusive_access().is_from_trap = false;
*current.syscall_frame() = *SWITCH_TO_USER_SPACE_TASK.get().syscall_frame();
unsafe {
context_switch(
get_idle_task_cx_ptr() as *mut TaskContext,
&Task::current().inner_ctx() as *const TaskContext,
)
}
-1
}
@ -20,9 +29,22 @@ const TIMER: usize = 32;
#[no_mangle]
pub extern "C" fn trap_handler(f: &'static mut TrapFrame) {
if !is_from_kernel(f.cs) {
let current = Task::current();
current.inner_exclusive_access().is_from_trap = true;
}
let irq_line = IRQ_LIST.get(f.id as usize).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(f.clone());
}
}
fn is_from_kernel(cs: usize) -> bool {
// the low two bits of the CS selector hold the privilege level;
// 0 means the trap came from ring 0 (kernel mode)
cs & 0x3 == 0
}

View File

@ -36,6 +36,8 @@ __trap_entry:
# trap_handler
mov rdi, rsp
call trap_handler
__trap_return:
# check whether the trap came from kernel mode
mov rax, [rsp + 96] # 96 = offsetof(TrapFrame, cs)
and rax, 0x3
jz __from_kernel
@ -46,6 +48,9 @@ __from_kernel:
add rsp, 16 # skip TrapFrame.err and id
iretq
.global syscall_entry
syscall_entry:
# the syscall instruction does:
@ -73,7 +78,7 @@ syscall_return: # (SyscallFrame *)
mov rsp, rdi
__syscall_return:
lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(SyscallFrame)
# ring0stack4TSS
# store the rsp in TSS
mov [TSS + rip + 4], rax
restore
mov rbx, [rsp + 8]
@ -85,12 +90,18 @@ __syscall_return:
mov rsp, [rsp + 0]
sysretq
.global switch_to_user_space
switch_to_user_space: # (cpu_context: *CpuContext,reg: *CallerRegs)
.global syscall_switch_to_user_space
syscall_switch_to_user_space: # (cpu_context: *CpuContext,reg: *SyscallFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
jmp syscall_return
.global trap_switch_to_user_space
trap_switch_to_user_space: # (cpu_context: *CpuContext,reg: *TrapFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
mov rsp, rdi
jmp __trap_return

View File

@ -4,12 +4,15 @@ use crate::println;
use crate::cpu::CpuContext;
use crate::prelude::*;
use crate::task::Task;
use crate::trap::SyscallFrame;
use crate::task::{context_switch, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK};
use crate::trap::{SyscallFrame, TrapFrame};
use crate::vm::VmSpace;
use crate::x86_64_util::get_return_address;
extern "C" {
fn switch_to_user_space(cpu_context: &CpuContext, syscall_frame: &SyscallFrame);
pub fn syscall_switch_to_user_space(cpu_context: &CpuContext, syscall_frame: &SyscallFrame);
/// cpu_context may be deleted in the future
pub fn trap_switch_to_user_space(cpu_context: &CpuContext, trap_frame: &TrapFrame);
}
/// A user space.
@ -20,7 +23,7 @@ pub struct UserSpace {
/// vm space
vm_space: VmSpace,
/// cpu context before entering user space
cpu_ctx: CpuContext,
pub cpu_ctx: CpuContext,
}
impl UserSpace {
@ -78,6 +81,8 @@ impl UserSpace {
pub struct UserMode<'a> {
current: Arc<Task>,
user_space: &'a Arc<UserSpace>,
context: CpuContext,
executed: bool,
}
// An instance of `UserMode` is bound to the current task. So it cannot be
@ -88,10 +93,12 @@ impl<'a> UserMode<'a> {
Self {
current: Task::current(),
user_space,
context: CpuContext::default(),
executed: false,
}
}
/// Starts executing in the user mode.
/// Starts executing in the user mode. Make sure the current task is the one bound to this `UserMode`.
///
/// The method returns for one of three possible reasons indicated by `UserEvent`.
/// 1. The user invokes a system call;
@ -102,22 +109,46 @@ impl<'a> UserMode<'a> {
/// this method can be invoked again to go back to the user space.
pub fn execute(&mut self) -> UserEvent {
self.user_space.vm_space().activate();
if !self.executed {
self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip as usize; // sysretq loads the return RIP from RCX
println!("{:?}", self.current.syscall_frame());
unsafe {
switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame());
self.executed = true;
} else {
if self.current.inner_exclusive_access().is_from_trap {
*self.current.trap_frame() = self.context.into();
} else {
*self.current.syscall_frame() = self.context.into();
}
}
let mut current_task_inner = self.current.inner_exclusive_access();
let binding = SWITCH_TO_USER_SPACE_TASK.get();
let next_task_inner = binding.inner_exclusive_access();
let current_ctx = &mut current_task_inner.ctx as *mut TaskContext;
let next_ctx = &next_task_inner.ctx as *const TaskContext;
drop(current_task_inner);
drop(next_task_inner);
drop(binding);
unsafe {
context_switch(current_ctx, next_ctx);
// switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame());
}
if self.current.inner_exclusive_access().is_from_trap {
self.context = CpuContext::from(*self.current.trap_frame());
UserEvent::Exception
} else {
self.context = CpuContext::from(*self.current.syscall_frame());
println!("[kernel] syscall id:{}",self.context.gp_regs.rax);
UserEvent::Syscall
}
}
/// Returns an immutable reference to the user-mode CPU context.
pub fn context(&self) -> &CpuContext {
todo!()
&self.context
}
/// Returns a mutable reference to the user-mode CPU context.
pub fn context_mut(&mut self) -> &mut CpuContext {
todo!()
&mut self.context
}
}

View File

@ -29,7 +29,7 @@ impl VmSpace {
/// Creates a new VM address space.
pub fn new() -> Self {
Self {
memory_set: unsafe { UPSafeCell::new(MemorySet::zero()) },
memory_set: unsafe { UPSafeCell::new(MemorySet::new()) },
}
}
@ -54,7 +54,7 @@ impl VmSpace {
if options.addr.is_none() {
return Err(Error::InvalidArgs);
}
self.memory_set.exclusive_access().map_area(MapArea::new(
self.memory_set.exclusive_access().map(MapArea::new(
VirtAddr(options.addr.unwrap()),
frames.len() * PAGE_SIZE,
flags,
@ -99,12 +99,12 @@ impl Default for VmSpace {
}
impl VmIo for VmSpace {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
self.memory_set.exclusive_access().read_bytes(offset, buf)
fn read_bytes(&self, vaddr: usize, buf: &mut [u8]) -> Result<()> {
self.memory_set.exclusive_access().read_bytes(vaddr, buf)
}
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()> {
self.memory_set.exclusive_access().write_bytes(offset, buf)
fn write_bytes(&mut self, vaddr: usize, buf: &[u8]) -> Result<()> {
self.memory_set.exclusive_access().write_bytes(vaddr, buf)
}
}

View File

@ -177,6 +177,15 @@ pub fn get_cr3_raw() -> usize {
val
}
#[inline(always)]
pub fn get_return_address() -> usize {
// read the word currently at the top of the stack
let val: usize;
unsafe {
asm!("mov {}, [rsp]", out(reg) val);
}
val
}
#[inline(always)]
pub fn set_cr3(pa: usize) {
unsafe {

View File

@ -27,7 +27,6 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
let mut user_mode = UserMode::new(user_space);
loop {
let user_event = user_mode.execute();
println!("get user event:{:?}", user_event);
let context = user_mode.context_mut();
if let HandlerResult::Exit = handle_user_event(user_event, context) {
// FIXME: How to set task status? How to set exit code of process?
@ -36,8 +35,6 @@ pub fn spawn_user_task_from_elf(elf_file_content: &[u8]) -> Arc<Task> {
}
}
// QEMU crash when entering the task spawn function.
println!("[kxos std]:before entering task spawn");
// FIXME: set the correct type when task has no data
Task::spawn(user_task_entry, None::<u8>, Some(user_space)).expect("spawn user task failed.")
}

View File

@ -3,7 +3,7 @@ global _start
section .text
_start:
mov rax, 1 ; syswrite
mov rax, 64 ; syswrite
mov rdi, 1 ; fd
mov rsi, msg ; "Hello, world!\n",
mov rdx, msglen ; sizeof("Hello, world!\n")

View File

@ -19,7 +19,7 @@ fn kernel_main(boot_info: &'static mut BootInfo) -> ! {
// }
// }
kxos_frame::init(boot_info);
println!("finish init kxos_frame");
println!("[kernel] finish init kxos_frame");
kxos_std::init();
kxos_std::run_first_process();