Reuse trapframe

This commit is contained in:
Yuke Peng 2023-03-08 19:59:41 -08:00 committed by Tate, Hongliang Tian
parent 91b9501a1b
commit eb2b951f7c
16 changed files with 202 additions and 1910 deletions

3
.gitignore vendored
View File

@ -15,3 +15,6 @@ target/
# Ramdisk file
src/ramdisk/initramfs/
src/ramdisk/build/
# qemu log file
qemu.log

4
.vscode/settings.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rust-analyzer.cargo.target" : "x86_64-unknown-none",
"rust-analyzer.check.extraArgs" : ["--target","x86_64-custom.json","-Zbuild-std=core,alloc,compiler_builtins","-Zbuild-std-features=compiler-builtins-mem"]
}

View File

@ -22,6 +22,7 @@ intrusive-collections = "0.9.5"
log = "0.4"
limine = { version = "0.1.10", features = ["into-uuid"] }
lazy_static = { version = "1.0", features = ["spin_no_std"] }
trapframe= "0.9.0"
[features]
default = ["serial_print"]

View File

@ -4,8 +4,8 @@ use core::arch::x86_64::{_fxrstor, _fxsave};
use core::fmt::Debug;
use core::mem::MaybeUninit;
use crate::trap::{CalleeRegs, CallerRegs, SyscallFrame, TrapFrame};
use crate::x86_64_util::rdfsbase;
use trapframe::{GeneralRegs, UserContext};
use log::debug;
use pod::Pod;
@ -35,6 +35,7 @@ pub struct CpuContext {
pub fp_regs: FpRegs,
pub gp_regs: GpRegs,
pub fs_base: u64,
pub gs_base: u64,
/// trap information, this field is all zero when it is syscall
pub trap_information: TrapInformation,
}
@ -63,8 +64,6 @@ pub struct TrapInformation {
pub cr2: u64,
pub id: u64,
pub err: u64,
pub cs: u64,
pub ss: u64,
}
/// The general-purpose registers of CPU.
@ -96,131 +95,68 @@ unsafe impl Pod for TrapInformation {}
unsafe impl Pod for CpuContext {}
unsafe impl Pod for FpRegs {}
impl From<SyscallFrame> for CpuContext {
fn from(syscall: SyscallFrame) -> Self {
impl From<UserContext> for CpuContext {
fn from(value: UserContext) -> Self {
Self {
gp_regs: GpRegs {
r8: syscall.caller.r8,
r9: syscall.caller.r9,
r10: syscall.caller.r10,
r11: syscall.caller.r11,
r12: syscall.callee.r12,
r13: syscall.callee.r13,
r14: syscall.callee.r14,
r15: syscall.callee.r15,
rdi: syscall.caller.rdi,
rsi: syscall.caller.rsi,
rbp: syscall.callee.rbp,
rbx: syscall.callee.rbx,
rdx: syscall.caller.rdx,
rax: syscall.caller.rax,
rcx: syscall.caller.rcx,
rsp: syscall.callee.rsp,
rip: syscall.caller.rcx,
rflag: 0,
r8: value.general.r8 as u64,
r9: value.general.r9 as u64,
r10: value.general.r10 as u64,
r11: value.general.r11 as u64,
r12: value.general.r12 as u64,
r13: value.general.r13 as u64,
r14: value.general.r14 as u64,
r15: value.general.r15 as u64,
rdi: value.general.rdi as u64,
rsi: value.general.rsi as u64,
rbp: value.general.rbp as u64,
rbx: value.general.rbx as u64,
rdx: value.general.rdx as u64,
rax: value.general.rax as u64,
rcx: value.general.rcx as u64,
rsp: value.general.rsp as u64,
rip: value.general.rip as u64,
rflag: value.general.rflags as u64,
},
fs_base: 0,
fp_regs: FpRegs::default(),
trap_information: TrapInformation::default(),
}
}
}
impl Into<SyscallFrame> for CpuContext {
fn into(self) -> SyscallFrame {
SyscallFrame {
caller: CallerRegs {
rax: self.gp_regs.rax,
rcx: self.gp_regs.rcx,
rdx: self.gp_regs.rdx,
rsi: self.gp_regs.rsi,
rdi: self.gp_regs.rdi,
r8: self.gp_regs.r8,
r9: self.gp_regs.r9,
r10: self.gp_regs.r10,
r11: self.gp_regs.r11,
},
callee: CalleeRegs {
rsp: self.gp_regs.rsp,
rbx: self.gp_regs.rbx,
rbp: self.gp_regs.rbp,
r12: self.gp_regs.r12,
r13: self.gp_regs.r13,
r14: self.gp_regs.r14,
r15: self.gp_regs.r15,
},
}
}
}
impl From<TrapFrame> for CpuContext {
fn from(trap: TrapFrame) -> Self {
Self {
gp_regs: GpRegs {
r8: trap.caller.r8,
r9: trap.caller.r9,
r10: trap.caller.r10,
r11: trap.caller.r11,
r12: trap.callee.r12,
r13: trap.callee.r13,
r14: trap.callee.r14,
r15: trap.callee.r15,
rdi: trap.caller.rdi,
rsi: trap.caller.rsi,
rbp: trap.callee.rbp,
rbx: trap.callee.rbx,
rdx: trap.caller.rdx,
rax: trap.caller.rax,
rcx: trap.caller.rcx,
rsp: trap.rsp,
rip: trap.rip,
rflag: trap.rflags,
},
fs_base: rdfsbase(),
fs_base: value.general.fsbase as u64,
fp_regs: FpRegs::default(),
trap_information: TrapInformation {
cr2: trap.cr2,
id: trap.id,
err: trap.err,
cs: trap.cs,
ss: trap.ss,
cr2: x86_64::registers::control::Cr2::read_raw(),
id: value.trap_num as u64,
err: value.error_code as u64,
},
gs_base: value.general.gsbase as u64,
}
}
}
impl Into<TrapFrame> for CpuContext {
fn into(self) -> TrapFrame {
let trap_information = self.trap_information;
TrapFrame {
caller: CallerRegs {
rax: self.gp_regs.rax,
rcx: self.gp_regs.rcx,
rdx: self.gp_regs.rdx,
rsi: self.gp_regs.rsi,
rdi: self.gp_regs.rdi,
r8: self.gp_regs.r8,
r9: self.gp_regs.r9,
r10: self.gp_regs.r10,
r11: self.gp_regs.r11,
impl Into<UserContext> for CpuContext {
fn into(self) -> UserContext {
UserContext {
trap_num: self.trap_information.id as usize,
error_code: self.trap_information.err as usize,
general: GeneralRegs {
rax: self.gp_regs.rax as usize,
rbx: self.gp_regs.rbx as usize,
rcx: self.gp_regs.rcx as usize,
rdx: self.gp_regs.rdx as usize,
rsi: self.gp_regs.rsi as usize,
rdi: self.gp_regs.rdi as usize,
rbp: self.gp_regs.rbp as usize,
rsp: self.gp_regs.rsp as usize,
r8: self.gp_regs.r8 as usize,
r9: self.gp_regs.r9 as usize,
r10: self.gp_regs.r10 as usize,
r11: self.gp_regs.r11 as usize,
r12: self.gp_regs.r12 as usize,
r13: self.gp_regs.r13 as usize,
r14: self.gp_regs.r14 as usize,
r15: self.gp_regs.r15 as usize,
rip: self.gp_regs.rip as usize,
rflags: self.gp_regs.rflag as usize,
fsbase: self.fs_base as usize,
gsbase: self.gs_base as usize,
},
callee: CalleeRegs {
rsp: self.gp_regs.rsp,
rbx: self.gp_regs.rbx,
rbp: self.gp_regs.rbp,
r12: self.gp_regs.r12,
r13: self.gp_regs.r13,
r14: self.gp_regs.r14,
r15: self.gp_regs.r15,
},
id: trap_information.id,
err: trap_information.err,
cr2: trap_information.cr2,
rip: self.gp_regs.rip,
cs: trap_information.cs,
rflags: self.gp_regs.rflag,
rsp: self.gp_regs.rsp,
ss: trap_information.ss,
}
}
}

View File

@ -41,8 +41,9 @@ pub use device::serial::receive_char;
pub use limine::LimineModuleRequest;
pub use mm::address::{align_down, align_up, is_aligned, virt_to_phys};
pub use mm::page_table::translate_not_offset_virtual_address;
pub use trap::{allocate_irq, IrqAllocateHandle, TrapFrame};
pub use trap::{allocate_irq, IrqAllocateHandle};
use trap::{IrqCallbackHandle, IrqLine};
pub use trapframe::TrapFrame;
pub use util::AlignExt;
pub use x86_64::registers::rflags::read as get_rflags;
pub use x86_64::registers::rflags::RFlags;

View File

@ -8,8 +8,7 @@ use crate::{config::PAGE_SIZE, vm::Paddr};
use super::address::PhysAddr;
static FRAME_ALLOCATOR: Once<Mutex<FrameAllocator>> = Once::new();
static FRAME_ALLOCATOR: Once<Mutex<FrameAllocator>> = Once::new();
#[derive(Debug, Clone)]
// #[repr(transparent)]
@ -28,23 +27,33 @@ impl PhysFrame {
}
pub fn alloc() -> Option<Self> {
FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|pa| Self {
frame_index: pa,
need_dealloc: true,
})
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(1)
.map(|pa| Self {
frame_index: pa,
need_dealloc: true,
})
}
pub fn alloc_continuous_range(frame_count: usize) -> Option<Vec<Self>> {
FRAME_ALLOCATOR.get().unwrap().lock().alloc(frame_count).map(|start| {
let mut vector = Vec::new();
for i in 0..frame_count {
vector.push(Self {
frame_index: start + i,
need_dealloc: true,
})
}
vector
})
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.alloc(frame_count)
.map(|start| {
let mut vector = Vec::new();
for i in 0..frame_count {
vector.push(Self {
frame_index: start + i,
need_dealloc: true,
})
}
vector
})
}
pub fn alloc_with_paddr(paddr: Paddr) -> Option<Self> {
@ -73,7 +82,11 @@ impl PhysFrame {
impl Drop for PhysFrame {
fn drop(&mut self) {
if self.need_dealloc {
FRAME_ALLOCATOR.get().unwrap().lock().dealloc(self.frame_index, 1);
FRAME_ALLOCATOR
.get()
.unwrap()
.lock()
.dealloc(self.frame_index, 1);
}
}
}
@ -94,5 +107,5 @@ pub(crate) fn init(regions: &Vec<&LimineMemmapEntry>) {
);
}
}
FRAME_ALLOCATOR.call_once(||Mutex::new(allocator));
}
FRAME_ALLOCATOR.call_once(|| Mutex::new(allocator));
}

View File

@ -5,10 +5,6 @@ mod scheduler;
#[allow(clippy::module_inception)]
mod task;
pub(crate) use self::processor::get_idle_task_cx_ptr;
pub use self::processor::schedule;
pub use self::scheduler::{set_scheduler, Scheduler};
pub(crate) use self::task::context_switch;
pub(crate) use self::task::TaskContext;
pub(crate) use self::task::SWITCH_TO_USER_SPACE_TASK;
pub use self::task::{Task, TaskAdapter, TaskStatus};

View File

@ -1,14 +1,9 @@
use core::mem::size_of;
use lazy_static::lazy_static;
use spin::{Mutex, MutexGuard};
use crate::cell::Cell;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE};
use crate::prelude::*;
use crate::task::processor::switch_to_task;
use crate::trap::{CalleeRegs, SyscallFrame, TrapFrame};
use crate::user::{syscall_switch_to_user_space, trap_switch_to_user_space, UserSpace};
use crate::user::UserSpace;
use crate::vm::{VmAllocOptions, VmFrameVec};
use intrusive_collections::intrusive_adapter;
@ -17,6 +12,19 @@ use intrusive_collections::LinkedListAtomicLink;
use super::processor::{current_task, schedule};
core::arch::global_asm!(include_str!("switch.S"));
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct CalleeRegs {
pub rsp: u64,
pub rbx: u64,
pub rbp: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub(crate) struct TaskContext {
@ -28,56 +36,6 @@ extern "C" {
pub(crate) fn context_switch(cur: *mut TaskContext, nxt: *const TaskContext);
}
fn context_switch_to_user_space() {
let task = Task::current();
let switch_space_task = SWITCH_TO_USER_SPACE_TASK.get();
if task.inner_exclusive_access().is_from_trap {
*switch_space_task.trap_frame() = *task.trap_frame();
unsafe {
trap_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.trap_frame(),
);
}
} else {
*switch_space_task.syscall_frame() = *task.syscall_frame();
unsafe {
syscall_switch_to_user_space(
&task.user_space.as_ref().unwrap().cpu_ctx,
switch_space_task.syscall_frame(),
);
}
}
}
lazy_static! {
/// This variable is mean to switch to user space and then switch back in `UserMode.execute`
///
/// When context switch to this task, there is no need to set the current task
pub(crate) static ref SWITCH_TO_USER_SPACE_TASK : Cell<Task> =
Cell::new({
let task = Task{
func: Box::new(context_switch_to_user_space),
data: Box::new(None::<u8>),
user_space: None,
task_inner: Mutex::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap:false,
}),
exit_code: usize::MAX,
kstack: KernelStack::new(),
link: LinkedListAtomicLink::new(),
};
task.task_inner.lock().task_status = TaskStatus::Runnable;
task.task_inner.lock().ctx.rip = context_switch_to_user_space as usize;
task.task_inner.lock().ctx.regs.rsp = (task.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
task
});
}
pub struct KernelStack {
frame: VmFrameVec,
}
@ -85,8 +43,10 @@ pub struct KernelStack {
impl KernelStack {
pub fn new() -> Self {
Self {
frame: VmFrameVec::allocate(&VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE))
.expect("out of memory"),
frame: VmFrameVec::allocate(
&VmAllocOptions::new(KERNEL_STACK_SIZE / PAGE_SIZE).is_contiguous(true),
)
.expect("out of memory"),
}
}
}
@ -109,8 +69,6 @@ intrusive_adapter!(pub TaskAdapter = Arc<Task>: Task { link: LinkedListAtomicLin
pub(crate) struct TaskInner {
pub task_status: TaskStatus,
pub ctx: TaskContext,
/// whether the task from trap. If it is Trap, then you should use read TrapFrame instead of SyscallFrame
pub is_from_trap: bool,
}
impl Task {
@ -166,7 +124,6 @@ impl Task {
task_inner: Mutex::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
}),
exit_code: 0,
kstack: KernelStack::new(),
@ -175,9 +132,8 @@ impl Task {
result.task_inner.lock().task_status = TaskStatus::Runnable;
result.task_inner.lock().ctx.rip = kernel_task_entry as usize;
result.task_inner.lock().ctx.regs.rsp = (result.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
result.task_inner.lock().ctx.regs.rsp =
(result.kstack.frame.end_pa().unwrap().kvaddr().0) as u64;
let arc_self = Arc::new(result);
switch_to_task(arc_self.clone());
@ -208,7 +164,6 @@ impl Task {
task_inner: Mutex::new(TaskInner {
task_status: TaskStatus::Runnable,
ctx: TaskContext::default(),
is_from_trap: false,
}),
exit_code: 0,
kstack: KernelStack::new(),
@ -217,9 +172,8 @@ impl Task {
result.task_inner.lock().task_status = TaskStatus::Runnable;
result.task_inner.lock().ctx.rip = kernel_task_entry as usize;
result.task_inner.lock().ctx.regs.rsp = (result.kstack.frame.end_pa().unwrap().kvaddr().0
- size_of::<usize>()
- size_of::<SyscallFrame>()) as u64;
result.task_inner.lock().ctx.regs.rsp =
(result.kstack.frame.end_pa().unwrap().kvaddr().0) as u64;
Ok(Arc::new(result))
}
@ -228,32 +182,6 @@ impl Task {
switch_to_task(self.clone());
}
pub(crate) fn syscall_frame(&self) -> &mut SyscallFrame {
unsafe {
&mut *(self
.kstack
.frame
.end_pa()
.unwrap()
.kvaddr()
.get_mut::<SyscallFrame>() as *mut SyscallFrame)
.sub(1)
}
}
pub(crate) fn trap_frame(&self) -> &mut TrapFrame {
unsafe {
&mut *(self
.kstack
.frame
.end_pa()
.unwrap()
.kvaddr()
.get_mut::<TrapFrame>() as *mut TrapFrame)
.sub(1)
}
}
/// Returns the task status.
pub fn status(&self) -> TaskStatus {
self.task_inner.lock().task_status

View File

@ -1,63 +1,27 @@
use crate::task::{
context_switch, get_idle_task_cx_ptr, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK,
};
use super::{irq::IRQ_LIST, *};
use trapframe::TrapFrame;
/// Only from kernel
#[no_mangle]
pub(crate) extern "C" fn syscall_handler(f: &mut SyscallFrame) -> isize {
let r = &f.caller;
let current = Task::current();
current.inner_exclusive_access().is_from_trap = false;
*current.syscall_frame() = *SWITCH_TO_USER_SPACE_TASK.get().syscall_frame();
unsafe {
context_switch(
get_idle_task_cx_ptr() as *mut TaskContext,
&Task::current().inner_ctx() as *const TaskContext,
)
extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
if is_cpu_fault(f) {
panic!("cannot handle kernel cpu fault now, information:{:#x?}", f);
}
-1
call_irq_callback_functions(f);
}
#[no_mangle]
pub(crate) extern "C" fn trap_handler(f: &mut TrapFrame) {
if !is_from_kernel(f.cs) {
let current = Task::current();
current.inner_exclusive_access().is_from_trap = true;
*current.trap_frame() = *SWITCH_TO_USER_SPACE_TASK.trap_frame();
if is_cpu_fault(current.trap_frame()) {
// if is cpu fault, we will pass control to trap handler in jinux std
unsafe {
context_switch(
get_idle_task_cx_ptr() as *mut TaskContext,
&Task::current().inner_ctx() as *const TaskContext,
)
}
}
} else {
if is_cpu_fault(f) {
panic!("cannot handle kernel cpu fault now, information:{:#x?}", f);
}
}
let irq_line = IRQ_LIST.get(f.id as usize).unwrap();
pub(crate) fn call_irq_callback_functions(f: &mut TrapFrame) {
let irq_line = IRQ_LIST.get(f.trap_num as usize).unwrap();
let callback_functions = irq_line.callback_list();
for callback_function in callback_functions.iter() {
callback_function.call(f);
}
if f.id >= 0x20 {
if f.trap_num >= 0x20 {
crate::driver::xapic_ack();
crate::driver::pic_ack();
}
}
fn is_from_kernel(cs: u64) -> bool {
if cs & 0x3 == 0 {
true
} else {
false
}
}
/// As Osdev Wiki defines(https://wiki.osdev.org/Exceptions):
/// CPU exceptions are classified as:
@ -68,7 +32,7 @@ fn is_from_kernel(cs: u64) -> bool {
/// This function will determine a trap is a CPU faults.
/// We will pass control to jinux-std if the trap is **faults**.
pub fn is_cpu_fault(trap_frame: &TrapFrame) -> bool {
match trap_frame.id {
match trap_frame.trap_num as u64 {
DIVIDE_BY_ZERO
| DEBUG
| BOUND_RANGE_EXCEEDED

View File

@ -1,10 +1,10 @@
use crate::{prelude::*, Error};
use super::TrapFrame;
use crate::util::recycle_allocator::RecycleAllocator;
use core::fmt::Debug;
use lazy_static::lazy_static;
use spin::{Mutex, MutexGuard};
use trapframe::TrapFrame;
lazy_static! {
/// The IRQ numbers which are not using

View File

@ -1,192 +1,13 @@
mod handler;
mod irq;
use crate::cell::Cell;
use lazy_static::lazy_static;
use x86_64::{
registers::{
model_specific::{self, EferFlags},
rflags::RFlags,
},
structures::{gdt::*, tss::TaskStateSegment},
};
pub(crate) use self::handler::call_irq_callback_functions;
pub use self::irq::{allocate_irq, IrqAllocateHandle};
pub(crate) use self::irq::{allocate_target_irq, IrqCallbackHandle, IrqLine};
use core::{fmt::Debug, mem::size_of_val};
use crate::{x86_64_util::*, *};
core::arch::global_asm!(include_str!("trap.S"));
core::arch::global_asm!(include_str!("vector.S"));
#[derive(Default, Clone, Copy)]
#[repr(C)]
pub struct CallerRegs {
pub rax: u64,
pub rcx: u64,
pub rdx: u64,
pub rsi: u64,
pub rdi: u64,
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
}
impl Debug for CallerRegs {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.write_fmt(format_args!("rax: 0x{:x}, rcx: 0x{:x}, rdx: 0x{:x}, rsi: 0x{:x}, rdi: 0x{:x}, r8: 0x{:x}, r9: 0x{:x}, r10: 0x{:x}, r11: 0x{:x}",
self.rax, self.rcx, self.rdx, self.rsi, self.rdi, self.r8, self.r9, self.r10, self.r11))?;
Ok(())
}
}
#[derive(Default, Clone, Copy)]
#[repr(C)]
pub struct CalleeRegs {
pub rsp: u64,
pub rbx: u64,
pub rbp: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
}
impl Debug for CalleeRegs {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.write_fmt(format_args!("rsp: 0x{:x}, rbx: 0x{:x}, rbp: 0x{:x}, r12: 0x{:x}, r13: 0x{:x}, r14: 0x{:x}, r15: 0x{:x}", self.rsp, self.rbx, self.rbp, self.r12, self.r13, self.r14, self.r15))?;
Ok(())
}
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct SyscallFrame {
pub caller: CallerRegs,
pub callee: CalleeRegs,
}
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct TrapFrame {
pub cr2: u64,
pub caller: CallerRegs,
// do not use the rsp inside the callee, use another rsp instead
pub callee: CalleeRegs,
pub id: u64,
pub err: u64,
// Pushed by CPU
pub rip: u64,
pub cs: u64,
pub rflags: u64,
pub rsp: u64,
pub ss: u64,
}
const TSS_SIZE: usize = 104;
extern "C" {
/// TSS
static TSS: [u8; TSS_SIZE];
/// All interrupt vectors push an id and then jump to trap_entry
static __vectors: [usize; 256];
fn syscall_entry();
}
lazy_static! {
static ref GDT: Cell<GlobalDescriptorTable> = Cell::new(GlobalDescriptorTable::new());
}
#[repr(C, align(16))]
struct IDT {
/**
* The structure of all entries in IDT are shown below:
* related link: https://wiki.osdev.org/IDT#Structure_on_x86-64
* Low 64 bits of entry:
* |0-------------------------------------15|16------------------------------31|
* | Low 16 bits of target address | Segment Selector |
* |32-34|35------39|40-------43|44|45-46|47|48------------------------------63|
* | IST | Reserved | Gate Type | 0| DPL |P | Middle 16 bits of target address |
* |---------------------------------------------------------------------------|
* High 64 bits of entry:
* |64-----------------------------------------------------------------------95|
* | High 32 bits of target address |
* |96----------------------------------------------------------------------127|
* | Reserved |
* |---------------------------------------------------------------------------|
*/
entries: [[usize; 2]; 256],
}
impl IDT {
const fn default() -> Self {
Self {
entries: [[0; 2]; 256],
}
}
}
static mut IDT: IDT = IDT::default();
pub(crate) fn init() {
// FIXME: use GDT in x86_64 crate in
let tss = unsafe { &*(TSS.as_ptr() as *const TaskStateSegment) };
let gdt = GDT.get();
let kcs = gdt.add_entry(Descriptor::kernel_code_segment());
let kss = gdt.add_entry(Descriptor::kernel_data_segment());
let uss = gdt.add_entry(Descriptor::user_data_segment());
let ucs = gdt.add_entry(Descriptor::user_code_segment());
let tss_load = gdt.add_entry(Descriptor::tss_segment(tss));
gdt.load();
x86_64_util::set_cs(kcs.0);
x86_64_util::set_ss(kss.0);
load_tss(tss_load.0);
unsafe {
// enable syscall extensions
model_specific::Efer::update(|efer_flags| {
efer_flags.insert(EferFlags::SYSTEM_CALL_EXTENSIONS);
});
}
model_specific::Star::write(ucs, uss, kcs, kss)
.expect("error when configure star msr register");
// set the syscall entry
model_specific::LStar::write(x86_64::VirtAddr::new(syscall_entry as u64));
model_specific::SFMask::write(
RFlags::TRAP_FLAG
| RFlags::DIRECTION_FLAG
// | RFlags::INTERRUPT_FLAG
| RFlags::IOPL_LOW
| RFlags::IOPL_HIGH
| RFlags::NESTED_TASK
| RFlags::ALIGNMENT_CHECK,
);
// initialize the trap entry for all irq number
for i in 0..256 {
let p = unsafe { __vectors[i] };
// set gate type to 1110: 64 bit Interrupt Gate, Present bit to 1, DPL to Ring 0
let p_low = (((p >> 16) & 0xFFFF) << 48) | (p & 0xFFFF);
let trap_entry_option: usize = 0b1000_1110_0000_0000;
let low = (trap_entry_option << 32) | ((kcs.0 as usize) << 16) | p_low;
let high = p >> 32;
unsafe {
IDT.entries[i] = [low, high];
}
}
unsafe {
lidt(&DescriptorTablePointer {
limit: size_of_val(&IDT) as u16 - 1,
base: &IDT as *const _ as _,
})
trapframe::init();
}
}

View File

@ -1,126 +0,0 @@
.data
.align 4
TSS:
.space 104
.text
.macro save
push r11
push r10
push r9
push r8
push rdi
push rsi
push rdx
push rcx
push rax
.endm
.macro restore
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
.endm
.global __trap_entry
__trap_entry:
#
push r15
push r14
push r13
push r12
push rbp
push rbx
# mov rdi, 0
push rdi
save
# save cr2
mov rdi, cr2
push rdi
# trap_handler
mov rdi, rsp
call trap_handler
__trap_return:
# judge whether the trap from kernel mode
mov rax, [rsp + 160] # 160 = offsetof(TrapFrame, cs)
and rax, 0x3
jz __from_kernel
lea rax, [rsp + 192] # prepare new TSS.sp0, 192 = sizeof(TrapFrame)
mov [TSS + rip + 4], rax
__from_kernel:
add rsp, 8 # skip cr2
restore
add rsp,8 # skip rsp in callee
pop rbx
pop rbp
pop r12
pop r13
pop r14
pop r15
add rsp, 16 # skip TrapFrame.err and id
iretq
.global syscall_entry
syscall_entry:
# syscall instruction do:
# - load cs, ss from STAR MSR
# - r11 <- rflags, mask rflags from RFMASK MSR
# - rcx <- rip, load rip from LSTAR MSR
# temporarily store user rsp into TSS.sp0 and load kernel rsp from it.
xchg rsp, [TSS + rip + 4]
push r15
push r14
push r13
push r12
push rbp
push rbx
push [TSS + rip + 4] # store user rsp into SyscallFrame.rsp
save
mov rdi, rsp
call syscall_handler
mov [rsp], rax # CallerRegs.rax is at offset 0
jmp __syscall_return
.global syscall_return
syscall_return: # (SyscallFrame *)
mov rsp, rdi
__syscall_return:
lea rax, [rsp + 128] # prepare new TSS.sp0, 128 = sizeof(SyscallFrame)
# store the rsp in TSS
mov [TSS + rip + 4], rax
restore
mov rbx, [rsp + 8]
mov rbp, [rsp + 16]
mov r12, [rsp + 24]
mov r13, [rsp + 32]
mov r14, [rsp + 40]
mov r15, [rsp + 48]
mov rsp, [rsp + 0]
sysretq
.global syscall_switch_to_user_space
syscall_switch_to_user_space: # (cpu_context: *CpuContext,reg: *SyscallFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
jmp syscall_return
.global trap_switch_to_user_space
trap_switch_to_user_space: # (cpu_context: *CpuContext,reg: *TrapFrame)
# mov rflag, [rdi+136]
mov rdi, rsi
mov rsp, rdi
jmp __trap_return

File diff suppressed because it is too large Load Diff

View File

@ -1,24 +1,16 @@
//! User space.
use crate::x86_64_util::{rdfsbase, wrfsbase};
use crate::trap::call_irq_callback_functions;
use crate::x86_64_util::{self, rdfsbase, wrfsbase};
use log::debug;
use trapframe::{TrapFrame, UserContext};
use x86_64::registers::rflags::RFlags;
use crate::cpu::CpuContext;
use crate::prelude::*;
use crate::task::{context_switch, Task, TaskContext, SWITCH_TO_USER_SPACE_TASK};
use crate::trap::{SyscallFrame, TrapFrame};
use crate::task::Task;
use crate::vm::VmSpace;
extern "C" {
pub(crate) fn syscall_switch_to_user_space(
cpu_context: &CpuContext,
syscall_frame: &SyscallFrame,
);
/// cpu_context may delete in the future
pub(crate) fn trap_switch_to_user_space(cpu_context: &CpuContext, trap_frame: &TrapFrame);
}
/// A user space.
///
/// Each user space has a VM address space and allows a task to execute in
@ -86,6 +78,7 @@ pub struct UserMode<'a> {
current: Arc<Task>,
user_space: &'a Arc<UserSpace>,
context: CpuContext,
user_context: UserContext,
executed: bool,
}
@ -99,6 +92,7 @@ impl<'a> UserMode<'a> {
user_space,
context: CpuContext::default(),
executed: false,
user_context: UserContext::default(),
}
}
@ -116,12 +110,10 @@ impl<'a> UserMode<'a> {
self.user_space.vm_space().activate();
}
if !self.executed {
*self.current.syscall_frame() = self.user_space.cpu_ctx.into();
self.context = self.user_space.cpu_ctx;
if self.context.gp_regs.rflag == 0 {
self.context.gp_regs.rflag = (RFlags::INTERRUPT_FLAG | RFlags::ID).bits();
self.context.gp_regs.rflag = (RFlags::INTERRUPT_FLAG | RFlags::ID).bits() | 0x2;
}
self.current.syscall_frame().caller.r11 = self.context.gp_regs.rflag;
self.current.syscall_frame().caller.rcx = self.user_space.cpu_ctx.gp_regs.rip;
// write fsbase
wrfsbase(self.user_space.cpu_ctx.fs_base);
let fp_regs = self.user_space.cpu_ctx.fp_regs;
@ -130,15 +122,6 @@ impl<'a> UserMode<'a> {
}
self.executed = true;
} else {
if self.current.inner_exclusive_access().is_from_trap {
*self.current.trap_frame() = self.context.into();
} else {
*self.current.syscall_frame() = self.context.into();
self.context.gp_regs.rflag |= RFlags::INTERRUPT_FLAG.bits();
self.current.syscall_frame().caller.r11 = self.context.gp_regs.rflag;
self.current.syscall_frame().caller.rcx = self.context.gp_regs.rip;
}
// write fsbase
if rdfsbase() != self.context.fs_base {
debug!("write fsbase: 0x{:x}", self.context.fs_base);
@ -151,26 +134,44 @@ impl<'a> UserMode<'a> {
// fp_regs.restore();
// }
}
let mut current_task_inner = self.current.inner_exclusive_access();
let binding = SWITCH_TO_USER_SPACE_TASK.get();
let next_task_inner = binding.inner_exclusive_access();
let current_ctx = &mut current_task_inner.ctx as *mut TaskContext;
let next_ctx = &next_task_inner.ctx as *const TaskContext;
drop(current_task_inner);
drop(next_task_inner);
drop(binding);
unsafe {
context_switch(current_ctx, next_ctx);
// switch_to_user_space(&self.user_space.cpu_ctx, self.current.syscall_frame());
self.user_context = self.context.into();
self.user_context.run();
let mut trap_frame;
while self.user_context.trap_num >= 0x20 && self.user_context.trap_num < 0x100 {
trap_frame = TrapFrame {
rax: self.user_context.general.rax,
rbx: self.user_context.general.rbx,
rcx: self.user_context.general.rcx,
rdx: self.user_context.general.rdx,
rsi: self.user_context.general.rsi,
rdi: self.user_context.general.rdi,
rbp: self.user_context.general.rbp,
rsp: self.user_context.general.rsp,
r8: self.user_context.general.r8,
r9: self.user_context.general.r9,
r10: self.user_context.general.r10,
r11: self.user_context.general.r11,
r12: self.user_context.general.r12,
r13: self.user_context.general.r13,
r14: self.user_context.general.r14,
r15: self.user_context.general.r15,
_pad: 0,
trap_num: self.user_context.trap_num,
error_code: self.user_context.error_code,
rip: self.user_context.general.rip,
cs: 0,
rflags: self.user_context.general.rflags,
};
call_irq_callback_functions(&mut trap_frame);
self.user_context.run();
}
if self.current.inner_exclusive_access().is_from_trap {
self.context = CpuContext::from(*self.current.trap_frame());
x86_64::instructions::interrupts::enable();
self.context = CpuContext::from(self.user_context);
if self.user_context.trap_num != 0x100 {
self.context.fs_base = rdfsbase();
// self.context.fp_regs.save();
UserEvent::Exception
} else {
self.context = CpuContext::from(*self.current.syscall_frame());
self.context.fs_base = rdfsbase();
// self.context.fp_regs.save();
// debug!("[kernel] syscall id:{}", self.context.gp_regs.rax);

View File

@ -31,6 +31,16 @@ impl VmFrameVec {
pub fn allocate(options: &VmAllocOptions) -> Result<Self> {
let page_size = options.page_size;
let mut frame_list = Vec::new();
if options.is_contiguous {
if options.paddr.is_some() {
panic!("not support contiguous paddr");
}
let frames = VmFrame::alloc_continuous(options.page_size);
if frames.is_none() {
return Err(Error::NoMemory);
}
return Ok(Self(frames.unwrap()));
}
for i in 0..page_size {
let vm_frame = if let Some(paddr) = options.paddr {
VmFrame::alloc_with_paddr(paddr + i * PAGE_SIZE)
@ -204,6 +214,7 @@ impl<'a> Iterator for VmFrameVecIter<'a> {
pub struct VmAllocOptions {
page_size: usize,
paddr: Option<Paddr>,
is_contiguous: bool,
}
impl VmAllocOptions {
@ -212,6 +223,7 @@ impl VmAllocOptions {
Self {
page_size: len,
paddr: None,
is_contiguous: false,
}
}
@ -232,7 +244,8 @@ impl VmAllocOptions {
///
/// The default value is `false`.
pub fn is_contiguous(&mut self, is_contiguous: bool) -> &mut Self {
todo!()
self.is_contiguous = is_contiguous;
self
}
/// Sets whether the pages can be accessed by devices through
@ -291,6 +304,21 @@ impl VmFrame {
})
}
/// Allocate contiguous VmFrame
pub(crate) fn alloc_continuous(frame_count: usize) -> Option<Vec<Self>> {
let phys = PhysFrame::alloc_continuous_range(frame_count);
if phys.is_none() {
return None;
}
let mut res = Vec::new();
for i in phys.unwrap() {
res.push(Self {
physical_frame: Arc::new(i),
})
}
Some(res)
}
/// Allocate a new VmFrame filled with zero
pub(crate) fn alloc_zero() -> Option<Self> {
let phys = PhysFrame::alloc_zero();

View File

@ -46,7 +46,7 @@ impl VirtioInputDevice {
fn handle_input(frame: &TrapFrame) {
debug!("in handle input");
let input_component = crate::INPUT_COMPONENT.get().unwrap();
input_component.call(frame.id as u8);
input_component.call(frame.trap_num as u8);
}
fn config_space_change(_: &TrapFrame) {
debug!("input device config space change");