Refactor the read/write operations to userspace

Chen Chengjun
2024-06-28 09:39:51 +08:00
committed by Tate, Hongliang Tian
parent 09b6153d29
commit af908c29cf
7 changed files with 234 additions and 47 deletions

View File

@ -10,16 +10,18 @@ use log::debug;
use tdx_guest::tdcall;
use trapframe::TrapFrame;
use super::ex_table::ExTable;
#[cfg(feature = "intel_tdx")]
use crate::arch::{cpu::VIRTUALIZATION_EXCEPTION, tdx_guest::handle_virtual_exception};
use crate::{
cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT},
cpu::{CpuException, CpuExceptionInfo, PageFaultErrorCode, PAGE_FAULT},
cpu_local,
mm::{
kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE,
PageFlags, PrivilegedPageFlags as PrivFlags, MAX_USERSPACE_VADDR, PAGE_SIZE,
},
task::current_task,
trap::call_irq_callback_functions,
};
@ -45,7 +47,14 @@ extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
handle_virtual_exception(f, &ve_info);
}
&PAGE_FAULT => {
handle_kernel_page_fault(f);
let page_fault_addr = x86_64::registers::control::Cr2::read().as_u64();
// The actual user space implementation should be responsible
// for providing a mechanism to handle the 0 virtual address.
if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
handle_user_page_fault(f, page_fault_addr);
} else {
handle_kernel_page_fault(f, page_fault_addr);
}
}
exception => {
panic!(
@ -61,10 +70,37 @@ extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
}
}
/// Handles page fault from user space.
fn handle_user_page_fault(f: &mut TrapFrame, page_fault_addr: u64) {
let current_task = current_task().unwrap();
let user_space = current_task
.user_space()
.expect("the user space is missing when a page fault from the user happens.");
let info = CpuExceptionInfo {
page_fault_addr: page_fault_addr as usize,
id: f.trap_num,
error_code: f.error_code,
};
let res = user_space.vm_space().handle_page_fault(&info);
// Copying byte by byte can recover directly
// if the page fault is handled successfully.
if res.is_ok() {
return;
}
// Use the exception table to recover to normal execution.
if let Some(addr) = ExTable::find_recovery_inst_addr(f.rip) {
f.rip = addr;
} else {
panic!("Cannot handle user page fault; Trapframe:{:#x?}.", f);
}
}
/// FIXME: this is a hack because we don't allocate kernel space for IO memory. We are currently
/// using the linear mapping for IO memory. This is not a good practice.
fn handle_kernel_page_fault(f: &TrapFrame) {
let page_fault_vaddr = x86_64::registers::control::Cr2::read().as_u64();
fn handle_kernel_page_fault(f: &TrapFrame, page_fault_vaddr: u64) {
let error_code = PageFaultErrorCode::from_bits_truncate(f.error_code);
debug!(
"kernel page fault: address {:?}, error code {:?}",

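With this change, a fault whose address lies below MAX_USERSPACE_VADDR is packed into a CpuExceptionInfo and forwarded to the VmSpace of the current task. Below is a minimal sketch of a handler such a VmSpace might register (the registration API is added in the next file); map_missing_frame is a hypothetical placeholder for the user-space memory manager and is not part of this commit.

// Sketch only: the handler signature and the `CpuExceptionInfo` fields follow
// this commit; `map_missing_frame` is a made-up helper.
fn demand_paging_handler(
    vm_space: &VmSpace,
    info: &CpuExceptionInfo,
) -> core::result::Result<(), ()> {
    // `page_fault_addr`, `id` (the trap number) and `error_code` are filled in
    // by `handle_user_page_fault` above.
    let fault_addr = info.page_fault_addr;
    // Resolve the fault, e.g., by mapping a frame at `fault_addr`. `Ok(())`
    // lets the faulting access resume; `Err(())` makes the trap handler fall
    // back to the exception table (or panic if no recovery address exists).
    map_missing_frame(vm_space, fault_addr).map_err(|_| ())
}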
View File

@ -2,17 +2,22 @@
use core::ops::Range;
use spin::Once;
use super::{
io::UserSpace,
is_page_aligned,
kspace::KERNEL_PAGE_TABLE,
page_table::{PageTable, PageTableMode, UserMode},
CachePolicy, FrameVec, PageFlags, PageProperty, PagingConstsTrait, PrivilegedPageFlags,
PAGE_SIZE,
VmReader, VmWriter, PAGE_SIZE,
};
use crate::{
arch::mm::{
tlb_flush_addr_range, tlb_flush_all_excluding_global, PageTableEntry, PagingConsts,
current_page_table_paddr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
PageTableEntry, PagingConsts,
},
cpu::CpuExceptionInfo,
mm::{
page_table::{Cursor, PageTableQueryResult as PtQr},
Frame, MAX_USERSPACE_VADDR,
@ -26,15 +31,22 @@ use crate::{
/// A virtual memory space (`VmSpace`) can be created and assigned to a user space so that
/// the virtual memory of the user space can be manipulated safely. For example,
/// given an arbitrary user-space pointer, one can read and write the memory
/// location refered to by the user-space pointer without the risk of breaking the
/// location referred to by the user-space pointer without the risk of breaking the
/// memory safety of the kernel space.
///
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory ([`Frame`]s) to the `VmSpace`.
#[derive(Debug)]
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to handle
/// page faults generated from user space.
#[allow(clippy::type_complexity)]
pub struct VmSpace {
pt: PageTable<UserMode>,
page_fault_handler: Once<fn(&VmSpace, &CpuExceptionInfo) -> core::result::Result<(), ()>>,
}
// Notes on TLB flushing:
@ -51,6 +63,7 @@ impl VmSpace {
pub fn new() -> Self {
Self {
pt: KERNEL_PAGE_TABLE.get().unwrap().create_user_page_table(),
page_fault_handler: Once::new(),
}
}
@ -59,6 +72,27 @@ impl VmSpace {
self.pt.activate();
}
pub(crate) fn handle_page_fault(
&self,
info: &CpuExceptionInfo,
) -> core::result::Result<(), ()> {
if let Some(func) = self.page_fault_handler.get() {
return func(self, info);
}
Err(())
}
/// Registers the page fault handler in this `VmSpace`.
///
/// The page fault handler of a `VmSpace` can only be initialized once.
/// If it has been initialized before, calling this method will have no effect.
pub fn register_page_fault_handler(
&self,
func: fn(&VmSpace, &CpuExceptionInfo) -> core::result::Result<(), ()>,
) {
self.page_fault_handler.call_once(|| func);
}
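A brief usage sketch, assuming a kernel component installs the hypothetical demand_paging_handler sketched earlier right after creating the space; since the handler is stored in a spin::Once, only the first registration takes effect.

// Sketch only: `demand_paging_handler` is the hypothetical handler above.
fn new_user_vm_space() -> VmSpace {
    let vm_space = VmSpace::new();
    // Later calls to `register_page_fault_handler` are silently ignored.
    vm_space.register_page_fault_handler(demand_paging_handler);
    vm_space
}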
/// Maps some physical memory pages into the VM space according to the given
/// options, returning the address where the mapping is created.
///
@ -116,7 +150,7 @@ impl VmSpace {
}
/// Queries about a range of virtual memory.
/// You will get a iterator of `VmQueryResult` which contains the information of
/// You will get an iterator of `VmQueryResult` which contains the information of
/// each part of the range.
pub fn query_range(&self, range: &Range<Vaddr>) -> Result<VmQueryIter> {
Ok(VmQueryIter {
@ -202,12 +236,62 @@ impl VmSpace {
/// read-only. And both VM spaces will take handles to the same
/// physical memory pages.
pub fn fork_copy_on_write(&self) -> Self {
let page_fault_handler = {
let new_handler = Once::new();
if let Some(handler) = self.page_fault_handler.get() {
new_handler.call_once(|| *handler);
}
new_handler
};
let new_space = Self {
pt: self.pt.fork_copy_on_write(),
page_fault_handler,
};
tlb_flush_all_excluding_global();
new_space
}
/// Creates a reader to read data from the user space of the current task.
///
/// Returns `Err` if this `VmSpace` does not belong to the user space of the current task,
/// or if `vaddr` and `len` do not represent a valid user space memory range.
pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'_, UserSpace>> {
if current_page_table_paddr() != unsafe { self.pt.root_paddr() } {
return Err(Error::AccessDenied);
}
if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR {
return Err(Error::AccessDenied);
}
// SAFETY: As long as the current task owns user space, the page table of
// the current task will be activated during the execution of the current task.
// Since `VmReader` is neither `Sync` nor `Send`, it will not live longer than
// the current task. Hence, it is ensured that the correct page table
// is activated during the usage period of the `VmReader`.
Ok(unsafe { VmReader::<UserSpace>::from_user_space(vaddr as *const u8, len) })
}
/// Creates a writer to write data into the user space.
///
/// Returns `Err` if this `VmSpace` does not belong to the user space of the current task,
/// or if `vaddr` and `len` do not represent a valid user space memory range.
pub fn writer(&self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'_, UserSpace>> {
if current_page_table_paddr() != unsafe { self.pt.root_paddr() } {
return Err(Error::AccessDenied);
}
if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR {
return Err(Error::AccessDenied);
}
// SAFETY: As long as the current task owns user space, the page table of
// the current task will be activated during the execution of the current task.
// Since `VmWriter` is neither `Sync` nor `Send`, it will not live longer than
// the current task. Hence, it is ensured that the correct page table
// is activated during the usage period of the `VmWriter`.
Ok(unsafe { VmWriter::<UserSpace>::from_user_space(vaddr as *mut u8, len) })
}
}
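A short sketch of how an upper layer might use the new reader/writer methods to copy bytes out of the current task's user space. It assumes the user-space VmReader exposes the same read-into-VmWriter interface as the kernel-space one and that the slice-to-VmWriter conversion still applies; a partial copy then indicates a fault that could not be resolved.

// Sketch only: assumes `VmReader::read(&mut VmWriter) -> usize` copies as many
// bytes as possible, as the kernel-space reader does.
fn copy_from_user(vm_space: &VmSpace, vaddr: Vaddr, buf: &mut [u8]) -> Result<usize> {
    // `reader` fails with `Error::AccessDenied` if this `VmSpace` is not the
    // active one or if `vaddr..vaddr + buf.len()` is not a user-space range.
    let mut reader = vm_space.reader(vaddr, buf.len())?;
    let mut writer = VmWriter::from(&mut *buf);
    // Every byte copied may page-fault; the fault is either resolved by the
    // registered handler or recovered through the exception table, in which
    // case fewer than `buf.len()` bytes are reported as copied.
    Ok(reader.read(&mut writer))
}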
impl Default for VmSpace {