Mirror of https://github.com/asterinas/asterinas.git, synced 2025-06-11 06:16:49 +00:00
Extract x86-specific trap handler code
This commit is contained in:
parent 28b66c1c8c
commit fa7d0a787f
@@ -14,6 +14,7 @@ pub mod task;
 #[cfg(feature = "intel_tdx")]
 pub(crate) mod tdx_guest;
 pub mod timer;
+pub mod trap;
 
 use core::{arch::x86_64::_rdtsc, sync::atomic::Ordering};
 
@@ -9,6 +9,7 @@ use tdx_guest::{
     tdvmcall::{cpuid, hlt, map_gpa, rdmsr, read_mmio, write_mmio, wrmsr, IoSize},
     TdxVirtualExceptionType,
 };
+use trapframe::TrapFrame;
 
 use crate::{
     arch::mm::PageTableFlags,
@@ -470,3 +471,103 @@ pub unsafe fn protect_gpa_range(gpa: TdxGpa, page_num: usize) -> Result<(), Page
     }
     Ok(())
 }
+
+#[cfg(feature = "intel_tdx")]
+impl TdxTrapFrame for TrapFrame {
+    fn rax(&self) -> usize {
+        self.rax
+    }
+    fn set_rax(&mut self, rax: usize) {
+        self.rax = rax;
+    }
+    fn rbx(&self) -> usize {
+        self.rbx
+    }
+    fn set_rbx(&mut self, rbx: usize) {
+        self.rbx = rbx;
+    }
+    fn rcx(&self) -> usize {
+        self.rcx
+    }
+    fn set_rcx(&mut self, rcx: usize) {
+        self.rcx = rcx;
+    }
+    fn rdx(&self) -> usize {
+        self.rdx
+    }
+    fn set_rdx(&mut self, rdx: usize) {
+        self.rdx = rdx;
+    }
+    fn rsi(&self) -> usize {
+        self.rsi
+    }
+    fn set_rsi(&mut self, rsi: usize) {
+        self.rsi = rsi;
+    }
+    fn rdi(&self) -> usize {
+        self.rdi
+    }
+    fn set_rdi(&mut self, rdi: usize) {
+        self.rdi = rdi;
+    }
+    fn rip(&self) -> usize {
+        self.rip
+    }
+    fn set_rip(&mut self, rip: usize) {
+        self.rip = rip;
+    }
+    fn r8(&self) -> usize {
+        self.r8
+    }
+    fn set_r8(&mut self, r8: usize) {
+        self.r8 = r8;
+    }
+    fn r9(&self) -> usize {
+        self.r9
+    }
+    fn set_r9(&mut self, r9: usize) {
+        self.r9 = r9;
+    }
+    fn r10(&self) -> usize {
+        self.r10
+    }
+    fn set_r10(&mut self, r10: usize) {
+        self.r10 = r10;
+    }
+    fn r11(&self) -> usize {
+        self.r11
+    }
+    fn set_r11(&mut self, r11: usize) {
+        self.r11 = r11;
+    }
+    fn r12(&self) -> usize {
+        self.r12
+    }
+    fn set_r12(&mut self, r12: usize) {
+        self.r12 = r12;
+    }
+    fn r13(&self) -> usize {
+        self.r13
+    }
+    fn set_r13(&mut self, r13: usize) {
+        self.r13 = r13;
+    }
+    fn r14(&self) -> usize {
+        self.r14
+    }
+    fn set_r14(&mut self, r14: usize) {
+        self.r14 = r14;
+    }
+    fn r15(&self) -> usize {
+        self.r15
+    }
+    fn set_r15(&mut self, r15: usize) {
+        self.r15 = r15;
+    }
+    fn rbp(&self) -> usize {
+        self.rbp
+    }
+    fn set_rbp(&mut self, rbp: usize) {
+        self.rbp = rbp;
+    }
+}
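Note: the accessors above implement the TdxTrapFrame abstraction that handle_virtual_exception is written against. The trait definition itself is not part of this diff; the sketch below is only inferred from the impl block, and the actual definition in the tdx_guest module may differ.

// Sketch only: inferred from the impl block above, not taken from this commit.
pub trait TdxTrapFrame {
    fn rax(&self) -> usize;
    fn set_rax(&mut self, rax: usize);
    fn rbx(&self) -> usize;
    fn set_rbx(&mut self, rbx: usize);
    fn rcx(&self) -> usize;
    fn set_rcx(&mut self, rcx: usize);
    fn rdx(&self) -> usize;
    fn set_rdx(&mut self, rdx: usize);
    fn rsi(&self) -> usize;
    fn set_rsi(&mut self, rsi: usize);
    fn rdi(&self) -> usize;
    fn set_rdi(&mut self, rdi: usize);
    fn rip(&self) -> usize;
    fn set_rip(&mut self, rip: usize);
    fn r8(&self) -> usize;
    fn set_r8(&mut self, r8: usize);
    fn r9(&self) -> usize;
    fn set_r9(&mut self, r9: usize);
    fn r10(&self) -> usize;
    fn set_r10(&mut self, r10: usize);
    fn r11(&self) -> usize;
    fn set_r11(&mut self, r11: usize);
    fn r12(&self) -> usize;
    fn set_r12(&mut self, r12: usize);
    fn r13(&self) -> usize;
    fn set_r13(&mut self, r13: usize);
    fn r14(&self) -> usize;
    fn set_r14(&mut self, r14: usize);
    fn r15(&self) -> usize;
    fn set_r15(&mut self, r15: usize);
    fn rbp(&self) -> usize;
    fn set_rbp(&mut self, rbp: usize);
}

Keeping the registers behind a trait lets the TDX #VE handling code stay independent of the concrete TrapFrame layout provided by the trapframe crate.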
framework/aster-frame/src/arch/x86/trap.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
// SPDX-License-Identifier: MPL-2.0

use align_ext::AlignExt;
use log::debug;
#[cfg(feature = "intel_tdx")]
use tdx_guest::tdcall;
use trapframe::TrapFrame;

#[cfg(feature = "intel_tdx")]
use crate::arch::{
    cpu::VIRTUALIZATION_EXCEPTION,
    mm::PageTableFlags,
    tdx_guest::{handle_virtual_exception, TdxTrapFrame},
};
use crate::{
    cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT},
    trap::call_irq_callback_functions,
    vm::{
        kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
        page_prop::{CachePolicy, PageProperty},
        PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE,
    },
};

/// Only from kernel
#[no_mangle]
extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
    if CpuException::is_cpu_exception(f.trap_num as u16) {
        match CpuException::to_cpu_exception(f.trap_num as u16).unwrap() {
            #[cfg(feature = "intel_tdx")]
            &VIRTUALIZATION_EXCEPTION => {
                let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
                handle_virtual_exception(f, &ve_info);
            }
            &PAGE_FAULT => {
                handle_kernel_page_fault(f);
            }
            exception => {
                panic!(
                    "Cannot handle kernel cpu exception:{:?}. Error code:{:x?}; Trapframe:{:#x?}.",
                    exception, f.error_code, f
                );
            }
        }
    } else {
        call_irq_callback_functions(f);
    }
}

/// FIXME: this is a hack because we don't allocate kernel space for IO memory. We are currently
/// using the linear mapping for IO memory. This is not a good practice.
fn handle_kernel_page_fault(f: &TrapFrame) {
    let page_fault_vaddr = x86_64::registers::control::Cr2::read().as_u64();
    let error_code = PageFaultErrorCode::from_bits_truncate(f.error_code);
    debug!(
        "kernel page fault: address {:?}, error code {:?}",
        page_fault_vaddr as *const (), error_code
    );

    assert!(
        LINEAR_MAPPING_VADDR_RANGE.contains(&(page_fault_vaddr as usize)),
        "kernel page fault: the address is outside the range of the linear mapping",
    );

    const SUPPORTED_ERROR_CODES: PageFaultErrorCode = PageFaultErrorCode::PRESENT
        .union(PageFaultErrorCode::WRITE)
        .union(PageFaultErrorCode::INSTRUCTION);
    assert!(
        SUPPORTED_ERROR_CODES.contains(error_code),
        "kernel page fault: the error code is not supported",
    );

    assert!(
        !error_code.contains(PageFaultErrorCode::INSTRUCTION),
        "kernel page fault: the direct mapping cannot be executed",
    );
    assert!(
        !error_code.contains(PageFaultErrorCode::PRESENT),
        "kernel page fault: the direct mapping already exists",
    );

    // Do the mapping
    let page_table = KERNEL_PAGE_TABLE
        .get()
        .expect("kernel page fault: the kernel page table is not initialized");
    let vaddr = (page_fault_vaddr as usize).align_down(PAGE_SIZE);
    let paddr = vaddr - LINEAR_MAPPING_BASE_VADDR;

    // SAFETY:
    // 1. We have checked that the page fault address falls within the address range of the direct
    //    mapping of physical memory.
    // 2. We map the address to the correct physical page with the correct flags, where the
    //    correctness follows the semantics of the direct mapping of physical memory.
    unsafe {
        page_table
            .map(
                &(vaddr..vaddr + PAGE_SIZE),
                &(paddr..paddr + PAGE_SIZE),
                PageProperty {
                    flags: PageFlags::RW,
                    cache: CachePolicy::Uncacheable,
                    priv_flags: PrivFlags::GLOBAL,
                },
            )
            .unwrap();
    }
}
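Note: handle_kernel_page_fault above demand-maps pages of the direct (linear) mapping on first touch. The address arithmetic it performs can be illustrated in isolation as follows; the constant values here are placeholders rather than the ones defined in aster-frame's kspace module.

// Standalone sketch of the linear-mapping arithmetic; constants are placeholders.
const PAGE_SIZE: usize = 4096;
const LINEAR_MAPPING_BASE_VADDR: usize = 0xffff_8000_0000_0000;

/// For a faulting virtual address inside the linear mapping, return the
/// page-aligned virtual address and the physical address it should map to.
fn linear_mapping_target(page_fault_vaddr: usize) -> (usize, usize) {
    // Align the fault address down to a page boundary (what AlignExt::align_down does).
    let vaddr = page_fault_vaddr & !(PAGE_SIZE - 1);
    // Undo the fixed offset of the direct mapping to obtain the physical address.
    let paddr = vaddr - LINEAR_MAPPING_BASE_VADDR;
    (vaddr, paddr)
}

fn main() {
    // Example: a fault at base + 0x1234 demand-maps the page backed by physical 0x1000.
    let (vaddr, paddr) = linear_mapping_target(LINEAR_MAPPING_BASE_VADDR + 0x1234);
    assert_eq!(vaddr, LINEAR_MAPPING_BASE_VADDR + 0x1000);
    assert_eq!(paddr, 0x1000);
}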
@@ -2,153 +2,9 @@
 
 use core::sync::atomic::{AtomicBool, Ordering};
 
-use align_ext::AlignExt;
-use log::debug;
-#[cfg(feature = "intel_tdx")]
-use tdx_guest::tdcall;
 use trapframe::TrapFrame;
 
-#[cfg(feature = "intel_tdx")]
-use crate::arch::{
-    cpu::VIRTUALIZATION_EXCEPTION,
-    mm::PageTableFlags,
-    tdx_guest::{handle_virtual_exception, TdxTrapFrame},
-};
-use crate::{
-    arch::irq::IRQ_LIST,
-    cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT},
-    cpu_local,
-    vm::{
-        kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
-        page_prop::{CachePolicy, PageProperty},
-        PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE,
-    },
-};
+use crate::{arch::irq::IRQ_LIST, cpu::CpuException, cpu_local};
 
-#[cfg(feature = "intel_tdx")]
-impl TdxTrapFrame for TrapFrame {
-    fn rax(&self) -> usize {
-        self.rax
-    }
-    fn set_rax(&mut self, rax: usize) {
-        self.rax = rax;
-    }
-    fn rbx(&self) -> usize {
-        self.rbx
-    }
-    fn set_rbx(&mut self, rbx: usize) {
-        self.rbx = rbx;
-    }
-    fn rcx(&self) -> usize {
-        self.rcx
-    }
-    fn set_rcx(&mut self, rcx: usize) {
-        self.rcx = rcx;
-    }
-    fn rdx(&self) -> usize {
-        self.rdx
-    }
-    fn set_rdx(&mut self, rdx: usize) {
-        self.rdx = rdx;
-    }
-    fn rsi(&self) -> usize {
-        self.rsi
-    }
-    fn set_rsi(&mut self, rsi: usize) {
-        self.rsi = rsi;
-    }
-    fn rdi(&self) -> usize {
-        self.rdi
-    }
-    fn set_rdi(&mut self, rdi: usize) {
-        self.rdi = rdi;
-    }
-    fn rip(&self) -> usize {
-        self.rip
-    }
-    fn set_rip(&mut self, rip: usize) {
-        self.rip = rip;
-    }
-    fn r8(&self) -> usize {
-        self.r8
-    }
-    fn set_r8(&mut self, r8: usize) {
-        self.r8 = r8;
-    }
-    fn r9(&self) -> usize {
-        self.r9
-    }
-    fn set_r9(&mut self, r9: usize) {
-        self.r9 = r9;
-    }
-    fn r10(&self) -> usize {
-        self.r10
-    }
-    fn set_r10(&mut self, r10: usize) {
-        self.r10 = r10;
-    }
-    fn r11(&self) -> usize {
-        self.r11
-    }
-    fn set_r11(&mut self, r11: usize) {
-        self.r11 = r11;
-    }
-    fn r12(&self) -> usize {
-        self.r12
-    }
-    fn set_r12(&mut self, r12: usize) {
-        self.r12 = r12;
-    }
-    fn r13(&self) -> usize {
-        self.r13
-    }
-    fn set_r13(&mut self, r13: usize) {
-        self.r13 = r13;
-    }
-    fn r14(&self) -> usize {
-        self.r14
-    }
-    fn set_r14(&mut self, r14: usize) {
-        self.r14 = r14;
-    }
-    fn r15(&self) -> usize {
-        self.r15
-    }
-    fn set_r15(&mut self, r15: usize) {
-        self.r15 = r15;
-    }
-    fn rbp(&self) -> usize {
-        self.rbp
-    }
-    fn set_rbp(&mut self, rbp: usize) {
-        self.rbp = rbp;
-    }
-}
-
-/// Only from kernel
-#[no_mangle]
-extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
-    if CpuException::is_cpu_exception(f.trap_num as u16) {
-        match CpuException::to_cpu_exception(f.trap_num as u16).unwrap() {
-            #[cfg(feature = "intel_tdx")]
-            &VIRTUALIZATION_EXCEPTION => {
-                let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
-                handle_virtual_exception(f, &ve_info);
-            }
-            &PAGE_FAULT => {
-                handle_kernel_page_fault(f);
-            }
-            exception => {
-                panic!(
-                    "Cannot handle kernel cpu exception:{:?}. Error code:{:x?}; Trapframe:{:#x?}.",
-                    exception, f.error_code, f
-                );
-            }
-        }
-    } else {
-        call_irq_callback_functions(f);
-    }
-}
-
 pub(crate) fn call_irq_callback_functions(trap_frame: &TrapFrame) {
     // For x86 CPUs, interrupts are not re-entrant. Local interrupts will be disabled when
@@ -180,62 +36,3 @@ cpu_local! {
 pub fn in_interrupt_context() -> bool {
     IN_INTERRUPT_CONTEXT.load(Ordering::Acquire)
 }
-
-/// FIXME: this is a hack because we don't allocate kernel space for IO memory. We are currently
-/// using the linear mapping for IO memory. This is not a good practice.
-fn handle_kernel_page_fault(f: &TrapFrame) {
-    let page_fault_vaddr = x86_64::registers::control::Cr2::read().as_u64();
-    let error_code = PageFaultErrorCode::from_bits_truncate(f.error_code);
-    debug!(
-        "kernel page fault: address {:?}, error code {:?}",
-        page_fault_vaddr as *const (), error_code
-    );
-
-    assert!(
-        LINEAR_MAPPING_VADDR_RANGE.contains(&(page_fault_vaddr as usize)),
-        "kernel page fault: the address is outside the range of the linear mapping",
-    );
-
-    const SUPPORTED_ERROR_CODES: PageFaultErrorCode = PageFaultErrorCode::PRESENT
-        .union(PageFaultErrorCode::WRITE)
-        .union(PageFaultErrorCode::INSTRUCTION);
-    assert!(
-        SUPPORTED_ERROR_CODES.contains(error_code),
-        "kernel page fault: the error code is not supported",
-    );
-
-    assert!(
-        !error_code.contains(PageFaultErrorCode::INSTRUCTION),
-        "kernel page fault: the direct mapping cannot be executed",
-    );
-    assert!(
-        !error_code.contains(PageFaultErrorCode::PRESENT),
-        "kernel page fault: the direct mapping already exists",
-    );
-
-    // Do the mapping
-    let page_table = KERNEL_PAGE_TABLE
-        .get()
-        .expect("kernel page fault: the kernel page table is not initialized");
-    let vaddr = (page_fault_vaddr as usize).align_down(PAGE_SIZE);
-    let paddr = vaddr - LINEAR_MAPPING_BASE_VADDR;
-
-    // SAFETY:
-    // 1. We have checked that the page fault address falls within the address range of the direct
-    //    mapping of physical memory.
-    // 2. We map the address to the correct physical page with the correct flags, where the
-    //    correctness follows the semantics of the direct mapping of physical memory.
-    unsafe {
-        page_table
-            .map(
-                &(vaddr..vaddr + PAGE_SIZE),
-                &(paddr..paddr + PAGE_SIZE),
-                PageProperty {
-                    flags: PageFlags::RW,
-                    cache: CachePolicy::Uncacheable,
-                    priv_flags: PrivFlags::GLOBAL,
-                },
-            )
-            .unwrap();
-    }
-}
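Note: after this commit the architecture-neutral trap module keeps the IRQ dispatch helper and the in_interrupt_context query shown above, while the x86 trap_handler and kernel page-fault handling live in framework/aster-frame/src/arch/x86/trap.rs. The flag protocol behind in_interrupt_context is roughly the sketch below; it uses one global AtomicBool in place of the real per-CPU variable declared with cpu_local!, and dispatch_irq_callbacks is a hypothetical stand-in for the dispatch done in call_irq_callback_functions.

use core::sync::atomic::{AtomicBool, Ordering};

// Simplified: a single global flag stands in for the per-CPU IN_INTERRUPT_CONTEXT.
static IN_INTERRUPT_CONTEXT: AtomicBool = AtomicBool::new(false);

// Hypothetical dispatch path: mark the CPU as being in interrupt context while
// the registered IRQ callbacks run, then clear the flag again.
fn dispatch_irq_callbacks(run_callbacks: impl FnOnce()) {
    IN_INTERRUPT_CONTEXT.store(true, Ordering::Release);
    run_callbacks();
    IN_INTERRUPT_CONTEXT.store(false, Ordering::Release);
}

pub fn in_interrupt_context() -> bool {
    IN_INTERRUPT_CONTEXT.load(Ordering::Acquire)
}

Code that must not run in an interrupt handler (for example, anything that may sleep) can assert !in_interrupt_context() before proceeding.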