Use tdx-guest crate VE handler and support release mode for TDX

Hsy-Intel 2024-08-09 18:56:43 +08:00 committed by Tate, Hongliang Tian
parent 7527d20d25
commit a739b3828d
7 changed files with 161 additions and 516 deletions

View File

@ -41,7 +41,7 @@ smoltcp = { version = "0.9.1", default-features = false, features = [
"socket-raw",
"socket-dhcpv4",
] }
tdx-guest = { version = "0.1.5", optional = true }
tdx-guest = { version = "0.1.7", optional = true }
# parse elf file
xmas-elf = "0.8.0"

View File

@ -8,7 +8,6 @@ use crate::{
error::Error,
events::IoEvents,
fs::{inode_handle::FileIo, utils::IoctlCmd},
prelude::*,
process::signal::Poller,
};

View File

@ -57,7 +57,7 @@ iced-x86 = { version = "1.21.0", default-features = false, features = [
"decoder",
"gas",
], optional = true }
tdx-guest = { version = "0.1.5", optional = true }
tdx-guest = { version = "0.1.7", optional = true }
[features]
default = ["cvm_guest", "log_color"]

View File

@ -23,8 +23,9 @@ use crate::{
cfg_if! {
if #[cfg(feature = "cvm_guest")] {
use tdx_guest::tdcall;
use crate::arch::tdx_guest::{handle_virtual_exception, TdxTrapFrame};
mod tdx;
use tdx::handle_virtualization_exception;
}
}
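For readers less familiar with cfg_if!, the gated block above is roughly equivalent to attaching the feature gate to each item individually; a minimal illustrative sketch of what the compiler ends up seeing (the exact macro expansion differs slightly):

#[cfg(feature = "cvm_guest")]
mod tdx;
#[cfg(feature = "cvm_guest")]
use tdx::handle_virtualization_exception;

Moving the #VE plumbing into its own tdx submodule is what lets the large TdxTrapFrame accessor impl below be deleted from this file.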
@ -49,106 +50,6 @@ pub struct CpuExceptionInfo {
pub page_fault_addr: usize,
}
#[cfg(feature = "cvm_guest")]
impl TdxTrapFrame for RawGeneralRegs {
fn rax(&self) -> usize {
self.rax
}
fn set_rax(&mut self, rax: usize) {
self.rax = rax;
}
fn rbx(&self) -> usize {
self.rbx
}
fn set_rbx(&mut self, rbx: usize) {
self.rbx = rbx;
}
fn rcx(&self) -> usize {
self.rcx
}
fn set_rcx(&mut self, rcx: usize) {
self.rcx = rcx;
}
fn rdx(&self) -> usize {
self.rdx
}
fn set_rdx(&mut self, rdx: usize) {
self.rdx = rdx;
}
fn rsi(&self) -> usize {
self.rsi
}
fn set_rsi(&mut self, rsi: usize) {
self.rsi = rsi;
}
fn rdi(&self) -> usize {
self.rdi
}
fn set_rdi(&mut self, rdi: usize) {
self.rdi = rdi;
}
fn rip(&self) -> usize {
self.rip
}
fn set_rip(&mut self, rip: usize) {
self.rip = rip;
}
fn r8(&self) -> usize {
self.r8
}
fn set_r8(&mut self, r8: usize) {
self.r8 = r8;
}
fn r9(&self) -> usize {
self.r9
}
fn set_r9(&mut self, r9: usize) {
self.r9 = r9;
}
fn r10(&self) -> usize {
self.r10
}
fn set_r10(&mut self, r10: usize) {
self.r10 = r10;
}
fn r11(&self) -> usize {
self.r11
}
fn set_r11(&mut self, r11: usize) {
self.r11 = r11;
}
fn r12(&self) -> usize {
self.r12
}
fn set_r12(&mut self, r12: usize) {
self.r12 = r12;
}
fn r13(&self) -> usize {
self.r13
}
fn set_r13(&mut self, r13: usize) {
self.r13 = r13;
}
fn r14(&self) -> usize {
self.r14
}
fn set_r14(&mut self, r14: usize) {
self.r14 = r14;
}
fn r15(&self) -> usize {
self.r15
}
fn set_r15(&mut self, r15: usize) {
self.r15 = r15;
}
fn rbp(&self) -> usize {
self.rbp
}
fn set_rbp(&mut self, rbp: usize) {
self.rbp = rbp;
}
}
/// User Preemption.
pub struct UserPreemption {
count: u32,
@ -222,9 +123,7 @@ impl UserContextApiInternal for UserContext {
Some(exception) => {
#[cfg(feature = "cvm_guest")]
if *exception == VIRTUALIZATION_EXCEPTION {
let ve_info =
tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
handle_virtual_exception(self.general_regs_mut(), &ve_info);
handle_virtualization_exception(self);
continue;
}
if exception.typ == CpuExceptionType::FaultOrTrap

View File

@ -0,0 +1,113 @@
// SPDX-License-Identifier: MPL-2.0
use tdx_guest::{handle_virtual_exception as do_handle_virtual_exception, tdcall, TdxTrapFrame};
use crate::cpu::{RawGeneralRegs, UserContext};
pub(crate) fn handle_virtualization_exception(user_context: &mut UserContext) {
let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
// Wrap the saved user registers so the tdx-guest crate's handler can read and
// write them through the TdxTrapFrame trait, then copy the result back.
let mut general_regs_wrapper = GeneralRegsWrapper(&mut *user_context.general_regs_mut());
do_handle_virtual_exception(&mut general_regs_wrapper, &ve_info);
*user_context.general_regs_mut() = *general_regs_wrapper.0;
}
struct GeneralRegsWrapper<'a>(&'a mut RawGeneralRegs);
impl TdxTrapFrame for GeneralRegsWrapper<'_> {
fn rax(&self) -> usize {
self.0.rax
}
fn set_rax(&mut self, rax: usize) {
self.0.rax = rax;
}
fn rbx(&self) -> usize {
self.0.rbx
}
fn set_rbx(&mut self, rbx: usize) {
self.0.rbx = rbx;
}
fn rcx(&self) -> usize {
self.0.rcx
}
fn set_rcx(&mut self, rcx: usize) {
self.0.rcx = rcx;
}
fn rdx(&self) -> usize {
self.0.rdx
}
fn set_rdx(&mut self, rdx: usize) {
self.0.rdx = rdx;
}
fn rsi(&self) -> usize {
self.0.rsi
}
fn set_rsi(&mut self, rsi: usize) {
self.0.rsi = rsi;
}
fn rdi(&self) -> usize {
self.0.rdi
}
fn set_rdi(&mut self, rdi: usize) {
self.0.rdi = rdi;
}
fn rip(&self) -> usize {
self.0.rip
}
fn set_rip(&mut self, rip: usize) {
self.0.rip = rip;
}
fn r8(&self) -> usize {
self.0.r8
}
fn set_r8(&mut self, r8: usize) {
self.0.r8 = r8;
}
fn r9(&self) -> usize {
self.0.r9
}
fn set_r9(&mut self, r9: usize) {
self.0.r9 = r9;
}
fn r10(&self) -> usize {
self.0.r10
}
fn set_r10(&mut self, r10: usize) {
self.0.r10 = r10;
}
fn r11(&self) -> usize {
self.0.r11
}
fn set_r11(&mut self, r11: usize) {
self.0.r11 = r11;
}
fn r12(&self) -> usize {
self.0.r12
}
fn set_r12(&mut self, r12: usize) {
self.0.r12 = r12;
}
fn r13(&self) -> usize {
self.0.r13
}
fn set_r13(&mut self, r13: usize) {
self.0.r13 = r13;
}
fn r14(&self) -> usize {
self.0.r14
}
fn set_r14(&mut self, r14: usize) {
self.0.r14 = r14;
}
fn r15(&self) -> usize {
self.0.r15
}
fn set_r15(&mut self, r15: usize) {
self.0.r15 = r15;
}
fn rbp(&self) -> usize {
self.0.rbp
}
fn set_rbp(&mut self, rbp: usize) {
self.0.rbp = rbp;
}
}
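GeneralRegsWrapper above, and the TrapFrameWrapper added further down, are plain newtype adapters: they borrow the register file mutably and forward every accessor, so the handler in the tdx-guest crate mutates the real registers in place. For trapframe::TrapFrame the wrapper is also what keeps the impl legal under Rust's orphan rule, since both the TdxTrapFrame trait and the TrapFrame type are foreign to this crate. A self-contained sketch of the pattern with made-up stand-in names (Regs, Raw, Wrapper are illustrative only, not part of the commit):

// Stand-ins for tdx_guest::TdxTrapFrame and the wrapped register structs.
trait Regs {
    fn rax(&self) -> usize;
    fn set_rax(&mut self, rax: usize);
}

struct Raw {
    rax: usize,
}

// The newtype borrows the real registers and forwards each accessor.
struct Wrapper<'a>(&'a mut Raw);

impl Regs for Wrapper<'_> {
    fn rax(&self) -> usize {
        self.0.rax
    }
    fn set_rax(&mut self, rax: usize) {
        self.0.rax = rax;
    }
}

// An external handler only sees the trait, yet its writes land in `Raw`.
fn external_handler(regs: &mut dyn Regs) {
    let next = regs.rax() + 1;
    regs.set_rax(next);
}

fn main() {
    let mut raw = Raw { rax: 41 };
    external_handler(&mut Wrapper(&mut raw));
    assert_eq!(raw.rax, 42);
}

The real code plays the role of external_handler with tdx_guest::handle_virtual_exception, and the wrapped registers are the user context's RawGeneralRegs or the kernel TrapFrame.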

View File

@ -1,19 +1,12 @@
// SPDX-License-Identifier: MPL-2.0
use iced_x86::{Code, Decoder, DecoderOptions, Instruction, Register};
use log::warn;
use tdx_guest::{
serial_println, tdcall,
tdcall::{accept_page, TdgVeInfo},
tdvmcall,
tdvmcall::{cpuid, hlt, map_gpa, rdmsr, read_mmio, write_mmio, wrmsr, IoSize},
TdxVirtualExceptionType,
};
use tdx_guest::{tdcall::accept_page, tdvmcall::map_gpa, TdxTrapFrame};
use trapframe::TrapFrame;
use crate::{
mm::{
kspace::{BOOT_PAGE_TABLE, KERNEL_BASE_VADDR, KERNEL_END_VADDR, KERNEL_PAGE_TABLE},
kspace::{BOOT_PAGE_TABLE, KERNEL_PAGE_TABLE},
paddr_to_vaddr,
page_prop::{PageProperty, PrivilegedPageFlags as PrivFlags},
PAGE_SIZE,
@ -24,62 +17,6 @@ use crate::{
const SHARED_BIT: u8 = 51;
const SHARED_MASK: u64 = 1u64 << SHARED_BIT;
// Intel TDX guest physical address. May be a protected (private) GPA or an unprotected (shared) GPA.
pub type TdxGpa = usize;
pub trait TdxTrapFrame {
fn rax(&self) -> usize;
fn set_rax(&mut self, rax: usize);
fn rbx(&self) -> usize;
fn set_rbx(&mut self, rbx: usize);
fn rcx(&self) -> usize;
fn set_rcx(&mut self, rcx: usize);
fn rdx(&self) -> usize;
fn set_rdx(&mut self, rdx: usize);
fn rsi(&self) -> usize;
fn set_rsi(&mut self, rsi: usize);
fn rdi(&self) -> usize;
fn set_rdi(&mut self, rdi: usize);
fn rip(&self) -> usize;
fn set_rip(&mut self, rip: usize);
fn r8(&self) -> usize;
fn set_r8(&mut self, r8: usize);
fn r9(&self) -> usize;
fn set_r9(&mut self, r9: usize);
fn r10(&self) -> usize;
fn set_r10(&mut self, r10: usize);
fn r11(&self) -> usize;
fn set_r11(&mut self, r11: usize);
fn r12(&self) -> usize;
fn set_r12(&mut self, r12: usize);
fn r13(&self) -> usize;
fn set_r13(&mut self, r13: usize);
fn r14(&self) -> usize;
fn set_r14(&mut self, r14: usize);
fn r15(&self) -> usize;
fn set_r15(&mut self, r15: usize);
fn rbp(&self) -> usize;
fn set_rbp(&mut self, rbp: usize);
}
enum InstrMmioType {
Write,
WriteImm,
Read,
ReadZeroExtend,
ReadSignExtend,
Movs,
}
#[derive(Debug)]
enum MmioError {
Unimplemented,
InvalidInstruction,
InvalidAddress,
DecodeFailed,
TdVmcallError,
}
#[derive(Debug)]
pub enum PageConvertError {
PageTable,
@ -87,313 +24,6 @@ pub enum PageConvertError {
TdVmcall,
}
pub fn handle_virtual_exception(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) {
let mut instr_len = ve_info.exit_instruction_length;
match ve_info.exit_reason.into() {
TdxVirtualExceptionType::Hlt => {
serial_println!("Ready to halt");
hlt();
}
TdxVirtualExceptionType::Io => {
if !handle_io(trapframe, ve_info) {
serial_println!("Handle tdx ioexit errors, ready to halt");
hlt();
}
}
TdxVirtualExceptionType::MsrRead => {
let msr = unsafe { rdmsr(trapframe.rcx() as u32).unwrap() };
trapframe.set_rax((msr as u32 & u32::MAX) as usize);
trapframe.set_rdx(((msr >> 32) as u32 & u32::MAX) as usize);
}
TdxVirtualExceptionType::MsrWrite => {
let data = trapframe.rax() as u64 | ((trapframe.rdx() as u64) << 32);
unsafe { wrmsr(trapframe.rcx() as u32, data).unwrap() };
}
TdxVirtualExceptionType::CpuId => {
let cpuid_info = cpuid(trapframe.rax() as u32, trapframe.rcx() as u32).unwrap();
let mask = 0xFFFF_FFFF_0000_0000_usize;
trapframe.set_rax((trapframe.rax() & mask) | cpuid_info.eax);
trapframe.set_rbx((trapframe.rbx() & mask) | cpuid_info.ebx);
trapframe.set_rcx((trapframe.rcx() & mask) | cpuid_info.ecx);
trapframe.set_rdx((trapframe.rdx() & mask) | cpuid_info.edx);
}
TdxVirtualExceptionType::EptViolation => {
if is_protected_gpa(ve_info.guest_physical_address as TdxGpa) {
serial_println!("Unexpected EPT-violation on private memory");
hlt();
}
instr_len = handle_mmio(trapframe, ve_info).unwrap() as u32;
}
TdxVirtualExceptionType::Other => {
serial_println!("Unknown TDX vitrual exception type");
hlt();
}
_ => return,
}
trapframe.set_rip(trapframe.rip() + instr_len as usize);
}
fn handle_io(trapframe: &mut dyn TdxTrapFrame, ve_info: &tdcall::TdgVeInfo) -> bool {
let size = match ve_info.exit_qualification & 0x3 {
0 => IoSize::Size1,
1 => IoSize::Size2,
3 => IoSize::Size4,
_ => panic!("Invalid size value"),
};
let direction = if (ve_info.exit_qualification >> 3) & 0x1 == 0 {
tdvmcall::Direction::Out
} else {
tdvmcall::Direction::In
};
let _operand = if (ve_info.exit_qualification >> 6) & 0x1 == 0 {
tdvmcall::Operand::Dx
} else {
tdvmcall::Operand::Immediate
};
let port = (ve_info.exit_qualification >> 16) as u16;
match direction {
tdvmcall::Direction::In => {
trapframe.set_rax(tdvmcall::io_read(size, port).unwrap() as usize);
}
tdvmcall::Direction::Out => {
tdvmcall::io_write(size, port, trapframe.rax() as u32).unwrap();
}
};
true
}
fn is_protected_gpa(gpa: TdxGpa) -> bool {
(gpa as u64 & SHARED_MASK) == 0
}
fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<usize, MmioError> {
// Get instruction
let instr = decode_instr(trapframe.rip())?;
// Decode MMIO instruction
match decode_mmio(&instr) {
Some((mmio, size)) => {
match mmio {
InstrMmioType::Write => {
let value = match instr.op1_register() {
Register::RCX => trapframe.rcx() as u64,
Register::ECX => (trapframe.rcx() & 0xFFFF_FFFF) as u64,
Register::CX => (trapframe.rcx() & 0xFFFF) as u64,
Register::CL => (trapframe.rcx() & 0xFF) as u64,
_ => todo!(),
};
// SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
unsafe {
write_mmio(size, ve_info.guest_physical_address, value)
.map_err(|_| MmioError::TdVmcallError)?
}
}
InstrMmioType::WriteImm => {
let value = instr.immediate(0);
// SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
unsafe {
write_mmio(size, ve_info.guest_physical_address, value)
.map_err(|_| MmioError::TdVmcallError)?
}
}
InstrMmioType::Read =>
// SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
unsafe {
let read_res = read_mmio(size, ve_info.guest_physical_address)
.map_err(|_| MmioError::TdVmcallError)?
as usize;
match instr.op0_register() {
Register::RAX => trapframe.set_rax(read_res),
Register::EAX => {
trapframe.set_rax((trapframe.rax() & 0xFFFF_FFFF_0000_0000) | read_res)
}
Register::AX => {
trapframe.set_rax((trapframe.rax() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::AL => {
trapframe.set_rax((trapframe.rax() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::RBX => trapframe.set_rbx(read_res),
Register::EBX => {
trapframe.set_rbx((trapframe.rbx() & 0xFFFF_FFFF_0000_0000) | read_res)
}
Register::BX => {
trapframe.set_rbx((trapframe.rbx() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::BL => {
trapframe.set_rbx((trapframe.rbx() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::RCX => trapframe.set_rcx(read_res),
Register::ECX => {
trapframe.set_rcx((trapframe.rcx() & 0xFFFF_FFFF_0000_0000) | read_res)
}
Register::CX => {
trapframe.set_rcx((trapframe.rcx() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::CL => {
trapframe.set_rcx((trapframe.rcx() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::RDX => trapframe.set_rdx(read_res),
Register::EDX => {
trapframe.set_rdx((trapframe.rdx() & 0xFFFF_FFFF_0000_0000) | read_res)
}
Register::DX => {
trapframe.set_rdx((trapframe.rdx() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::DL => {
trapframe.set_rdx((trapframe.rdx() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::SIL => {
trapframe.set_rsi((trapframe.rsi() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::DIL => {
trapframe.set_rdi((trapframe.rdi() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R8L => {
trapframe.set_r8((trapframe.r8() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R9L => {
trapframe.set_r9((trapframe.r9() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R10L => {
trapframe.set_r10((trapframe.r10() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R11L => {
trapframe.set_r11((trapframe.r11() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R11W => {
trapframe.set_r11((trapframe.r11() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::R12L => {
trapframe.set_r12((trapframe.r12() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R13L => {
trapframe.set_r13((trapframe.r13() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R13W => {
trapframe.set_r13((trapframe.r13() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::R14L => {
trapframe.set_r14((trapframe.r14() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::R14D => {
trapframe.set_r14((trapframe.r14() & 0xFFFF_FFFF_0000_0000) | read_res)
}
Register::R15L => {
trapframe.set_r15((trapframe.r15() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
Register::BP => {
trapframe.set_rbp((trapframe.rbp() & 0xFFFF_FFFF_FFFF_0000) | read_res)
}
Register::BPL => {
trapframe.set_rbp((trapframe.rbp() & 0xFFFF_FFFF_FFFF_FF00) | read_res)
}
_ => return Err(MmioError::Unimplemented),
}
},
InstrMmioType::ReadZeroExtend =>
// SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
unsafe {
let read_res = read_mmio(size, ve_info.guest_physical_address)
.map_err(|_| MmioError::TdVmcallError)?
as usize;
match instr.op0_register() {
Register::RAX | Register::EAX | Register::AX | Register::AL => {
trapframe.set_rax(read_res)
}
Register::RBX | Register::EBX | Register::BX | Register::BL => {
trapframe.set_rbx(read_res)
}
Register::RCX | Register::ECX | Register::CX | Register::CL => {
trapframe.set_rcx(read_res)
}
_ => return Err(MmioError::Unimplemented),
}
},
InstrMmioType::ReadSignExtend => return Err(MmioError::Unimplemented),
// MMIO was accessed with an instruction that could not be decoded or handled properly.
InstrMmioType::Movs => return Err(MmioError::InvalidInstruction),
}
}
None => {
return Err(MmioError::DecodeFailed);
}
}
Ok(instr.len())
}
fn decode_instr(rip: usize) -> Result<Instruction, MmioError> {
if !(KERNEL_BASE_VADDR..KERNEL_END_VADDR).contains(&rip) {
return Err(MmioError::InvalidAddress);
}
let code_data = {
const MAX_X86_INSTR_LEN: usize = 15;
let mut data = [0u8; MAX_X86_INSTR_LEN];
// SAFETY:
// This is safe because we are ensuring that 'rip' is a valid kernel virtual address before this operation.
// We are also ensuring that the size of the data we are copying does not exceed 'MAX_X86_INSTR_LEN'.
// Therefore, we are not reading any memory that we shouldn't be, and we are not causing any undefined behavior.
unsafe {
core::ptr::copy_nonoverlapping(rip as *const u8, data.as_mut_ptr(), data.len());
}
data
};
let mut decoder = Decoder::with_ip(64, &code_data, rip as u64, DecoderOptions::NONE);
let mut instr = Instruction::default();
decoder.decode_out(&mut instr);
if instr.is_invalid() {
return Err(MmioError::InvalidInstruction);
}
Ok(instr)
}
fn decode_mmio(instr: &Instruction) -> Option<(InstrMmioType, IoSize)> {
match instr.code() {
// 0x88
Code::Mov_rm8_r8 => Some((InstrMmioType::Write, IoSize::Size1)),
// 0x89
Code::Mov_rm16_r16 => Some((InstrMmioType::Write, IoSize::Size2)),
Code::Mov_rm32_r32 => Some((InstrMmioType::Write, IoSize::Size4)),
Code::Mov_rm64_r64 => Some((InstrMmioType::Write, IoSize::Size8)),
// 0xc6
Code::Mov_rm8_imm8 => Some((InstrMmioType::WriteImm, IoSize::Size1)),
// 0xc7
Code::Mov_rm16_imm16 => Some((InstrMmioType::WriteImm, IoSize::Size2)),
Code::Mov_rm32_imm32 => Some((InstrMmioType::WriteImm, IoSize::Size4)),
Code::Mov_rm64_imm32 => Some((InstrMmioType::WriteImm, IoSize::Size8)),
// 0x8a
Code::Mov_r8_rm8 => Some((InstrMmioType::Read, IoSize::Size1)),
// 0x8b
Code::Mov_r16_rm16 => Some((InstrMmioType::Read, IoSize::Size2)),
Code::Mov_r32_rm32 => Some((InstrMmioType::Read, IoSize::Size4)),
Code::Mov_r64_rm64 => Some((InstrMmioType::Read, IoSize::Size8)),
// 0xa4
Code::Movsb_m8_m8 => Some((InstrMmioType::Movs, IoSize::Size1)),
// 0xa5
Code::Movsw_m16_m16 => Some((InstrMmioType::Movs, IoSize::Size2)),
Code::Movsd_m32_m32 => Some((InstrMmioType::Movs, IoSize::Size4)),
Code::Movsq_m64_m64 => Some((InstrMmioType::Movs, IoSize::Size8)),
// 0x0f 0xb6
Code::Movzx_r16_rm8 | Code::Movzx_r32_rm8 | Code::Movzx_r64_rm8 => {
Some((InstrMmioType::ReadZeroExtend, IoSize::Size1))
}
// 0x0f 0xb7
Code::Movzx_r16_rm16 | Code::Movzx_r32_rm16 | Code::Movzx_r64_rm16 => {
Some((InstrMmioType::ReadZeroExtend, IoSize::Size2))
}
// 0x0f 0xbe
Code::Movsx_r16_rm8 | Code::Movsx_r32_rm8 | Code::Movsx_r64_rm8 => {
Some((InstrMmioType::ReadSignExtend, IoSize::Size1))
}
// 0x0f 0xbf
Code::Movsx_r16_rm16 | Code::Movsx_r32_rm16 | Code::Movsx_r64_rm16 => {
Some((InstrMmioType::ReadSignExtend, IoSize::Size2))
}
_ => None,
}
}
/// Sets the given physical address range to Intel TDX shared pages.
/// Clears the data within the given address range.
/// Make sure the provided physical address is page size aligned.
@ -484,102 +114,104 @@ pub unsafe fn protect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), PageC
Ok(())
}
pub struct TrapFrameWrapper<'a>(pub &'a mut TrapFrame);
#[cfg(feature = "cvm_guest")]
impl TdxTrapFrame for TrapFrame {
impl TdxTrapFrame for TrapFrameWrapper<'_> {
fn rax(&self) -> usize {
self.rax
self.0.rax
}
fn set_rax(&mut self, rax: usize) {
self.rax = rax;
self.0.rax = rax;
}
fn rbx(&self) -> usize {
self.rbx
self.0.rbx
}
fn set_rbx(&mut self, rbx: usize) {
self.rbx = rbx;
self.0.rbx = rbx;
}
fn rcx(&self) -> usize {
self.rcx
self.0.rcx
}
fn set_rcx(&mut self, rcx: usize) {
self.rcx = rcx;
self.0.rcx = rcx;
}
fn rdx(&self) -> usize {
self.rdx
self.0.rdx
}
fn set_rdx(&mut self, rdx: usize) {
self.rdx = rdx;
self.0.rdx = rdx;
}
fn rsi(&self) -> usize {
self.rsi
self.0.rsi
}
fn set_rsi(&mut self, rsi: usize) {
self.rsi = rsi;
self.0.rsi = rsi;
}
fn rdi(&self) -> usize {
self.rdi
self.0.rdi
}
fn set_rdi(&mut self, rdi: usize) {
self.rdi = rdi;
self.0.rdi = rdi;
}
fn rip(&self) -> usize {
self.rip
self.0.rip
}
fn set_rip(&mut self, rip: usize) {
self.rip = rip;
self.0.rip = rip;
}
fn r8(&self) -> usize {
self.r8
self.0.r8
}
fn set_r8(&mut self, r8: usize) {
self.r8 = r8;
self.0.r8 = r8;
}
fn r9(&self) -> usize {
self.r9
self.0.r9
}
fn set_r9(&mut self, r9: usize) {
self.r9 = r9;
self.0.r9 = r9;
}
fn r10(&self) -> usize {
self.r10
self.0.r10
}
fn set_r10(&mut self, r10: usize) {
self.r10 = r10;
self.0.r10 = r10;
}
fn r11(&self) -> usize {
self.r11
self.0.r11
}
fn set_r11(&mut self, r11: usize) {
self.r11 = r11;
self.0.r11 = r11;
}
fn r12(&self) -> usize {
self.r12
self.0.r12
}
fn set_r12(&mut self, r12: usize) {
self.r12 = r12;
self.0.r12 = r12;
}
fn r13(&self) -> usize {
self.r13
self.0.r13
}
fn set_r13(&mut self, r13: usize) {
self.r13 = r13;
self.0.r13 = r13;
}
fn r14(&self) -> usize {
self.r14
self.0.r14
}
fn set_r14(&mut self, r14: usize) {
self.r14 = r14;
self.0.r14 = r14;
}
fn r15(&self) -> usize {
self.r15
self.0.r15
}
fn set_r15(&mut self, r15: usize) {
self.r15 = r15;
self.0.r15 = r15;
}
fn rbp(&self) -> usize {
self.rbp
self.0.rbp
}
fn set_rbp(&mut self, rbp: usize) {
self.rbp = rbp;
self.0.rbp = rbp;
}
}
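The hunk header above also shows that the page-conversion helper pub unsafe fn protect_gpa_range(gpa: Paddr, page_num: usize) -> Result<(), PageConvertError> is kept through this refactor. A hypothetical call site, sketched under the assumption that protect_gpa_range converts a previously shared range back to private TD memory; the paddr value and the surrounding safety argument are illustrative, not taken from the commit:

// Hypothetical: `paddr` is assumed to be a page-aligned, caller-owned
// physical address that was previously shared with the host.
// SAFETY (assumed contract): the caller has exclusive ownership of the range
// and no live references into it while the GPA attribute changes.
unsafe {
    protect_gpa_range(paddr, 1).expect("failed to re-protect GPA range");
}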

View File

@ -22,8 +22,8 @@ use crate::{
cfg_if! {
if #[cfg(feature = "cvm_guest")] {
use tdx_guest::{tdcall, tdx_is_enabled};
use crate::arch::{cpu::VIRTUALIZATION_EXCEPTION, tdx_guest::handle_virtual_exception};
use tdx_guest::{tdcall, tdx_is_enabled, handle_virtual_exception};
use crate::arch::{cpu::VIRTUALIZATION_EXCEPTION, tdx_guest::TrapFrameWrapper};
}
}
@ -46,7 +46,9 @@ extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
#[cfg(feature = "cvm_guest")]
&VIRTUALIZATION_EXCEPTION => {
let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
handle_virtual_exception(f, &ve_info);
let mut trapframe_wrapper = TrapFrameWrapper(&mut *f);
handle_virtual_exception(&mut trapframe_wrapper, &ve_info);
*f = *trapframe_wrapper.0;
}
&PAGE_FAULT => {
let page_fault_addr = x86_64::registers::control::Cr2::read().as_u64();