Add a PTE extension mechanism
committed by Tate, Hongliang Tian
parent 513ac97969
commit 82bdadf754
@@ -6,7 +6,7 @@ use core::mem::size_of;
 use log::warn;
 use pod::Pod;
 
-use super::second_stage::{DeviceMode, PageTableConsts, PageTableEntry};
+use super::second_stage::{DeviceMode, PageTableConsts, PageTableEntry, PageTableFlags};
 use crate::{
     bus::pci::PciDeviceLocation,
     vm::{
@@ -296,6 +296,8 @@ impl ContextTable {
             &(paddr..paddr + PAGE_SIZE),
             MapProperty {
                 perm: VmPerm::RW,
+                global: false,
+                extension: PageTableFlags::empty().bits(),
                 cache: CachePolicy::Uncacheable,
             },
         );
@@ -122,7 +122,12 @@ impl PageTableEntryTrait for PageTableEntry {
             status |= MapStatus::DIRTY;
         }
         MapInfo {
-            prop: MapProperty { perm, cache },
+            prop: MapProperty {
+                perm,
+                global: false,
+                extension: self.0 & !Self::PHYS_MASK as u64,
+                cache,
+            },
             status,
         }
     }
@@ -130,7 +130,7 @@ impl PageTableEntryTrait for PageTableEntry {
             if prop.perm.contains(VmPerm::U) {
                 flags |= PageTableFlags::USER;
             }
-            if prop.perm.contains(VmPerm::G) {
+            if prop.global {
                 flags |= PageTableFlags::GLOBAL;
             }
         }
@@ -143,6 +143,10 @@ impl PageTableEntryTrait for PageTableEntry {
         if huge {
             flags |= PageTableFlags::HUGE;
         }
+        #[cfg(feature = "intel_tdx")]
+        if prop.extension as usize & PageTableFlags::SHARED.bits() != 0 {
+            flags |= PageTableFlags::SHARED;
+        }
         Self(paddr & Self::PHYS_ADDR_MASK | flags.bits())
     }
 
@@ -164,9 +168,7 @@ impl PageTableEntryTrait for PageTableEntry {
         if self.0 & PageTableFlags::USER.bits() != 0 {
             perm |= VmPerm::U;
         }
-        if self.0 & PageTableFlags::GLOBAL.bits() != 0 {
-            perm |= VmPerm::G;
-        }
+        let global = self.0 & PageTableFlags::GLOBAL.bits() != 0;
         let cache = if self.0 & PageTableFlags::NO_CACHE.bits() != 0 {
             CachePolicy::Uncacheable
         } else if self.0 & PageTableFlags::WRITE_THROUGH.bits() != 0 {
@@ -182,7 +184,12 @@ impl PageTableEntryTrait for PageTableEntry {
             status |= MapStatus::DIRTY;
         }
         MapInfo {
-            prop: MapProperty { perm, cache },
+            prop: MapProperty {
+                perm,
+                global,
+                extension: (self.0 & !Self::PHYS_ADDR_MASK) as u64,
+                cache,
+            },
             status,
         }
     }
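The two `MapInfo` hunks above carve the new `extension` value out of whatever PTE bits fall outside the physical-address mask, and the `new()` hunk folds it back in, so the generic page-table code can carry architecture-private flags (such as the TDX shared flag) without interpreting them. A minimal standalone sketch of that round trip, using a made-up mask and flag bit rather than the kernel's real constants:

// Standalone sketch (not part of the commit): extension bits are simply the PTE
// bits outside the physical-address field. The mask and the flag bit below are
// illustrative values, not the kernel's constants.
const PHYS_ADDR_MASK: u64 = 0x000F_FFFF_FFFF_F000;

fn encode_pte(paddr: u64, extension: u64) -> u64 {
    // The frame address lives inside the mask; the extension rides along outside it.
    (paddr & PHYS_ADDR_MASK) | (extension & !PHYS_ADDR_MASK)
}

fn decode_extension(pte: u64) -> u64 {
    // Mirrors `extension: (self.0 & !Self::PHYS_ADDR_MASK) as u64` in the diff.
    pte & !PHYS_ADDR_MASK
}

fn main() {
    // A made-up architecture-private flag, deliberately chosen outside the address field.
    let ext_flag: u64 = 1 << 62;
    let pte = encode_pte(0x1234_5000, ext_flag);
    assert_eq!(pte & PHYS_ADDR_MASK, 0x1234_5000);
    assert_eq!(decode_extension(pte) & ext_flag, ext_flag);
    println!("pte = {pte:#x}");
}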
@@ -13,11 +13,11 @@ use tdx_guest::{
 use crate::{
     arch::mm::PageTableFlags,
     vm::{
+        kspace::KERNEL_PAGE_TABLE,
         paddr_to_vaddr,
-        page_table::{PageTableError, KERNEL_PAGE_TABLE},
-        KERNEL_BASE_VADDR, KERNEL_END_VADDR,
+        page_table::{MapProperty, PageTableError},
+        KERNEL_BASE_VADDR, KERNEL_END_VADDR, PAGE_SIZE,
     },
-    PAGE_SIZE,
 };
 
 const SHARED_BIT: u8 = 51;
@@ -122,7 +122,7 @@ pub fn handle_virtual_exception(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo
                 serial_println!("Unexpected EPT-violation on private memory");
                 hlt();
             }
-            instr_len = handle_mmio(trapframe, &ve_info).unwrap() as u32;
+            instr_len = handle_mmio(trapframe, ve_info).unwrap() as u32;
         }
         TdxVirtualExceptionType::Other => {
             serial_println!("Unknown TDX vitrual exception type");
@@ -186,7 +186,7 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
             // Safety: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
             unsafe {
                 write_mmio(size, ve_info.guest_physical_address, value)
-                    .map_err(|e| MmioError::TdVmcallError(e))?
+                    .map_err(MmioError::TdVmcallError)?
             }
         }
         InstrMmioType::WriteImm => {
@@ -194,14 +194,14 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
             // Safety: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
             unsafe {
                 write_mmio(size, ve_info.guest_physical_address, value)
-                    .map_err(|e| MmioError::TdVmcallError(e))?
+                    .map_err(MmioError::TdVmcallError)?
             }
         }
         InstrMmioType::Read =>
        // Safety: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
        unsafe {
            let read_res = read_mmio(size, ve_info.guest_physical_address)
-                .map_err(|e| MmioError::TdVmcallError(e))?
+                .map_err(MmioError::TdVmcallError)?
                as usize;
            match instr.op0_register() {
                Register::RAX => trapframe.set_rax(read_res),
@@ -296,7 +296,7 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
            // Safety: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
            unsafe {
                let read_res = read_mmio(size, ve_info.guest_physical_address)
-                    .map_err(|e| MmioError::TdVmcallError(e))?
+                    .map_err(MmioError::TdVmcallError)?
                    as usize;
                match instr.op0_register() {
                    Register::RAX | Register::EAX | Register::AX | Register::AL => {
@@ -324,7 +324,7 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
 }
 
 fn decode_instr(rip: usize) -> Result<Instruction, MmioError> {
-    if !(KERNEL_BASE_VADDR..KERNEL_END_VADDR).contains(rip) {
+    if !(KERNEL_BASE_VADDR..KERNEL_END_VADDR).contains(&rip) {
        return Err(MmioError::InvalidAddress);
    }
    let code_data = {
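Several hunks in this file also drop redundant closures around error constructors: a tuple-enum variant such as `MmioError::TdVmcallError` is itself a function from the wrapped error to `MmioError`, so it can be passed to `map_err` directly. A standalone sketch with stand-in types (not the kernel's definitions):

// Standalone illustration of the `.map_err(|e| Variant(e))` -> `.map_err(Variant)`
// cleanup applied above. The types here are stand-ins, not the kernel's.
#[derive(Debug)]
struct TdVmcallError(u64);

#[derive(Debug)]
enum MmioError {
    TdVmcallError(TdVmcallError),
}

fn vmcall() -> Result<u32, TdVmcallError> {
    Err(TdVmcallError(7))
}

fn handle() -> Result<u32, MmioError> {
    // Equivalent to `.map_err(|e| MmioError::TdVmcallError(e))`, minus the closure.
    vmcall().map_err(MmioError::TdVmcallError)
}

fn main() {
    println!("{:?}", handle());
}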
@@ -417,19 +417,18 @@ pub unsafe fn unprotect_gpa_range(gpa: TdxGpa, page_num: usize) -> Result<(), PageConvertError> {
     let vaddr = paddr_to_vaddr(gpa);
     let mut pt = KERNEL_PAGE_TABLE.get().unwrap().lock();
     unsafe {
-        for i in 0..page_num {
-            pt.protect(
-                vaddr + (i * PAGE_SIZE),
-                PageTableFlags::SHARED | PageTableFlags::WRITABLE | PageTableFlags::PRESENT,
-            )
-            .map_err(|e| PageConvertError::PageTableError(e))?;
-        }
+        pt.protect_unchecked(&(vaddr..page_num * PAGE_SIZE), |info| MapProperty {
+            perm: info.prop.perm,
+            extension: PageTableFlags::SHARED.bits() as u64,
+            cache: info.prop.cache,
+        })
+        .map_err(PageConvertError::PageTableError)?;
     };
     map_gpa(
         (gpa & (!PAGE_MASK)) as u64 | SHARED_MASK,
         (page_num * PAGE_SIZE) as u64,
     )
-    .map_err(|e| PageConvertError::TdVmcallError(e))
+    .map_err(PageConvertError::TdVmcallError)
 }
 
 /// Sets the given physical address range to Intel TDX private pages.
@@ -454,20 +453,20 @@ pub unsafe fn protect_gpa_range(gpa: TdxGpa, page_num: usize) -> Result<(), PageConvertError> {
     let vaddr = paddr_to_vaddr(gpa);
     let mut pt = KERNEL_PAGE_TABLE.get().unwrap().lock();
     unsafe {
-        for i in 0..page_num {
-            pt.protect(
-                vaddr + (i * PAGE_SIZE),
-                PageTableFlags::WRITABLE | PageTableFlags::PRESENT,
-            )
-            .map_err(|e| PageConvertError::PageTableError(e))?;
-        }
+        pt.protect_unchecked(&(vaddr..page_num * PAGE_SIZE), |info| MapProperty {
+            perm: info.prop.perm,
+            extension: (PageTableFlags::from_bits_truncate(info.prop.extension as usize)
+                - PageTableFlags::SHARED)
+                .bits() as u64,
+            cache: info.prop.cache,
+        })
+        .map_err(PageConvertError::PageTableError)?;
     };
     map_gpa((gpa & PAGE_MASK) as u64, (page_num * PAGE_SIZE) as u64)
-        .map_err(|e| PageConvertError::TdVmcallError(e))?;
+        .map_err(PageConvertError::TdVmcallError)?;
     for i in 0..page_num {
         unsafe {
-            accept_page(0, (gpa + i * PAGE_SIZE) as u64)
-                .map_err(|e| PageConvertError::TdCallError(e))?;
+            accept_page(0, (gpa + i * PAGE_SIZE) as u64).map_err(PageConvertError::TdCallError)?;
         }
     }
     Ok(())
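Both conversion routines pair the page-table update with a `map_gpa` TDVMCALL whose guest physical address carries the shared bit (bit 51, per `SHARED_BIT` in the context above) either set or cleared. A rough standalone sketch of that address arithmetic, assuming 4 KiB pages; the kernel's own `PAGE_MASK` semantics may differ from the simplified masks used here:

// Standalone sketch of the GPA bookkeeping around `map_gpa` above.
// `SHARED_BIT = 51` comes from the diff context; the 4 KiB page size is assumed.
const SHARED_BIT: u32 = 51;
const SHARED_MASK: u64 = 1 << SHARED_BIT;
const PAGE_SIZE: u64 = 4096;

/// Page-align a GPA and set the shared bit (roughly what the unprotect path hands to the VMM).
fn to_shared_gpa(gpa: u64) -> u64 {
    (gpa & !(PAGE_SIZE - 1)) | SHARED_MASK
}

/// Page-align a GPA and clear the shared bit (roughly the private form used on the protect path).
fn to_private_gpa(gpa: u64) -> u64 {
    (gpa & !(PAGE_SIZE - 1)) & !SHARED_MASK
}

fn main() {
    let gpa: u64 = 0x1234_5678;
    assert_eq!(to_shared_gpa(gpa), 0x1234_5000 | SHARED_MASK);
    assert_eq!(to_private_gpa(to_shared_gpa(gpa)), 0x1234_5000);
}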
@@ -13,7 +13,7 @@ use crate::{
     sync::{SpinLock, SpinLockGuard},
     user::UserSpace,
     vm::{
-        kspace::KERNEL_PAGE_TABLE, page_table::MapProperty, VmAllocOptions, VmPerm, VmSegment,
+        kspace::KERNEL_PAGE_TABLE, page_table::perm_op, VmAllocOptions, VmPerm, VmSegment,
         PAGE_SIZE,
     },
 };
@@ -72,16 +72,10 @@ impl KernelStack {
         // Safety: the physical guard page address is exclusively used since we allocated it.
         unsafe {
             page_table
-                .protect_unchecked(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |info| {
-                    assert!(
-                        info.prop.perm.contains(VmPerm::RW),
-                        "linear mapping shoud be readable and writable"
-                    );
-                    MapProperty {
-                        perm: info.prop.perm - VmPerm::RW,
-                        cache: info.prop.cache,
-                    }
-                })
+                .protect_unchecked(
+                    &(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE),
+                    perm_op(|p| p - VmPerm::RW),
+                )
                 .unwrap();
         }
         Ok(Self {
@@ -107,16 +101,10 @@ impl Drop for KernelStack {
         // Safety: the physical guard page address is exclusively used since we allocated it.
         unsafe {
             page_table
-                .protect_unchecked(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |info| {
-                    assert!(
-                        !info.prop.perm.contains(VmPerm::RW),
-                        "we should have removed the permission of the guard page"
-                    );
-                    MapProperty {
-                        perm: info.prop.perm | VmPerm::RW,
-                        cache: info.prop.cache,
-                    }
-                })
+                .protect_unchecked(
+                    &(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE),
+                    perm_op(|p| p | VmPerm::RW),
+                )
                 .unwrap();
         }
     }
@@ -8,10 +8,9 @@ use log::debug;
 use tdx_guest::tdcall;
 use trapframe::TrapFrame;
 
-#[cfg(feature = "intel_tdx")]
-use crate::arch::tdx_guest::{handle_virtual_exception, TdxTrapFrame};
 #[cfg(feature = "intel_tdx")]
 use crate::arch::{
     cpu::VIRTUALIZATION_EXCEPTION,
     mm::PageTableFlags,
+    tdx_guest::{handle_virtual_exception, TdxTrapFrame},
 };
@@ -233,7 +232,12 @@ fn handle_kernel_page_fault(f: &TrapFrame) {
             &(vaddr..vaddr + PAGE_SIZE),
             &(paddr..paddr + PAGE_SIZE),
             MapProperty {
-                perm: VmPerm::RW | VmPerm::G,
+                perm: VmPerm::RW,
+                global: true,
+                #[cfg(feature = "intel_tdx")]
+                extension: PageTableFlags::SHARED.bits() as u64,
+                #[cfg(not(feature = "intel_tdx"))]
+                extension: 0,
                 cache: CachePolicy::Uncacheable,
             },
         )
@@ -14,7 +14,7 @@ use crate::{
     vm::{
         dma::{dma_type, Daddr, DmaType},
         kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
-        page_table::{CachePolicy, MapProperty},
+        page_table::{cache_policy_op, CachePolicy},
         HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
     },
 };
@@ -62,10 +62,7 @@ impl DmaCoherent {
             // Safety: the address is in the range of `vm_segment`.
             unsafe {
                 page_table
-                    .protect_unchecked(&va_range, |info| MapProperty {
-                        perm: info.prop.perm,
-                        cache: CachePolicy::Uncacheable,
-                    })
+                    .protect_unchecked(&va_range, cache_policy_op(CachePolicy::Uncacheable))
                     .unwrap();
             }
         }
@@ -152,10 +149,7 @@ impl Drop for DmaCoherentInner {
             // Safety: the address is in the range of `vm_segment`.
             unsafe {
                 page_table
-                    .protect_unchecked(&va_range, |info| MapProperty {
-                        perm: info.prop.perm,
-                        cache: CachePolicy::Writeback,
-                    })
+                    .protect_unchecked(&va_range, cache_policy_op(CachePolicy::Writeback))
                     .unwrap();
             }
         }
@@ -220,7 +214,7 @@ mod test {
         let vaddr = paddr_to_vaddr(vm_segment.paddr());
         assert!(
             page_table
-                .query(vaddr..vaddr + PAGE_SIZE)
+                .query(&(vaddr..vaddr + PAGE_SIZE))
                .unwrap()
                .next()
                .unwrap()
@@ -74,7 +74,9 @@ pub fn init_kernel_page_table() {
     let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + linear_mapping_size;
     let to = 0..linear_mapping_size;
     let prop = MapProperty {
-        perm: VmPerm::RW | VmPerm::G,
+        perm: VmPerm::RW,
+        global: true,
+        extension: 0,
         cache: CachePolicy::Writeback,
     };
     // Safety: we are doing the linear mapping for the kernel.
@@ -87,7 +89,9 @@ pub fn init_kernel_page_table() {
     let to = 0x8_0000_0000..0x9_0000_0000;
     let from = LINEAR_MAPPING_BASE_VADDR + to.start..LINEAR_MAPPING_BASE_VADDR + to.end;
     let prop = MapProperty {
-        perm: VmPerm::RW | VmPerm::G,
+        perm: VmPerm::RW,
+        global: true,
+        extension: 0,
         cache: CachePolicy::Uncacheable,
     };
     // Safety: we are doing I/O mappings for the kernel.
@@ -105,7 +109,9 @@ pub fn init_kernel_page_table() {
         region.base().align_down(PAGE_SIZE)..(region.base() + region.len()).align_up(PAGE_SIZE);
     let from = to.start + offset..to.end + offset;
     let prop = MapProperty {
-        perm: VmPerm::RWX | VmPerm::G,
+        perm: VmPerm::RWX,
+        global: true,
+        extension: 0,
         cache: CachePolicy::Writeback,
     };
     // Safety: we are doing mappings for the kernel.
@@ -257,6 +257,8 @@ where
             frame.inner.start_paddr(),
             MapProperty {
                 perm: VmPerm::RWX,
+                global: true,
+                extension: 0,
                 cache: CachePolicy::Uncacheable,
             },
             false,
@@ -45,7 +45,7 @@ pub trait PageTableConstsTrait: Debug + 'static {
 
 bitflags::bitflags! {
     /// The status of a memory mapping recorded by the hardware.
-    pub struct MapStatus: u32 {
+    pub struct MapStatus: u8 {
         const ACCESSED = 0b0000_0001;
         const DIRTY = 0b0000_0010;
     }
@@ -114,6 +114,13 @@ pub enum CachePolicy {
 #[derive(Clone, Copy, Debug)]
 pub struct MapProperty {
     pub perm: VmPerm,
+    /// Global.
+    /// A global page is not evicted from the TLB when TLB is flushed.
+    pub global: bool,
+    /// The properties of a memory mapping that is used and defined as flags in PTE
+    /// in specific architectures on an ad hoc basis. The logics provided by the
+    /// page table module will not be affected by this field.
+    pub extension: u64,
     pub cache: CachePolicy,
 }
 
@@ -125,6 +132,7 @@ pub struct MapProperty {
 /// let page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
 /// let prop = MapProperty {
 ///     perm: VmPerm::R,
+///     global: true,
+///     extension: 0,
 ///     cache: CachePolicy::Writeback,
 /// };
@@ -147,6 +155,7 @@ pub struct MapProperty {
 ///     assert!(info.prop.perm.contains(VmPerm::R));
 ///     MapProperty {
 ///         perm: info.prop.perm | VmPerm::W,
+///         global: info.prop.global,
+///         extension: info.prop.extension,
 ///         cache: info.prop.cache,
 ///     }
@@ -178,6 +187,8 @@ impl Fn<(MapInfo,)> for MapProperty {
 pub fn cache_policy_op(cache: CachePolicy) -> impl MapOp {
     move |info| MapProperty {
         perm: info.prop.perm,
+        global: info.prop.global,
+        extension: info.prop.extension,
         cache,
     }
 }
@@ -186,14 +197,27 @@ pub fn cache_policy_op(cache: CachePolicy) -> impl MapOp {
 pub fn perm_op(op: impl Fn(VmPerm) -> VmPerm) -> impl MapOp {
     move |info| MapProperty {
         perm: op(info.prop.perm),
+        global: info.prop.global,
+        extension: info.prop.extension,
         cache: info.prop.cache,
     }
 }
 
 impl MapProperty {
     pub fn new_general(perm: VmPerm) -> Self {
         Self {
             perm,
+            global: false,
+            extension: 0,
             cache: CachePolicy::Writeback,
         }
     }
+
+    pub fn new_invalid() -> Self {
+        Self {
+            perm: VmPerm::empty(),
+            global: false,
+            extension: 0,
+            cache: CachePolicy::Uncacheable,
+        }
+    }
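The `cache_policy_op` and `perm_op` helpers above return closures that rewrite one field of an existing mapping's property while preserving the rest, which is what lets the guard-page and DMA call sites earlier in this diff collapse to a single argument. A standalone sketch of the same combinator pattern with simplified stand-in types (the real `VmPerm`, `MapInfo`, and `MapOp` are richer):

// Standalone sketch of the `MapOp` combinator pattern used above.
// All types here are simplified stand-ins for the kernel's own.
#[derive(Clone, Copy, Debug, PartialEq)]
enum CachePolicy {
    Writeback,
    Uncacheable,
}

#[derive(Clone, Copy, Debug)]
struct MapProperty {
    perm: u8, // stand-in for the VmPerm bitflags
    global: bool,
    extension: u64,
    cache: CachePolicy,
}

#[derive(Clone, Copy, Debug)]
struct MapInfo {
    prop: MapProperty,
}

/// Build an op that only swaps the cache policy, preserving everything else.
fn cache_policy_op(cache: CachePolicy) -> impl Fn(MapInfo) -> MapProperty {
    move |info| MapProperty { cache, ..info.prop }
}

/// Build an op that only rewrites the permission bits.
fn perm_op(op: impl Fn(u8) -> u8) -> impl Fn(MapInfo) -> MapProperty {
    move |info| MapProperty {
        perm: op(info.prop.perm),
        ..info.prop
    }
}

fn main() {
    const RW: u8 = 0b11;
    let info = MapInfo {
        prop: MapProperty { perm: RW, global: true, extension: 0, cache: CachePolicy::Writeback },
    };
    // Like the guard-page hunk: strip RW from an existing mapping.
    let stripped = perm_op(|p| p & !RW)(info);
    assert_eq!(stripped.perm, 0);
    // Like the DMA hunk: force uncacheable, keep permissions and the rest.
    let uncached = cache_policy_op(CachePolicy::Uncacheable)(info);
    assert_eq!(uncached.cache, CachePolicy::Uncacheable);
    assert!(uncached.global);
    println!("{:?} / {:?}", stripped, uncached);
}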
@@ -4,7 +4,7 @@ use core::ops::Range;
 
 use bitflags::bitflags;
 
-use super::{is_page_aligned, page_table::CachePolicy, MapArea, MemorySet, VmFrameVec, VmIo};
+use super::{is_page_aligned, MapArea, MemorySet, VmFrameVec, VmIo};
 use crate::{
     arch::mm::PageTableFlags,
     prelude::*,
@@ -77,10 +77,7 @@ impl VmSpace {
             memory_set.map(MapArea::new(
                 addr,
                 PAGE_SIZE,
-                MapProperty {
-                    perm: options.perm,
-                    cache: CachePolicy::Writeback,
-                },
+                MapProperty::new_general(options.perm),
                 frames,
             ));
         }
@@ -145,13 +142,9 @@ impl VmSpace {
         let end_page = range.end / PAGE_SIZE;
         for page_idx in start_page..end_page {
             let addr = page_idx * PAGE_SIZE;
-            self.memory_set.lock().protect(
-                addr,
-                MapProperty {
-                    perm,
-                    cache: CachePolicy::Writeback,
-                },
-            )
+            self.memory_set
+                .lock()
+                .protect(addr, MapProperty::new_general(perm))
         }
         Ok(())
     }
@@ -267,9 +260,6 @@ bitflags! {
         const X = 0b00000100;
         /// User accessible.
         const U = 0b00001000;
-        /// Global.
-        /// A global page is not evicted from the TLB when TLB is flushed.
-        const G = 0b00010000;
         /// Readable + writable.
         const RW = Self::R.bits | Self::W.bits;
         /// Readable + execuable.
@@ -68,8 +68,8 @@ impl FileIo for TdxGuest {
     fn ioctl(&self, cmd: IoctlCmd, arg: usize) -> Result<i32> {
         match cmd {
             IoctlCmd::TDXGETREPORT => {
-                let mut tdx_report: TdxReportRequest = read_val_from_user(arg)?;
-                match get_report(&mut tdx_report.tdreport, &tdx_report.reportdata) {
+                let tdx_report: TdxReportRequest = read_val_from_user(arg)?;
+                match get_report(&tdx_report.tdreport, &tdx_report.reportdata) {
                     Ok(_) => {}
                     Err(err) => return Err(err.into()),
                 };