Unify headers of safety comments
commit 83b88229a3
parent 07fbbcfd8c
committed by: Tate, Hongliang Tian
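The change itself is mechanical: every safety-comment header spelled `// Safety:`, `// Safety.`, `// SAFETY.`, or `/// safety:` becomes the single spelling `SAFETY:`. That is the style used in the Rust standard library, and it is the header that Clippy's `undocumented_unsafe_blocks` lint looks for, so one uniform spelling keeps safety arguments both greppable and lintable. The sketch below illustrates the convention; it is a made-up example, not code from this commit:

    /// Returns the first byte of `bytes`, if any.
    fn first_byte(bytes: &[u8]) -> Option<u8> {
        if bytes.is_empty() {
            return None;
        }
        // SAFETY: `bytes` is non-empty, so index 0 is in bounds and the
        // pointer returned by `as_ptr()` is valid for a one-byte read.
        Some(unsafe { *bytes.as_ptr() })
    }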
@@ -24,7 +24,7 @@ fn init_bootloader_name(bootloader_name: &'static Once<String>) {
     let mut name = "";
     let info = MB1_INFO.get().unwrap();
     if info.boot_loader_name != 0 {
-        // Safety: the bootloader name is C-style zero-terminated string.
+        // SAFETY: the bootloader name is C-style zero-terminated string.
         unsafe {
             let cstr = paddr_to_vaddr(info.boot_loader_name as usize) as *const u8;
             let mut len = 0;

@@ -45,7 +45,7 @@ fn init_kernel_commandline(kernel_cmdline: &'static Once<KCmdlineArg>) {
     let mut cmdline = "";
     let info = MB1_INFO.get().unwrap();
     if info.cmdline != 0 {
-        // Safety: the command line is C-style zero-terminated string.
+        // SAFETY: the command line is C-style zero-terminated string.
         unsafe {
             let cstr = paddr_to_vaddr(info.cmdline as usize) as *const u8;
             let mut len = 0;
@@ -56,7 +56,7 @@ impl RemappingRegisters {
         };

         let vaddr: usize = paddr_to_vaddr(base_address as usize);
-        // Safety: All offsets and sizes are strictly adhered to in the manual, and the base address is obtained from Drhd.
+        // SAFETY: All offsets and sizes are strictly adhered to in the manual, and the base address is obtained from Drhd.
         let mut remapping_reg = unsafe {
             fault::init(vaddr);
             let version = Volatile::new_read_only(&*(vaddr as *const u32));
@@ -68,7 +68,7 @@ impl Dmar {
             return None;
         }
         let acpi_table_lock = super::ACPI_TABLES.get().unwrap().lock();
-        // Safety: The DmarHeader is the header for the DMAR structure, it fits all the field described in Intel manual.
+        // SAFETY: The DmarHeader is the header for the DMAR structure, it fits all the field described in Intel manual.
         let dmar_mapping = unsafe {
             acpi_table_lock
                 .get_sdt::<DmarHeader>(Signature::DMAR)

@@ -77,7 +77,7 @@ impl Dmar {

         let physical_address = dmar_mapping.physical_start();
         let len = dmar_mapping.mapped_length();
-        // Safety: The target address is the start of the remapping structures,
+        // SAFETY: The target address is the start of the remapping structures,
         // and the length is valid since the value is read from the length field in SDTHeader minus the size of DMAR header.
         let dmar_slice = unsafe {
             core::slice::from_raw_parts_mut(

@@ -89,7 +89,7 @@ impl Dmar {
         let mut remapping_structures = Vec::new();
         let mut index = 0;
         let mut remain_length = len - size_of::<DmarHeader>();
-        // Safety: Indexes and offsets are strictly followed by the manual.
+        // SAFETY: Indexes and offsets are strictly followed by the manual.
         unsafe {
             while remain_length > 0 {
                 // Common header: type: u16, length: u16
@@ -110,7 +110,7 @@ impl IoApicAccess {
     }

     pub fn read(&mut self, register: u8) -> u32 {
-        // Safety: Since the base address is valid, the read/write should be safe.
+        // SAFETY: Since the base address is valid, the read/write should be safe.
         unsafe {
             self.register.write_volatile(register as u32);
             self.data.read_volatile()

@@ -118,7 +118,7 @@ impl IoApicAccess {
     }

     pub fn write(&mut self, register: u8, data: u32) {
-        // Safety: Since the base address is valid, the read/write should be safe.
+        // SAFETY: Since the base address is valid, the read/write should be safe.
         unsafe {
             self.register.write_volatile(register as u32);
             self.data.write_volatile(data);

@@ -156,7 +156,7 @@ pub fn init() {
     // Need to find a way to determine if it is a valid address or not.
     const IO_APIC_DEFAULT_ADDRESS: usize = 0xFEC0_0000;
     #[cfg(feature = "intel_tdx")]
-    // Safety:
+    // SAFETY:
    // This is safe because we are ensuring that the `IO_APIC_DEFAULT_ADDRESS` is a valid MMIO address before this operation.
    // The `IO_APIC_DEFAULT_ADDRESS` is a well-known address used for IO APICs in x86 systems, and it is page-aligned, which is a requirement for the `unprotect_gpa_range` function.
    // We are also ensuring that we are only unprotecting a single page.
@@ -32,7 +32,7 @@ impl X2Apic {
             const EXTD_BIT_IDX: u8 = 10;
             (1 << EN_BIT_IDX) | (1 << EXTD_BIT_IDX)
         };
-        // Safety:
+        // SAFETY:
         // This is safe because we are ensuring that the operations are performed on valid MSRs.
         // We are using them to read and write to the `IA32_APIC_BASE` and `IA32_X2APIC_SIVR` MSRs, which are well-defined and valid MSRs in x86 systems.
         // Therefore, we are not causing any undefined behavior or violating any of the requirements of the `rdmsr` and `wrmsr` functions.
@@ -64,7 +64,7 @@ pub fn tlb_flush(vaddr: Vaddr) {
 }

 pub fn tlb_flush_all_including_global() {
-    // Safety: updates to CR4 here only change the global-page bit, the side effect
+    // SAFETY: updates to CR4 here only change the global-page bit, the side effect
     // is only to invalidate the TLB, which doesn't affect the memory safety.
     unsafe {
         // To invalidate all entries, including global-page
@@ -70,7 +70,7 @@ pub fn tsc_freq() -> u64 {

 /// Reads the current value of the processor’s time-stamp counter (TSC).
 pub fn read_tsc() -> u64 {
-    // Safety: It is safe to read a time-related counter.
+    // SAFETY: It is safe to read a time-related counter.
     unsafe { _rdtsc() }
 }
@@ -23,7 +23,7 @@ pub fn exit_qemu(exit_code: QemuExitCode) -> ! {
     use x86_64::instructions::port::Port;
     let mut port = Port::new(0xf4);

-    // Safety: The write to the ISA debug exit port is safe and `0xf4` should
+    // SAFETY: The write to the ISA debug exit port is safe and `0xf4` should
     // be the port number.
     unsafe {
         port.write(exit_code as u32);
@@ -184,7 +184,7 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
                 Register::CL => (trapframe.rcx() & 0xFF) as u64,
                 _ => todo!(),
             };
-            // Safety: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
+            // SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
             unsafe {
                 write_mmio(size, ve_info.guest_physical_address, value)
                     .map_err(MmioError::TdVmcallError)?

@@ -192,14 +192,14 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
         }
         InstrMmioType::WriteImm => {
             let value = instr.immediate(0);
-            // Safety: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
+            // SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the value and size parsed from the instruction are valid.
             unsafe {
                 write_mmio(size, ve_info.guest_physical_address, value)
                     .map_err(MmioError::TdVmcallError)?
             }
         }
         InstrMmioType::Read =>
-        // Safety: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
+        // SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
         unsafe {
             let read_res = read_mmio(size, ve_info.guest_physical_address)
                 .map_err(MmioError::TdVmcallError)?

@@ -294,7 +294,7 @@ fn handle_mmio(trapframe: &mut dyn TdxTrapFrame, ve_info: &TdgVeInfo) -> Result<
             }
         },
         InstrMmioType::ReadZeroExtend =>
-        // Safety: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
+        // SAFETY: The mmio_gpa obtained from `ve_info` is valid, and the size parsed from the instruction is valid.
         unsafe {
             let read_res = read_mmio(size, ve_info.guest_physical_address)
                 .map_err(MmioError::TdVmcallError)?

@@ -331,7 +331,7 @@ fn decode_instr(rip: usize) -> Result<Instruction, MmioError> {
     let code_data = {
         const MAX_X86_INSTR_LEN: usize = 15;
         let mut data = [0u8; MAX_X86_INSTR_LEN];
-        // Safety:
+        // SAFETY:
        // This is safe because we are ensuring that 'rip' is a valid kernel virtual address before this operation.
        // We are also ensuring that the size of the data we are copying does not exceed 'MAX_X86_INSTR_LEN'.
        // Therefore, we are not reading any memory that we shouldn't be, and we are not causing any undefined behavior.
@@ -22,11 +22,11 @@ pub struct MmioCommonDevice {
 impl MmioCommonDevice {
     pub(super) fn new(paddr: Paddr, handle: IrqLine) -> Self {
         // Read magic value
-        // Safety: It only read the value and judge if the magic value fit 0x74726976
+        // SAFETY: It only read the value and judge if the magic value fit 0x74726976
         unsafe {
             debug_assert_eq!(*(paddr_to_vaddr(paddr) as *const u32), VIRTIO_MMIO_MAGIC);
         }
-        // Safety: This range is virtio-mmio device space.
+        // SAFETY: This range is virtio-mmio device space.
         let io_mem = unsafe { IoMem::new(paddr..paddr + 0x200) };
         let res = Self {
             io_mem,
@@ -27,7 +27,7 @@ static IRQS: SpinLock<Vec<IrqLine>> = SpinLock::new(Vec::new());

 pub fn init() {
     #[cfg(feature = "intel_tdx")]
-    // Safety:
+    // SAFETY:
    // This is safe because we are ensuring that the address range 0xFEB0_0000 to 0xFEB0_4000 is valid before this operation.
    // The address range is page-aligned and falls within the MMIO range, which is a requirement for the `unprotect_gpa_range` function.
    // We are also ensuring that we are only unprotecting four pages.

@@ -55,10 +55,10 @@ fn iter_range(range: Range<usize>) {
     let mut device_count = 0;
     while current > range.start {
         current -= 0x100;
-        // Safety: It only read the value and judge if the magic value fit 0x74726976
+        // SAFETY: It only read the value and judge if the magic value fit 0x74726976
         let value = unsafe { *(paddr_to_vaddr(current) as *const u32) };
         if value == VIRTIO_MMIO_MAGIC {
-            // Safety: It only read the device id
+            // SAFETY: It only read the device id
             let device_id = unsafe { *(paddr_to_vaddr(current + 8) as *const u32) };
             device_count += 1;
             if device_id == 0 {
@@ -96,7 +96,7 @@ impl CapabilityMsixData {
         // Set message address 0xFEE0_0000
         for i in 0..table_size {
             #[cfg(feature = "intel_tdx")]
-            // Safety:
+            // SAFETY:
            // This is safe because we are ensuring that the physical address of the MSI-X table is valid before this operation.
            // We are also ensuring that we are only unprotecting a single page.
            // The MSI-X table will not exceed one page size, because the size of an MSI-X entry is 16 bytes, and 256 entries are required to fill a page,
@@ -203,7 +203,7 @@ impl IoBar {
         if self.size < size_of::<T>() as u32 || offset > self.size - size_of::<T>() as u32 {
             return Err(Error::InvalidArgs);
         }
-        // Safety: The range of ports accessed is within the scope managed by the IoBar and
+        // SAFETY: The range of ports accessed is within the scope managed by the IoBar and
         // an out-of-bounds check is performed.
         unsafe { Ok(T::read_from_port((self.base + offset) as u16)) }
     }

@@ -217,7 +217,7 @@ impl IoBar {
         if size_of::<T>() as u32 > self.size || offset > self.size - size_of::<T>() as u32 {
             return Err(Error::InvalidArgs);
         }
-        // Safety: The range of ports accessed is within the scope managed by the IoBar and
+        // SAFETY: The range of ports accessed is within the scope managed by the IoBar and
         // an out-of-bounds check is performed.
         unsafe { T::write_to_port((self.base + offset) as u16, value) }
         Ok(())
@@ -62,7 +62,7 @@ macro_rules! cpu_local {
 /// TODO: re-implement `CpuLocal`
 pub struct CpuLocal<T>(UnsafeCell<T>);

-// Safety. At any given time, only one task can access the inner value T of a cpu-local variable.
+// SAFETY: At any given time, only one task can access the inner value T of a cpu-local variable.
 unsafe impl<T> Sync for CpuLocal<T> {}

 impl<T> CpuLocal<T> {

@@ -82,7 +82,7 @@ impl<T> CpuLocal<T> {
         // FIXME: implement disable preemption
         // Disable interrupts when accessing cpu-local variable
         let _guard = disable_local();
-        // Safety. Now that the local IRQs are disabled, this CPU-local object can only be
+        // SAFETY: Now that the local IRQs are disabled, this CPU-local object can only be
         // accessed by the current task/thread. So it is safe to get its immutable reference
         // regardless of whether `T` implements `Sync` or not.
         let val_ref = unsafe { this.do_borrow() };
@@ -71,7 +71,7 @@ pub fn init() {
     // TODO: We activate the kernel page table here because the new kernel page table
     // has mappings for MMIO which is required for the components initialization. We
     // should refactor the initialization process to avoid this.
-    // Safety: we are activating the unique kernel page table.
+    // SAFETY: we are activating the unique kernel page table.
     unsafe {
         vm::kspace::KERNEL_PAGE_TABLE
             .get()
@@ -48,7 +48,7 @@ impl AtomicBits {
         assert!(index < self.num_bits);
         let i = index / 64;
         let j = index % 64;
-        // Safety. Variable i is in range as variable index is in range.
+        // SAFETY: Variable i is in range as variable index is in range.
         let u64_atomic = unsafe { self.u64s.get_unchecked(i) };
         (u64_atomic.load(Relaxed) & 1 << j) != 0
     }

@@ -58,7 +58,7 @@ impl AtomicBits {
         assert!(index < self.num_bits);
         let i = index / 64;
         let j = index % 64;
-        // Safety. Variable i is in range as variable index is in range.
+        // SAFETY: Variable i is in range as variable index is in range.
         let u64_atomic = unsafe { self.u64s.get_unchecked(i) };
         if new_bit {
             u64_atomic.fetch_or(1 << j, Relaxed);
@@ -124,7 +124,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinLock<T> {
     }
 }

-// Safety. Only a single lock holder is permitted to access the inner data of Spinlock.
+// SAFETY: Only a single lock holder is permitted to access the inner data of Spinlock.
 unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
 unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}

@@ -170,6 +170,6 @@ impl<T: ?Sized + fmt::Debug, R: Deref<Target = SpinLock<T>>> fmt::Debug for Spin

 impl<T: ?Sized, R: Deref<Target = SpinLock<T>>> !Send for SpinLockGuard_<T, R> {}

-// Safety. `SpinLockGuard_` can be shared between tasks/threads in same CPU.
+// SAFETY: `SpinLockGuard_` can be shared between tasks/threads in same CPU.
 // As `lock()` is only called when there are no race conditions caused by interrupts.
 unsafe impl<T: ?Sized + Sync, R: Deref<Target = SpinLock<T>> + Sync> Sync for SpinLockGuard_<T, R> {}
@@ -68,7 +68,7 @@ impl KernelStack {
             let guard_page_paddr = stack_segment.start_paddr();
             crate::vm::paddr_to_vaddr(guard_page_paddr)
         };
-        // Safety: the segment allocated is not used by others so we can protect it.
+        // SAFETY: the segment allocated is not used by others so we can protect it.
         unsafe {
             page_table
                 .protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {

@@ -96,7 +96,7 @@ impl Drop for KernelStack {
             let guard_page_paddr = self.segment.start_paddr();
             crate::vm::paddr_to_vaddr(guard_page_paddr)
         };
-        // Safety: the segment allocated is not used by others so we can protect it.
+        // SAFETY: the segment allocated is not used by others so we can protect it.
         unsafe {
             page_table
                 .protect(&(guard_page_vaddr..guard_page_vaddr + PAGE_SIZE), |p| {
@@ -45,7 +45,7 @@ impl IrqLine {
     }

     fn new(irq_num: u8) -> Self {
-        // Safety: The IRQ number is allocated through `RecycleAllocator`, and it is guaranteed that the
+        // SAFETY: The IRQ number is allocated through `RecycleAllocator`, and it is guaranteed that the
         // IRQ is not one of the important IRQ like cpu exception IRQ.
         Self {
             irq_num,
@@ -59,7 +59,7 @@ impl DmaCoherent {
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
         let vaddr = paddr_to_vaddr(start_paddr);
         let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
-        // Safety: the physical mappings is only used by DMA so protecting it is safe.
+        // SAFETY: the physical mappings is only used by DMA so protecting it is safe.
         unsafe {
             page_table
                 .protect(&va_range, |p| p.cache = CachePolicy::Uncacheable)

@@ -69,7 +69,7 @@ impl DmaCoherent {
         let start_daddr = match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
-                // Safety:
+                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.

@@ -84,7 +84,7 @@ impl DmaCoherent {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    // Safety: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
+                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
                     unsafe {
                         iommu::map(paddr as Daddr, paddr).unwrap();
                     }

@@ -124,7 +124,7 @@ impl Drop for DmaCoherentInner {
         match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
-                // Safety:
+                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.

@@ -146,7 +146,7 @@ impl Drop for DmaCoherentInner {
         let page_table = KERNEL_PAGE_TABLE.get().unwrap();
         let vaddr = paddr_to_vaddr(start_paddr);
         let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
-        // Safety: the physical mappings is only used by DMA so protecting it is safe.
+        // SAFETY: the physical mappings is only used by DMA so protecting it is safe.
         unsafe {
             page_table
                 .protect(&va_range, |p| p.cache = CachePolicy::Writeback)
@@ -64,7 +64,7 @@ impl DmaStream {
         let start_daddr = match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
-                // Safety:
+                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `check_and_insert_dma_mapping` function checks if the physical address range is already mapped.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.

@@ -79,7 +79,7 @@ impl DmaStream {
             DmaType::Iommu => {
                 for i in 0..frame_count {
                     let paddr = start_paddr + (i * PAGE_SIZE);
-                    // Safety: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
+                    // SAFETY: the `paddr` is restricted by the `start_paddr` and `frame_count` of the `vm_segment`.
                     unsafe {
                         iommu::map(paddr as Daddr, paddr).unwrap();
                     }

@@ -134,7 +134,7 @@ impl DmaStream {
         let start_va = self.inner.vm_segment.as_ptr();
         // TODO: Query the CPU for the cache line size via CPUID, we use 64 bytes as the cache line size here.
         for i in byte_range.step_by(64) {
-            // Safety: the addresses is limited by a valid `byte_range`.
+            // SAFETY: the addresses is limited by a valid `byte_range`.
             unsafe {
                 _mm_clflush(start_va.wrapping_add(i));
             }

@@ -158,7 +158,7 @@ impl Drop for DmaStreamInner {
         match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
-                // Safety:
+                // SAFETY:
                // This is safe because we are ensuring that the physical address range specified by `start_paddr` and `frame_count` is valid before these operations.
                // The `start_paddr()` ensures the `start_paddr` is page-aligned.
                // We are also ensuring that we are only modifying the page table entries corresponding to the physical address range specified by `start_paddr` and `frame_count`.
@@ -247,7 +247,7 @@ impl VmFrame {
             return;
         }

-        // Safety: src and dst is not overlapped.
+        // SAFETY: src and dst is not overlapped.
         unsafe {
             crate::arch::mm::fast_copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), PAGE_SIZE);
         }

@@ -257,13 +257,13 @@ impl VmFrame {
 impl<'a> VmFrame {
     /// Returns a reader to read data from it.
     pub fn reader(&'a self) -> VmReader<'a> {
-        // Safety: the memory of the page is contiguous and is valid during `'a`.
+        // SAFETY: the memory of the page is contiguous and is valid during `'a`.
         unsafe { VmReader::from_raw_parts(self.as_ptr(), PAGE_SIZE) }
     }

     /// Returns a writer to write data into it.
     pub fn writer(&'a self) -> VmWriter<'a> {
-        // Safety: the memory of the page is contiguous and is valid during `'a`.
+        // SAFETY: the memory of the page is contiguous and is valid during `'a`.
         unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), PAGE_SIZE) }
     }
 }

@@ -295,7 +295,7 @@ impl VmIo for VmFrame {
 impl Drop for VmFrame {
     fn drop(&mut self) {
         if self.need_dealloc() && Arc::strong_count(&self.frame_index) == 1 {
-            // Safety: the frame index is valid.
+            // SAFETY: the frame index is valid.
             unsafe {
                 frame_allocator::dealloc_single(self.frame_index());
             }

@@ -433,13 +433,13 @@ impl VmSegment {
 impl<'a> VmSegment {
     /// Returns a reader to read data from it.
     pub fn reader(&'a self) -> VmReader<'a> {
-        // Safety: the memory of the page frames is contiguous and is valid during `'a`.
+        // SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
         unsafe { VmReader::from_raw_parts(self.as_ptr(), self.nbytes()) }
     }

     /// Returns a writer to write data into it.
     pub fn writer(&'a self) -> VmWriter<'a> {
-        // Safety: the memory of the page frames is contiguous and is valid during `'a`.
+        // SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
         unsafe { VmWriter::from_raw_parts_mut(self.as_mut_ptr(), self.nbytes()) }
     }
 }

@@ -471,7 +471,7 @@ impl VmIo for VmSegment {
 impl Drop for VmSegment {
     fn drop(&mut self) {
         if self.need_dealloc() && Arc::strong_count(&self.inner.start_frame_index) == 1 {
-            // Safety: the range of contiguous page frames is valid.
+            // SAFETY: the range of contiguous page frames is valid.
             unsafe {
                 frame_allocator::dealloc_contiguous(
                     self.inner.start_frame_index(),

@@ -533,7 +533,7 @@ impl<'a> VmReader<'a> {

     /// Returns the number of bytes for the remaining data.
     pub const fn remain(&self) -> usize {
-        // Safety: the end is equal to or greater than the cursor.
+        // SAFETY: the end is equal to or greater than the cursor.
         unsafe { self.end.sub_ptr(self.cursor) }
     }

@@ -552,7 +552,7 @@ impl<'a> VmReader<'a> {
     /// This method ensures the postcondition of `self.remain() <= max_remain`.
     pub const fn limit(mut self, max_remain: usize) -> Self {
         if max_remain < self.remain() {
-            // Safety: the new end is less than the old end.
+            // SAFETY: the new end is less than the old end.
             unsafe { self.end = self.cursor.add(max_remain) };
         }
         self

@@ -567,7 +567,7 @@ impl<'a> VmReader<'a> {
     pub fn skip(mut self, nbytes: usize) -> Self {
         assert!(nbytes <= self.remain());

-        // Safety: the new cursor is less than or equal to the end.
+        // SAFETY: the new cursor is less than or equal to the end.
         unsafe { self.cursor = self.cursor.add(nbytes) };
         self
     }

@@ -586,7 +586,7 @@ impl<'a> VmReader<'a> {
             return 0;
         }

-        // Safety: the memory range is valid since `copy_len` is the minimum
+        // SAFETY: the memory range is valid since `copy_len` is the minimum
         // of the reader's remaining data and the writer's available space.
         unsafe {
             crate::arch::mm::fast_copy(self.cursor, writer.cursor, copy_len);

@@ -614,7 +614,7 @@ impl<'a> VmReader<'a> {

 impl<'a> From<&'a [u8]> for VmReader<'a> {
     fn from(slice: &'a [u8]) -> Self {
-        // Safety: the range of memory is contiguous and is valid during `'a`.
+        // SAFETY: the range of memory is contiguous and is valid during `'a`.
         unsafe { Self::from_raw_parts(slice.as_ptr(), slice.len()) }
     }
 }

@@ -658,7 +658,7 @@ impl<'a> VmWriter<'a> {

     /// Returns the number of bytes for the available space.
     pub const fn avail(&self) -> usize {
-        // Safety: the end is equal to or greater than the cursor.
+        // SAFETY: the end is equal to or greater than the cursor.
         unsafe { self.end.sub_ptr(self.cursor) }
     }

@@ -677,7 +677,7 @@ impl<'a> VmWriter<'a> {
     /// This method ensures the postcondition of `self.avail() <= max_avail`.
     pub const fn limit(mut self, max_avail: usize) -> Self {
         if max_avail < self.avail() {
-            // Safety: the new end is less than the old end.
+            // SAFETY: the new end is less than the old end.
             unsafe { self.end = self.cursor.add(max_avail) };
         }
         self

@@ -692,7 +692,7 @@ impl<'a> VmWriter<'a> {
     pub fn skip(mut self, nbytes: usize) -> Self {
         assert!(nbytes <= self.avail());

-        // Safety: the new cursor is less than or equal to the end.
+        // SAFETY: the new cursor is less than or equal to the end.
         unsafe { self.cursor = self.cursor.add(nbytes) };
         self
     }

@@ -711,7 +711,7 @@ impl<'a> VmWriter<'a> {
             return 0;
         }

-        // Safety: the memory range is valid since `copy_len` is the minimum
+        // SAFETY: the memory range is valid since `copy_len` is the minimum
         // of the reader's remaining data and the writer's available space.
         unsafe {
             crate::arch::mm::fast_copy(reader.cursor, self.cursor, copy_len);

@@ -738,7 +738,7 @@ impl<'a> VmWriter<'a> {
         let written_num = avail / core::mem::size_of::<T>();

         for i in 0..written_num {
-            // Safety: `written_num` is calculated by the avail size and the size of the type `T`,
+            // SAFETY: `written_num` is calculated by the avail size and the size of the type `T`,
             // hence the `add` operation and `write` operation are valid and will only manipulate
             // the memory managed by this writer.
             unsafe {

@@ -754,7 +754,7 @@ impl<'a> VmWriter<'a> {

 impl<'a> From<&'a mut [u8]> for VmWriter<'a> {
     fn from(slice: &'a mut [u8]) -> Self {
-        // Safety: the range of memory is contiguous and is valid during `'a`.
+        // SAFETY: the range of memory is contiguous and is valid during `'a`.
         unsafe { Self::from_raw_parts_mut(slice.as_mut_ptr(), slice.len()) }
     }
 }
@@ -24,7 +24,7 @@ pub(crate) fn alloc(nframes: usize, flags: VmFrameFlags) -> Option<VmFrameVec> {
         .alloc(nframes)
         .map(|start| {
             let mut vector = Vec::new();
-            // Safety: The frame index is valid.
+            // SAFETY: The frame index is valid.
             unsafe {
                 for i in 0..nframes {
                     let frame = VmFrame::new(

@@ -40,7 +40,7 @@ pub(crate) fn alloc(nframes: usize, flags: VmFrameFlags) -> Option<VmFrameVec> {

 pub(crate) fn alloc_single(flags: VmFrameFlags) -> Option<VmFrame> {
     FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx|
-        // Safety: The frame index is valid.
+        // SAFETY: The frame index is valid.
         unsafe { VmFrame::new(idx * PAGE_SIZE, flags.union(VmFrameFlags::NEED_DEALLOC)) })
 }

@@ -51,7 +51,7 @@ pub(crate) fn alloc_contiguous(nframes: usize, flags: VmFrameFlags) -> Option<Vm
         .lock()
         .alloc(nframes)
         .map(|start|
-            // Safety: The range of page frames is contiguous and valid.
+            // SAFETY: The range of page frames is contiguous and valid.
             unsafe {
                 VmSegment::new(
                     start * PAGE_SIZE,
@@ -31,7 +31,7 @@ const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
 static mut HEAP_SPACE: [u8; INIT_KERNEL_HEAP_SIZE] = [0; INIT_KERNEL_HEAP_SIZE];

 pub fn init() {
-    // Safety: The HEAP_SPACE is a static memory range, so it's always valid.
+    // SAFETY: The HEAP_SPACE is a static memory range, so it's always valid.
     unsafe {
         HEAP_ALLOCATOR.init(HEAP_SPACE.as_ptr(), INIT_KERNEL_HEAP_SIZE);
     }

@@ -51,12 +51,12 @@ impl<const ORDER: usize> LockedHeapWithRescue<ORDER> {
         }
     }

-    /// Safety: The range [start, start + size) must be a valid memory region.
+    /// SAFETY: The range [start, start + size) must be a valid memory region.
     pub unsafe fn init(&self, start: *const u8, size: usize) {
         self.heap.lock_irq_disabled().init(start as usize, size);
     }

-    /// Safety: The range [start, start + size) must be a valid memory region.
+    /// SAFETY: The range [start, start + size) must be a valid memory region.
     unsafe fn add_to_heap(&self, start: usize, size: usize) {
         self.heap
             .lock_irq_disabled()

@@ -122,7 +122,7 @@ fn rescue<const ORDER: usize>(heap: &LockedHeapWithRescue<ORDER>, layout: &Layou
     // So if the heap is nearly run out, allocating frame will fail too.
     let vaddr = paddr_to_vaddr(allocation_start * PAGE_SIZE);

-    // Safety: the frame is allocated from FramAllocator and never be deallocated,
+    // SAFETY: the frame is allocated from FramAllocator and never be deallocated,
     // so the addr is always valid.
     unsafe {
         debug!(
@@ -91,7 +91,7 @@ pub fn init_kernel_page_table() {
         cache: CachePolicy::Writeback,
         priv_flags: PrivilegedPageFlags::GLOBAL,
     };
-    // Safety: we are doing the linear mapping for the kernel.
+    // SAFETY: we are doing the linear mapping for the kernel.
     unsafe {
         kpt.map(&from, &to, prop).unwrap();
     }

@@ -108,7 +108,7 @@ pub fn init_kernel_page_table() {
         cache: CachePolicy::Uncacheable,
         priv_flags: PrivilegedPageFlags::GLOBAL,
     };
-    // Safety: we are doing I/O mappings for the kernel.
+    // SAFETY: we are doing I/O mappings for the kernel.
     unsafe {
         kpt.map(&from, &to, prop).unwrap();
     }

@@ -130,7 +130,7 @@ pub fn init_kernel_page_table() {
         cache: CachePolicy::Writeback,
         priv_flags: PrivilegedPageFlags::GLOBAL,
     };
-    // Safety: we are doing mappings for the kernel.
+    // SAFETY: we are doing mappings for the kernel.
     unsafe {
         kpt.map(&from, &to, prop).unwrap();
     }
@@ -15,7 +15,7 @@
 #[macro_export]
 macro_rules! offset_of {
     ($container:ty, $($field:tt)+) => ({
-        // SAFETY. It is ok to have this uninitialized value because
+        // SAFETY: It is ok to have this uninitialized value because
         // 1) Its memory won't be acccessed;
         // 2) It will be forgotten rather than being dropped;
         // 3) Before it gets forgotten, the code won't return prematurely or panic.
@@ -158,7 +158,7 @@ where
         huge: bool,
     ) {
         assert!(idx < nr_ptes_per_node::<C>());
-        // Safety: the index is within the bound and the PTE to be written is valid.
+        // SAFETY: the index is within the bound and the PTE to be written is valid.
         // And the physical address of PTE points to initialized memory.
         // This applies to all the following `write_pte` invocations.
         unsafe {

@@ -196,7 +196,7 @@ where
     pub(super) fn protect(&mut self, idx: usize, prop: PageProperty, level: usize) {
         debug_assert!(self.children[idx].is_some());
         let paddr = self.children[idx].paddr().unwrap();
-        // Safety: the index is within the bound and the PTE is valid.
+        // SAFETY: the index is within the bound and the PTE is valid.
         unsafe {
             self.write_pte(
                 idx,

@@ -207,7 +207,7 @@ where

     fn read_pte(&self, idx: usize) -> E {
         assert!(idx < nr_ptes_per_node::<C>());
-        // Safety: the index is within the bound and PTE is plain-old-data.
+        // SAFETY: the index is within the bound and PTE is plain-old-data.
         unsafe { (self.inner.as_ptr() as *const E).add(idx).read() }
     }
@@ -105,7 +105,7 @@ where
     [(); C::NR_LEVELS]:,
 {
     pub(crate) fn activate(&self) {
-        // Safety: The usermode page table is safe to activate since the kernel
+        // SAFETY: The usermode page table is safe to activate since the kernel
         // mappings are shared.
         unsafe {
             self.activate_unchecked();

@@ -118,7 +118,7 @@ where
     /// TODO: We may consider making the page table itself copy-on-write.
     pub(crate) fn fork_copy_on_write(&self) -> Self {
         let mut cursor = self.cursor_mut(&UserMode::VADDR_RANGE).unwrap();
-        // Safety: Protecting the user page table is safe.
+        // SAFETY: Protecting the user page table is safe.
         unsafe {
             cursor
                 .protect(

@@ -276,7 +276,7 @@ where
     /// cursors concurrently accessing the same virtual address range, just like what
     /// happens for the hardware MMU walk.
     pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
-        // Safety: The root frame is a valid page table frame so the address is valid.
+        // SAFETY: The root frame is a valid page table frame so the address is valid.
         unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
     }

@@ -361,7 +361,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
     let mut cur_pte = {
         let frame_addr = paddr_to_vaddr(root_paddr);
         let offset = pte_index::<C>(vaddr, cur_level);
-        // Safety: The offset does not exceed the value of PAGE_SIZE.
+        // SAFETY: The offset does not exceed the value of PAGE_SIZE.
         unsafe { (frame_addr as *const E).add(offset).read() }
     };

@@ -377,7 +377,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
         cur_pte = {
             let frame_addr = paddr_to_vaddr(cur_pte.paddr());
             let offset = pte_index::<C>(vaddr, cur_level);
-            // Safety: The offset does not exceed the value of PAGE_SIZE.
+            // SAFETY: The offset does not exceed the value of PAGE_SIZE.
             unsafe { (frame_addr as *const E).add(offset).read() }
         };
     }
@@ -93,7 +93,7 @@ impl VmSpace {
         };

         for frame in frames.into_iter() {
-            // Safety: mapping in the user space with `VmFrame` is safe.
+            // SAFETY: mapping in the user space with `VmFrame` is safe.
             unsafe {
                 cursor.map(frame, prop);
             }

@@ -132,7 +132,7 @@ impl VmSpace {
         if !UserMode::covers(range) {
             return Err(Error::InvalidArgs);
         }
-        // Safety: unmapping in the user space is safe.
+        // SAFETY: unmapping in the user space is safe.
         unsafe {
             self.pt.unmap(range)?;
         }

@@ -141,7 +141,7 @@ impl VmSpace {

     /// clear all mappings
     pub fn clear(&self) {
-        // Safety: unmapping user space is safe, and we don't care unmapping
+        // SAFETY: unmapping user space is safe, and we don't care unmapping
         // invalid ranges.
         unsafe {
             self.pt.unmap(&(0..MAX_USERSPACE_VADDR)).unwrap();

@@ -169,7 +169,7 @@ impl VmSpace {
         if !UserMode::covers(range) {
             return Err(Error::InvalidArgs);
         }
-        // Safety: protecting in the user space is safe.
+        // SAFETY: protecting in the user space is safe.
         unsafe {
             self.pt.protect(range, op)?;
         }
@@ -200,7 +200,7 @@ macro_rules! ktest_array {
         }
         let item_size = core::mem::size_of::<KtestItem>();
         let l = (__ktest_array_end as usize - __ktest_array as usize) / item_size;
-        // Safety: __ktest_array is a static section consisting of KtestItem.
+        // SAFETY: __ktest_array is a static section consisting of KtestItem.
         unsafe { core::slice::from_raw_parts(__ktest_array as *const KtestItem, l) }
     }};
 }
@@ -12,13 +12,13 @@ static mut STDOUT: Stdout = Stdout {
     serial_port: unsafe { SerialPort::new(0x0) },
 };

-/// safety: this function must only be called once
+/// SAFETY: this function must only be called once
 pub unsafe fn init() {
     STDOUT = Stdout::init();
 }

 impl Stdout {
-    /// safety: this function must only be called once
+    /// SAFETY: this function must only be called once
     pub unsafe fn init() -> Self {
         let mut serial_port = unsafe { SerialPort::new(0x3F8) };
         serial_port.init();

@@ -35,7 +35,7 @@ impl Write for Stdout {

 /// This is used when dyn Trait is not supported or fmt::Arguments is fragile to use in PIE.
 ///
-/// Safety: init() must be called before print_str() and there should be no race conditions.
+/// SAFETY: init() must be called before print_str() and there should be no race conditions.
 pub unsafe fn print_str(s: &str) {
     STDOUT.write_str(s).unwrap();
 }

@@ -46,7 +46,7 @@ unsafe fn print_char(c: char) {

 /// This is used when dyn Trait is not supported or fmt::Arguments is fragile to use in PIE.
 ///
-/// Safety: init() must be called before print_hex() and there should be no race conditions.
+/// SAFETY: init() must be called before print_hex() and there should be no race conditions.
 pub unsafe fn print_hex(n: u64) {
     print_str("0x");
     for i in (0..16).rev() {

@@ -65,7 +65,7 @@ pub unsafe fn print_hex(n: u64) {

 /// Glue code for print!() and println!() macros.
 ///
-/// Safety: init() must be called before print_fmt() and there should be no race conditions.
+/// SAFETY: init() must be called before print_fmt() and there should be no race conditions.
 pub unsafe fn print_fmt(args: fmt::Arguments) {
     STDOUT.write_fmt(args).unwrap();
 }
@@ -22,7 +22,7 @@ fn load_segment(file: &xmas_elf::ElfFile, program: &xmas_elf::program::ProgramHe
     let SegmentData::Undefined(header_data) = program.get_data(file).unwrap() else {
         panic!("[setup] Unexpected segment data type!");
     };
-    // Safety: the physical address from the ELF file is valid
+    // SAFETY: the physical address from the ELF file is valid
     let dst_slice = unsafe {
         core::slice::from_raw_parts_mut(program.physical_addr as *mut u8, program.mem_size as usize)
     };

@@ -40,7 +40,7 @@ fn load_segment(file: &xmas_elf::ElfFile, program: &xmas_elf::program::ProgramHe
         print_hex(program.mem_size as u64);
         print_str("\n");
     }
-    // Safety: the ELF file is valid
+    // SAFETY: the ELF file is valid
     // dst_slice[..program.file_size as usize].copy_from_slice(header_data);
     unsafe {
         memcpy(
@@ -38,7 +38,7 @@ fn get_payload(boot_params: &BootParams) -> &'static [u8] {
     let loaded_offset = x86::get_image_loaded_offset();
     let payload_offset = (loaded_offset + hdr.payload_offset as isize) as usize;
     let payload_length = hdr.payload_length as usize;
-    // Safety: the payload_offset and payload_length is valid if we assume that the
+    // SAFETY: the payload_offset and payload_length is valid if we assume that the
     // boot_params struct is correct.
     unsafe { core::slice::from_raw_parts_mut(payload_offset as *mut u8, payload_length) }
 }
@@ -47,10 +47,10 @@ fn efi_phase_boot(
     system_table: SystemTable<Boot>,
     boot_params_ptr: *mut BootParams,
 ) -> ! {
-    // Safety: this init function is only called once.
+    // SAFETY: this init function is only called once.
     unsafe { crate::console::init() };

-    // Safety: this is the right time to apply relocations.
+    // SAFETY: this is the right time to apply relocations.
     unsafe { apply_rela_dyn_relocations() };

     uefi_services::println!("[EFI stub] Relocations applied.");
@@ -43,7 +43,7 @@ fn get_rela_array() -> &'static [Elf64Rela] {
         print_hex(end as u64);
         print_str("\n");
     }
-    // Safety: the linker will ensure that the symbols are valid.
+    // SAFETY: the linker will ensure that the symbols are valid.
     unsafe { core::slice::from_raw_parts(start, len) }
 }
@@ -14,7 +14,7 @@ pub const ASTER_ENTRY_POINT: u32 = 0x8001000;

 #[export_name = "_bzimage_entry_32"]
 extern "cdecl" fn bzimage_entry(boot_params_ptr: u32) -> ! {
-    // Safety: this init function is only called once.
+    // SAFETY: this init function is only called once.
     unsafe { crate::console::init() };

     // println!("[setup] bzImage loaded at {:#x}", x86::relocation::get_image_loaded_offset());

@@ -24,13 +24,13 @@ extern "cdecl" fn bzimage_entry(boot_params_ptr: u32) -> ! {
         print_str("\n");
     }

-    // Safety: the boot_params_ptr is a valid pointer to be borrowed.
+    // SAFETY: the boot_params_ptr is a valid pointer to be borrowed.
     let boot_params = unsafe { &*(boot_params_ptr as *const BootParams) };
-    // Safety: the payload_offset and payload_length is valid.
+    // SAFETY: the payload_offset and payload_length is valid.
     let payload = crate::get_payload(boot_params);
     crate::loader::load_elf(payload);

-    // Safety: the entrypoint and the ptr is valid.
+    // SAFETY: the entrypoint and the ptr is valid.
     unsafe { call_aster_entrypoint(ASTER_ENTRY_POINT, boot_params_ptr.try_into().unwrap()) };
 }