mirror of
https://github.com/asterinas/asterinas.git
synced 2025-06-15 08:16:47 +00:00
Rename various concepts related to memory management
This commit is contained in:
parent
03a39c94ca
commit
14e1b1a9fc
@ -75,7 +75,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
|
|||||||
let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
|
let nframes = program.len().align_up(PAGE_SIZE) / PAGE_SIZE;
|
||||||
let vm_frames = VmAllocOptions::new(nframes).alloc().unwrap();
|
let vm_frames = VmAllocOptions::new(nframes).alloc().unwrap();
|
||||||
// Phyiscal memory pages can be only accessed
|
// Phyiscal memory pages can be only accessed
|
||||||
// via the VmFrame abstraction.
|
// via the Frame abstraction.
|
||||||
vm_frames.write_bytes(0, program).unwrap();
|
vm_frames.write_bytes(0, program).unwrap();
|
||||||
vm_frames
|
vm_frames
|
||||||
};
|
};
|
||||||
|
@ -15,7 +15,7 @@ use crate::{
|
|||||||
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
||||||
BootloaderAcpiArg, BootloaderFramebufferArg,
|
BootloaderAcpiArg, BootloaderFramebufferArg,
|
||||||
},
|
},
|
||||||
vm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
|
mm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
|
||||||
};
|
};
|
||||||
|
|
||||||
static BOOT_PARAMS: Once<BootParams> = Once::new();
|
static BOOT_PARAMS: Once<BootParams> = Once::new();
|
||||||
|
@ -12,7 +12,7 @@ use crate::{
|
|||||||
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
||||||
BootloaderAcpiArg, BootloaderFramebufferArg,
|
BootloaderAcpiArg, BootloaderFramebufferArg,
|
||||||
},
|
},
|
||||||
vm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
|
mm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
|
||||||
};
|
};
|
||||||
|
|
||||||
global_asm!(include_str!("header.S"));
|
global_asm!(include_str!("header.S"));
|
||||||
|
@ -15,7 +15,7 @@ use crate::{
|
|||||||
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
|
||||||
BootloaderAcpiArg, BootloaderFramebufferArg,
|
BootloaderAcpiArg, BootloaderFramebufferArg,
|
||||||
},
|
},
|
||||||
vm::kspace::paddr_to_vaddr,
|
mm::kspace::paddr_to_vaddr,
|
||||||
};
|
};
|
||||||
|
|
||||||
global_asm!(include_str!("header.S"));
|
global_asm!(include_str!("header.S"));
|
||||||
|
@ -9,11 +9,11 @@ use pod::Pod;
|
|||||||
use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
|
use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
|
||||||
use crate::{
|
use crate::{
|
||||||
bus::pci::PciDeviceLocation,
|
bus::pci::PciDeviceLocation,
|
||||||
vm::{
|
mm::{
|
||||||
dma::Daddr,
|
dma::Daddr,
|
||||||
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
|
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
|
||||||
page_table::PageTableError,
|
page_table::PageTableError,
|
||||||
Paddr, PageFlags, PageTable, VmAllocOptions, VmFrame, VmIo, PAGE_SIZE,
|
Frame, Paddr, PageFlags, PageTable, VmAllocOptions, VmIo, PAGE_SIZE,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -36,7 +36,7 @@ impl RootEntry {
|
|||||||
|
|
||||||
pub struct RootTable {
|
pub struct RootTable {
|
||||||
/// Total 256 bus, each entry is 128 bits.
|
/// Total 256 bus, each entry is 128 bits.
|
||||||
root_frame: VmFrame,
|
root_frame: Frame,
|
||||||
// TODO: Use radix tree instead.
|
// TODO: Use radix tree instead.
|
||||||
context_tables: BTreeMap<Paddr, ContextTable>,
|
context_tables: BTreeMap<Paddr, ContextTable>,
|
||||||
}
|
}
|
||||||
@ -233,7 +233,7 @@ pub enum AddressWidth {
|
|||||||
|
|
||||||
pub struct ContextTable {
|
pub struct ContextTable {
|
||||||
/// Total 32 devices, each device has 8 functions.
|
/// Total 32 devices, each device has 8 functions.
|
||||||
entries_frame: VmFrame,
|
entries_frame: Frame,
|
||||||
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
|
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@ use trapframe::TrapFrame;
|
|||||||
use volatile::{access::ReadWrite, Volatile};
|
use volatile::{access::ReadWrite, Volatile};
|
||||||
|
|
||||||
use super::remapping::Capability;
|
use super::remapping::Capability;
|
||||||
use crate::{trap::IrqLine, vm::Vaddr};
|
use crate::{mm::Vaddr, trap::IrqLine};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct FaultEventRegisters {
|
pub struct FaultEventRegisters {
|
||||||
|
@ -13,8 +13,8 @@ use spin::Once;
|
|||||||
use crate::{
|
use crate::{
|
||||||
arch::iommu::context_table::RootTable,
|
arch::iommu::context_table::RootTable,
|
||||||
bus::pci::PciDeviceLocation,
|
bus::pci::PciDeviceLocation,
|
||||||
|
mm::{dma::Daddr, page_table::PageTableError, Paddr, PageTable},
|
||||||
sync::Mutex,
|
sync::Mutex,
|
||||||
vm::{dma::Daddr, page_table::PageTableError, Paddr, PageTable},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -17,7 +17,7 @@ use crate::{
|
|||||||
ACPI_TABLES,
|
ACPI_TABLES,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
vm::paddr_to_vaddr,
|
mm::paddr_to_vaddr,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -4,7 +4,7 @@ use core::ops::Range;
|
|||||||
|
|
||||||
use pod::Pod;
|
use pod::Pod;
|
||||||
|
|
||||||
use crate::vm::{
|
use crate::mm::{
|
||||||
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
|
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
|
||||||
page_table::{PageTableEntryTrait, PageTableMode},
|
page_table::{PageTableEntryTrait, PageTableMode},
|
||||||
Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,
|
Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,
|
||||||
|
@ -9,7 +9,7 @@ use super::{
|
|||||||
remapping::{Andd, Atsr, Drhd, Rhsa, Rmrr, Satc, Sidp},
|
remapping::{Andd, Atsr, Drhd, Rhsa, Rmrr, Satc, Sidp},
|
||||||
SdtHeaderWrapper,
|
SdtHeaderWrapper,
|
||||||
};
|
};
|
||||||
use crate::vm::paddr_to_vaddr;
|
use crate::mm::paddr_to_vaddr;
|
||||||
|
|
||||||
/// DMA Remapping structure. When IOMMU is enabled, the structure should be present in the ACPI table,
|
/// DMA Remapping structure. When IOMMU is enabled, the structure should be present in the ACPI table,
|
||||||
/// and the user can use the DRHD table in this structure to obtain the register base addresses used to configure functions such as IOMMU.
|
/// and the user can use the DRHD table in this structure to obtain the register base addresses used to configure functions such as IOMMU.
|
||||||
|
@ -15,8 +15,8 @@ use spin::Once;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
boot::{self, BootloaderAcpiArg},
|
boot::{self, BootloaderAcpiArg},
|
||||||
|
mm::paddr_to_vaddr,
|
||||||
sync::SpinLock,
|
sync::SpinLock,
|
||||||
vm::paddr_to_vaddr,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// RSDP information, key is the signature, value is the virtual address of the signature
|
/// RSDP information, key is the signature, value is the virtual address of the signature
|
||||||
|
@ -12,7 +12,7 @@ use spin::Once;
|
|||||||
#[cfg(feature = "intel_tdx")]
|
#[cfg(feature = "intel_tdx")]
|
||||||
use crate::arch::tdx_guest;
|
use crate::arch::tdx_guest;
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::x86::kernel::acpi::ACPI_TABLES, sync::SpinLock, trap::IrqLine, vm::paddr_to_vaddr, Error,
|
arch::x86::kernel::acpi::ACPI_TABLES, mm::paddr_to_vaddr, sync::SpinLock, trap::IrqLine, Error,
|
||||||
Result,
|
Result,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@ use spin::Once;
|
|||||||
use x86::apic::xapic;
|
use x86::apic::xapic;
|
||||||
|
|
||||||
use super::ApicTimer;
|
use super::ApicTimer;
|
||||||
use crate::{sync::Mutex, vm};
|
use crate::{mm, sync::Mutex};
|
||||||
|
|
||||||
const IA32_APIC_BASE_MSR: u32 = 0x1B;
|
const IA32_APIC_BASE_MSR: u32 = 0x1B;
|
||||||
const IA32_APIC_BASE_MSR_BSP: u32 = 0x100; // Processor is a BSP
|
const IA32_APIC_BASE_MSR_BSP: u32 = 0x100; // Processor is a BSP
|
||||||
@ -24,7 +24,7 @@ impl XApic {
|
|||||||
if !Self::has_xapic() {
|
if !Self::has_xapic() {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
let address = vm::paddr_to_vaddr(get_apic_base_address());
|
let address = mm::paddr_to_vaddr(get_apic_base_address());
|
||||||
let region: &'static mut [u32] = unsafe { &mut *(address as *mut [u32; 256]) };
|
let region: &'static mut [u32] = unsafe { &mut *(address as *mut [u32; 256]) };
|
||||||
Some(Self {
|
Some(Self {
|
||||||
mmio_region: region,
|
mmio_region: region,
|
||||||
|
@ -6,7 +6,7 @@ use core::ops::Range;
|
|||||||
use pod::Pod;
|
use pod::Pod;
|
||||||
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
|
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
|
||||||
|
|
||||||
use crate::vm::{
|
use crate::mm::{
|
||||||
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
|
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
|
||||||
page_table::PageTableEntryTrait,
|
page_table::PageTableEntryTrait,
|
||||||
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
|
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
|
||||||
|
@ -13,7 +13,7 @@ use trapframe::TrapFrame;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::mm::PageTableFlags,
|
arch::mm::PageTableFlags,
|
||||||
vm::{
|
mm::{
|
||||||
kspace::KERNEL_PAGE_TABLE,
|
kspace::KERNEL_PAGE_TABLE,
|
||||||
paddr_to_vaddr,
|
paddr_to_vaddr,
|
||||||
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
|
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
|
||||||
|
@ -11,8 +11,8 @@ use volatile::{
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::x86::kernel::{acpi::ACPI_TABLES, apic::ioapic},
|
arch::x86::kernel::{acpi::ACPI_TABLES, apic::ioapic},
|
||||||
|
mm::paddr_to_vaddr,
|
||||||
trap::IrqLine,
|
trap::IrqLine,
|
||||||
vm::paddr_to_vaddr,
|
|
||||||
};
|
};
|
||||||
static HPET_INSTANCE: Once<Hpet> = Once::new();
|
static HPET_INSTANCE: Once<Hpet> = Once::new();
|
||||||
|
|
||||||
|
@ -6,7 +6,7 @@
|
|||||||
use alloc::{vec, vec::Vec};
|
use alloc::{vec, vec::Vec};
|
||||||
use core::mem::swap;
|
use core::mem::swap;
|
||||||
|
|
||||||
use crate::vm::kspace::kernel_loaded_offset;
|
use crate::mm::kspace::kernel_loaded_offset;
|
||||||
|
|
||||||
/// The type of initial memory regions that are needed for the kernel.
|
/// The type of initial memory regions that are needed for the kernel.
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
|
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
|
||||||
|
@ -6,8 +6,8 @@ use log::info;
|
|||||||
use super::VIRTIO_MMIO_MAGIC;
|
use super::VIRTIO_MMIO_MAGIC;
|
||||||
use crate::{
|
use crate::{
|
||||||
io_mem::IoMem,
|
io_mem::IoMem,
|
||||||
|
mm::{paddr_to_vaddr, Paddr, VmIo},
|
||||||
trap::IrqLine,
|
trap::IrqLine,
|
||||||
vm::{paddr_to_vaddr, Paddr, VmIo},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// MMIO Common device.
|
/// MMIO Common device.
|
||||||
|
@ -16,8 +16,8 @@ use self::bus::MmioBus;
|
|||||||
#[cfg(feature = "intel_tdx")]
|
#[cfg(feature = "intel_tdx")]
|
||||||
use crate::arch::tdx_guest;
|
use crate::arch::tdx_guest;
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::kernel::IO_APIC, bus::mmio::device::MmioCommonDevice, sync::SpinLock, trap::IrqLine,
|
arch::kernel::IO_APIC, bus::mmio::device::MmioCommonDevice, mm::paddr_to_vaddr, sync::SpinLock,
|
||||||
vm::paddr_to_vaddr,
|
trap::IrqLine,
|
||||||
};
|
};
|
||||||
|
|
||||||
const VIRTIO_MMIO_MAGIC: u32 = 0x74726976;
|
const VIRTIO_MMIO_MAGIC: u32 = 0x74726976;
|
||||||
|
@ -13,8 +13,8 @@ use crate::{
|
|||||||
common_device::PciCommonDevice,
|
common_device::PciCommonDevice,
|
||||||
device_info::PciDeviceLocation,
|
device_info::PciDeviceLocation,
|
||||||
},
|
},
|
||||||
|
mm::VmIo,
|
||||||
trap::IrqLine,
|
trap::IrqLine,
|
||||||
vm::VmIo,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// MSI-X capability. It will set the BAR space it uses to be hidden.
|
/// MSI-X capability. It will set the BAR space it uses to be hidden.
|
||||||
|
@ -5,4 +5,4 @@ extern crate xarray as xarray_crate;
|
|||||||
|
|
||||||
pub use xarray_crate::{Cursor, CursorMut, XArray, XMark};
|
pub use xarray_crate::{Cursor, CursorMut, XArray, XMark};
|
||||||
|
|
||||||
pub use crate::vm::page::VmFrameRef;
|
pub use crate::mm::page::VmFrameRef;
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
use crate::vm::page_table::PageTableError;
|
use crate::mm::page_table::PageTableError;
|
||||||
|
|
||||||
/// The error type which is returned from the APIs of this crate.
|
/// The error type which is returned from the APIs of this crate.
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||||
|
@ -5,7 +5,7 @@ use core::{mem::size_of, ops::Range};
|
|||||||
use pod::Pod;
|
use pod::Pod;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
vm::{kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo},
|
mm::{kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo},
|
||||||
Error, Result,
|
Error, Result,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -38,13 +38,13 @@ pub mod cpu;
|
|||||||
mod error;
|
mod error;
|
||||||
pub mod io_mem;
|
pub mod io_mem;
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
|
pub mod mm;
|
||||||
pub mod panicking;
|
pub mod panicking;
|
||||||
pub mod prelude;
|
pub mod prelude;
|
||||||
pub mod sync;
|
pub mod sync;
|
||||||
pub mod task;
|
pub mod task;
|
||||||
pub mod trap;
|
pub mod trap;
|
||||||
pub mod user;
|
pub mod user;
|
||||||
pub mod vm;
|
|
||||||
|
|
||||||
#[cfg(feature = "intel_tdx")]
|
#[cfg(feature = "intel_tdx")]
|
||||||
use tdx_guest::init_tdx;
|
use tdx_guest::init_tdx;
|
||||||
@ -64,20 +64,20 @@ pub fn init() {
|
|||||||
td_info.attributes
|
td_info.attributes
|
||||||
);
|
);
|
||||||
|
|
||||||
vm::heap_allocator::init();
|
mm::heap_allocator::init();
|
||||||
|
|
||||||
boot::init();
|
boot::init();
|
||||||
|
|
||||||
vm::page::allocator::init();
|
mm::page::allocator::init();
|
||||||
let mut boot_pt = vm::get_boot_pt();
|
let mut boot_pt = mm::get_boot_pt();
|
||||||
let meta_pages = vm::init_page_meta(&mut boot_pt);
|
let meta_pages = mm::init_page_meta(&mut boot_pt);
|
||||||
vm::misc_init();
|
mm::misc_init();
|
||||||
|
|
||||||
trap::init();
|
trap::init();
|
||||||
arch::after_all_init();
|
arch::after_all_init();
|
||||||
bus::init();
|
bus::init();
|
||||||
|
|
||||||
vm::kspace::init_kernel_page_table(boot_pt, meta_pages);
|
mm::kspace::init_kernel_page_table(boot_pt, meta_pages);
|
||||||
|
|
||||||
invoke_ffi_init_funcs();
|
invoke_ffi_init_funcs();
|
||||||
}
|
}
|
||||||
|
@ -11,11 +11,11 @@ use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr
|
|||||||
use crate::arch::tdx_guest;
|
use crate::arch::tdx_guest;
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::iommu,
|
arch::iommu,
|
||||||
vm::{
|
mm::{
|
||||||
dma::{dma_type, Daddr, DmaType},
|
dma::{dma_type, Daddr, DmaType},
|
||||||
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
|
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
|
||||||
page_prop::CachePolicy,
|
page_prop::CachePolicy,
|
||||||
HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
|
HasPaddr, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -32,7 +32,7 @@ pub struct DmaCoherent {
|
|||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct DmaCoherentInner {
|
struct DmaCoherentInner {
|
||||||
vm_segment: VmSegment,
|
vm_segment: Segment,
|
||||||
start_daddr: Daddr,
|
start_daddr: Daddr,
|
||||||
is_cache_coherent: bool,
|
is_cache_coherent: bool,
|
||||||
}
|
}
|
||||||
@ -47,7 +47,7 @@ impl DmaCoherent {
|
|||||||
///
|
///
|
||||||
/// The method fails if any part of the given VM segment
|
/// The method fails if any part of the given VM segment
|
||||||
/// already belongs to a DMA mapping.
|
/// already belongs to a DMA mapping.
|
||||||
pub fn map(vm_segment: VmSegment, is_cache_coherent: bool) -> Result<Self, DmaError> {
|
pub fn map(vm_segment: Segment, is_cache_coherent: bool) -> Result<Self, DmaError> {
|
||||||
let frame_count = vm_segment.nframes();
|
let frame_count = vm_segment.nframes();
|
||||||
let start_paddr = vm_segment.start_paddr();
|
let start_paddr = vm_segment.start_paddr();
|
||||||
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
|
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
|
||||||
@ -109,7 +109,7 @@ impl HasDaddr for DmaCoherent {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Deref for DmaCoherent {
|
impl Deref for DmaCoherent {
|
||||||
type Target = VmSegment;
|
type Target = Segment;
|
||||||
fn deref(&self) -> &Self::Target {
|
fn deref(&self) -> &Self::Target {
|
||||||
&self.inner.vm_segment
|
&self.inner.vm_segment
|
||||||
}
|
}
|
||||||
@ -190,7 +190,7 @@ mod test {
|
|||||||
use alloc::vec;
|
use alloc::vec;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::vm::VmAllocOptions;
|
use crate::mm::VmAllocOptions;
|
||||||
|
|
||||||
#[ktest]
|
#[ktest]
|
||||||
fn map_with_coherent_device() {
|
fn map_with_coherent_device() {
|
@ -12,9 +12,9 @@ use crate::arch::tdx_guest;
|
|||||||
use crate::{
|
use crate::{
|
||||||
arch::iommu,
|
arch::iommu,
|
||||||
error::Error,
|
error::Error,
|
||||||
vm::{
|
mm::{
|
||||||
dma::{dma_type, Daddr, DmaType},
|
dma::{dma_type, Daddr, DmaType},
|
||||||
HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
|
HasPaddr, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -30,7 +30,7 @@ pub struct DmaStream {
|
|||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct DmaStreamInner {
|
struct DmaStreamInner {
|
||||||
vm_segment: VmSegment,
|
vm_segment: Segment,
|
||||||
start_daddr: Daddr,
|
start_daddr: Daddr,
|
||||||
is_cache_coherent: bool,
|
is_cache_coherent: bool,
|
||||||
direction: DmaDirection,
|
direction: DmaDirection,
|
||||||
@ -46,11 +46,11 @@ pub enum DmaDirection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DmaStream {
|
impl DmaStream {
|
||||||
/// Establish DMA stream mapping for a given `VmSegment`.
|
/// Establish DMA stream mapping for a given `Segment`.
|
||||||
///
|
///
|
||||||
/// The method fails if the segment already belongs to a DMA mapping.
|
/// The method fails if the segment already belongs to a DMA mapping.
|
||||||
pub fn map(
|
pub fn map(
|
||||||
vm_segment: VmSegment,
|
vm_segment: Segment,
|
||||||
direction: DmaDirection,
|
direction: DmaDirection,
|
||||||
is_cache_coherent: bool,
|
is_cache_coherent: bool,
|
||||||
) -> Result<Self, DmaError> {
|
) -> Result<Self, DmaError> {
|
||||||
@ -104,7 +104,7 @@ impl DmaStream {
|
|||||||
/// after the DMA mapping is established because
|
/// after the DMA mapping is established because
|
||||||
/// there is a chance that the device is updating
|
/// there is a chance that the device is updating
|
||||||
/// the memory. Do this at your own risk.
|
/// the memory. Do this at your own risk.
|
||||||
pub fn vm_segment(&self) -> &VmSegment {
|
pub fn vm_segment(&self) -> &Segment {
|
||||||
&self.inner.vm_segment
|
&self.inner.vm_segment
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -294,7 +294,7 @@ mod test {
|
|||||||
use alloc::vec;
|
use alloc::vec;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::vm::VmAllocOptions;
|
use crate::mm::VmAllocOptions;
|
||||||
|
|
||||||
#[ktest]
|
#[ktest]
|
||||||
fn streaming_map() {
|
fn streaming_map() {
|
@ -11,7 +11,7 @@ use inherit_methods_macro::inherit_methods;
|
|||||||
use spin::Once;
|
use spin::Once;
|
||||||
|
|
||||||
use super::Paddr;
|
use super::Paddr;
|
||||||
use crate::{arch::iommu::has_iommu, sync::SpinLock, vm::PAGE_SIZE};
|
use crate::{arch::iommu::has_iommu, mm::PAGE_SIZE, sync::SpinLock};
|
||||||
|
|
||||||
/// If a device performs DMA to read or write system
|
/// If a device performs DMA to read or write system
|
||||||
/// memory, the addresses used by the device are device addresses.
|
/// memory, the addresses used by the device are device addresses.
|
@ -11,10 +11,10 @@ use log::debug;
|
|||||||
|
|
||||||
use super::paddr_to_vaddr;
|
use super::paddr_to_vaddr;
|
||||||
use crate::{
|
use crate::{
|
||||||
|
mm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
|
||||||
prelude::*,
|
prelude::*,
|
||||||
sync::SpinLock,
|
sync::SpinLock,
|
||||||
trap::disable_local,
|
trap::disable_local,
|
||||||
vm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
|
|
||||||
Error,
|
Error,
|
||||||
};
|
};
|
||||||
|
|
@ -9,7 +9,7 @@ use pod::Pod;
|
|||||||
use crate::prelude::*;
|
use crate::prelude::*;
|
||||||
|
|
||||||
/// A trait that enables reading/writing data from/to a VM object,
|
/// A trait that enables reading/writing data from/to a VM object,
|
||||||
/// e.g., `VmSpace`, `VmFrameVec`, and `VmFrame`.
|
/// e.g., `VmSpace`, `VmFrameVec`, and `Frame`.
|
||||||
///
|
///
|
||||||
/// # Concurrency
|
/// # Concurrency
|
||||||
///
|
///
|
@ -76,7 +76,7 @@ const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
|
|||||||
|
|
||||||
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
|
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
|
||||||
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
|
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
|
||||||
pub(in crate::vm) const FRAME_METADATA_RANGE: Range<Vaddr> =
|
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
|
||||||
FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
|
FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
|
||||||
|
|
||||||
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
|
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;
|
@ -28,7 +28,7 @@ pub use self::{
|
|||||||
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
|
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
|
||||||
io::{VmIo, VmReader, VmWriter},
|
io::{VmIo, VmReader, VmWriter},
|
||||||
options::VmAllocOptions,
|
options::VmAllocOptions,
|
||||||
page::{FrameVecIter, VmFrame, VmFrameVec, VmSegment},
|
page::{Frame, FrameVecIter, Segment, VmFrameVec},
|
||||||
page_prop::{CachePolicy, PageFlags, PageProperty},
|
page_prop::{CachePolicy, PageFlags, PageProperty},
|
||||||
space::{VmMapOptions, VmSpace},
|
space::{VmMapOptions, VmSpace},
|
||||||
};
|
};
|
@ -1,6 +1,6 @@
|
|||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
use super::{page::allocator, VmFrame, VmFrameVec, VmSegment};
|
use super::{page::allocator, Frame, Segment, VmFrameVec};
|
||||||
use crate::{prelude::*, Error};
|
use crate::{prelude::*, Error};
|
||||||
|
|
||||||
/// Options for allocating physical memory pages (or frames).
|
/// Options for allocating physical memory pages (or frames).
|
||||||
@ -66,7 +66,7 @@ impl VmAllocOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Allocate a single page frame according to the given options.
|
/// Allocate a single page frame according to the given options.
|
||||||
pub fn alloc_single(&self) -> Result<VmFrame> {
|
pub fn alloc_single(&self) -> Result<Frame> {
|
||||||
if self.nframes != 1 {
|
if self.nframes != 1 {
|
||||||
return Err(Error::InvalidArgs);
|
return Err(Error::InvalidArgs);
|
||||||
}
|
}
|
||||||
@ -81,8 +81,8 @@ impl VmAllocOptions {
|
|||||||
|
|
||||||
/// Allocate a contiguous range of page frames according to the given options.
|
/// Allocate a contiguous range of page frames according to the given options.
|
||||||
///
|
///
|
||||||
/// The returned `VmSegment` contains at least one page frame.
|
/// The returned `Segment` contains at least one page frame.
|
||||||
pub fn alloc_contiguous(&self) -> Result<VmSegment> {
|
pub fn alloc_contiguous(&self) -> Result<Segment> {
|
||||||
// It's no use to checking `self.is_contiguous` here.
|
// It's no use to checking `self.is_contiguous` here.
|
||||||
if self.nframes == 0 {
|
if self.nframes == 0 {
|
||||||
return Err(Error::InvalidArgs);
|
return Err(Error::InvalidArgs);
|
@ -7,10 +7,10 @@ use buddy_system_allocator::FrameAllocator;
|
|||||||
use log::info;
|
use log::info;
|
||||||
use spin::Once;
|
use spin::Once;
|
||||||
|
|
||||||
use super::{meta::FrameMeta, Page, VmFrame, VmFrameVec, VmSegment};
|
use super::{meta::FrameMeta, Frame, Page, Segment, VmFrameVec};
|
||||||
use crate::{boot::memory_region::MemoryRegionType, sync::SpinLock, vm::PAGE_SIZE};
|
use crate::{boot::memory_region::MemoryRegionType, mm::PAGE_SIZE, sync::SpinLock};
|
||||||
|
|
||||||
pub(in crate::vm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
|
pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
|
||||||
|
|
||||||
pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
|
pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
|
||||||
FRAME_ALLOCATOR
|
FRAME_ALLOCATOR
|
||||||
@ -23,7 +23,7 @@ pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
|
|||||||
for i in 0..nframes {
|
for i in 0..nframes {
|
||||||
let paddr = (start + i) * PAGE_SIZE;
|
let paddr = (start + i) * PAGE_SIZE;
|
||||||
// SAFETY: The frame index is valid.
|
// SAFETY: The frame index is valid.
|
||||||
let frame = VmFrame {
|
let frame = Frame {
|
||||||
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
|
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
|
||||||
};
|
};
|
||||||
vector.push(frame);
|
vector.push(frame);
|
||||||
@ -32,16 +32,16 @@ pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn alloc_single() -> Option<VmFrame> {
|
pub(crate) fn alloc_single() -> Option<Frame> {
|
||||||
FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
|
FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
|
||||||
let paddr = idx * PAGE_SIZE;
|
let paddr = idx * PAGE_SIZE;
|
||||||
VmFrame {
|
Frame {
|
||||||
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
|
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn alloc_contiguous(nframes: usize) -> Option<VmSegment> {
|
pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
|
||||||
FRAME_ALLOCATOR
|
FRAME_ALLOCATOR
|
||||||
.get()
|
.get()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
@ -50,7 +50,7 @@ pub(crate) fn alloc_contiguous(nframes: usize) -> Option<VmSegment> {
|
|||||||
.map(|start|
|
.map(|start|
|
||||||
// SAFETY: The range of page frames is contiguous and valid.
|
// SAFETY: The range of page frames is contiguous and valid.
|
||||||
unsafe {
|
unsafe {
|
||||||
VmSegment::new(
|
Segment::new(
|
||||||
start * PAGE_SIZE,
|
start * PAGE_SIZE,
|
||||||
nframes,
|
nframes,
|
||||||
)
|
)
|
@ -8,7 +8,7 @@ use super::{
|
|||||||
Page,
|
Page,
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
vm::{
|
mm::{
|
||||||
io::{VmIo, VmReader, VmWriter},
|
io::{VmIo, VmReader, VmWriter},
|
||||||
paddr_to_vaddr, HasPaddr, Paddr, PagingLevel, PAGE_SIZE,
|
paddr_to_vaddr, HasPaddr, Paddr, PagingLevel, PAGE_SIZE,
|
||||||
},
|
},
|
||||||
@ -17,30 +17,25 @@ use crate::{
|
|||||||
|
|
||||||
/// A handle to a page frame.
|
/// A handle to a page frame.
|
||||||
///
|
///
|
||||||
/// The referenced page frame could either be huge or regular, which can be
|
/// An instance of `Frame` is a handle to a page frame (a physical memory
|
||||||
/// told by the [`VmFrame::size`] method. It is ensured that there would be
|
/// page). A cloned `Frame` refers to the same page frame as the original.
|
||||||
/// only one TLB entry for such a frame if it is mapped to a virtual address
|
|
||||||
/// and the architecture supports huge TLB entries.
|
|
||||||
///
|
|
||||||
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
|
|
||||||
/// page). A cloned `VmFrame` refers to the same page frame as the original.
|
|
||||||
/// As the original and cloned instances point to the same physical address,
|
/// As the original and cloned instances point to the same physical address,
|
||||||
/// they are treated as equal to each other. Behind the scene, a reference
|
/// they are treated as equal to each other. Behind the scene, a reference
|
||||||
/// counter is maintained for each page frame so that when all instances of
|
/// counter is maintained for each page frame so that when all instances of
|
||||||
/// `VmFrame` that refer to the same page frame are dropped, the page frame
|
/// `Frame` that refer to the same page frame are dropped, the page frame
|
||||||
/// will be globally freed.
|
/// will be globally freed.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct VmFrame {
|
pub struct Frame {
|
||||||
pub(in crate::vm) page: Page<FrameMeta>,
|
pub(in crate::mm) page: Page<FrameMeta>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HasPaddr for VmFrame {
|
impl HasPaddr for Frame {
|
||||||
fn paddr(&self) -> Paddr {
|
fn paddr(&self) -> Paddr {
|
||||||
self.start_paddr()
|
self.start_paddr()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VmFrame {
|
impl Frame {
|
||||||
/// Returns the physical address of the page frame.
|
/// Returns the physical address of the page frame.
|
||||||
pub fn start_paddr(&self) -> Paddr {
|
pub fn start_paddr(&self) -> Paddr {
|
||||||
self.page.paddr()
|
self.page.paddr()
|
||||||
@ -73,7 +68,7 @@ impl VmFrame {
|
|||||||
paddr_to_vaddr(self.start_paddr()) as *mut u8
|
paddr_to_vaddr(self.start_paddr()) as *mut u8
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn copy_from(&self, src: &VmFrame) {
|
pub fn copy_from(&self, src: &Frame) {
|
||||||
if self.paddr() == src.paddr() {
|
if self.paddr() == src.paddr() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -84,7 +79,7 @@ impl VmFrame {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> VmFrame {
|
impl<'a> Frame {
|
||||||
/// Returns a reader to read data from it.
|
/// Returns a reader to read data from it.
|
||||||
pub fn reader(&'a self) -> VmReader<'a> {
|
pub fn reader(&'a self) -> VmReader<'a> {
|
||||||
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
|
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
|
||||||
@ -98,7 +93,7 @@ impl<'a> VmFrame {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VmIo for VmFrame {
|
impl VmIo for Frame {
|
||||||
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
|
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
|
||||||
// Do bound check with potential integer overflow in mind
|
// Do bound check with potential integer overflow in mind
|
||||||
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
|
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
|
||||||
@ -134,23 +129,23 @@ impl PageMeta for FrameMeta {
|
|||||||
|
|
||||||
use core::{marker::PhantomData, ops::Deref};
|
use core::{marker::PhantomData, ops::Deref};
|
||||||
|
|
||||||
/// `VmFrameRef` is a struct that can work as `&'a VmFrame`.
|
/// `VmFrameRef` is a struct that can work as `&'a Frame`.
|
||||||
pub struct VmFrameRef<'a> {
|
pub struct VmFrameRef<'a> {
|
||||||
inner: ManuallyDrop<VmFrame>,
|
inner: ManuallyDrop<Frame>,
|
||||||
_marker: PhantomData<&'a VmFrame>,
|
_marker: PhantomData<&'a Frame>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Deref for VmFrameRef<'a> {
|
impl<'a> Deref for VmFrameRef<'a> {
|
||||||
type Target = VmFrame;
|
type Target = Frame;
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
fn deref(&self) -> &Self::Target {
|
||||||
&self.inner
|
&self.inner
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SAFETY: `VmFrame` is essentially an `*const FrameMeta` that could be used as a `*const` pointer.
|
// SAFETY: `Frame` is essentially an `*const FrameMeta` that could be used as a `*const` pointer.
|
||||||
// The pointer is also aligned to 4.
|
// The pointer is also aligned to 4.
|
||||||
unsafe impl xarray::ItemEntry for VmFrame {
|
unsafe impl xarray::ItemEntry for Frame {
|
||||||
type Ref<'a> = VmFrameRef<'a> where Self: 'a;
|
type Ref<'a> = VmFrameRef<'a> where Self: 'a;
|
||||||
|
|
||||||
fn into_raw(self) -> *const () {
|
fn into_raw(self) -> *const () {
|
||||||
@ -165,7 +160,7 @@ unsafe impl xarray::ItemEntry for VmFrame {
|
|||||||
|
|
||||||
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
|
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
|
||||||
Self::Ref {
|
Self::Ref {
|
||||||
inner: ManuallyDrop::new(VmFrame::from_raw(raw)),
|
inner: ManuallyDrop::new(Frame::from_raw(raw)),
|
||||||
_marker: PhantomData,
|
_marker: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -9,7 +9,7 @@ pub mod mapping {
|
|||||||
use core::mem::size_of;
|
use core::mem::size_of;
|
||||||
|
|
||||||
use super::MetaSlot;
|
use super::MetaSlot;
|
||||||
use crate::vm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
|
use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
|
||||||
|
|
||||||
/// Convert a physical address of a base page to the virtual address of the metadata slot.
|
/// Convert a physical address of a base page to the virtual address of the metadata slot.
|
||||||
pub const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
|
pub const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
|
||||||
@ -40,7 +40,7 @@ use static_assertions::const_assert_eq;
|
|||||||
use super::Page;
|
use super::Page;
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::mm::{PageTableEntry, PagingConsts},
|
arch::mm::{PageTableEntry, PagingConsts},
|
||||||
vm::{
|
mm::{
|
||||||
paddr_to_vaddr,
|
paddr_to_vaddr,
|
||||||
page::allocator::FRAME_ALLOCATOR,
|
page::allocator::FRAME_ALLOCATOR,
|
||||||
page_size,
|
page_size,
|
@ -11,10 +11,10 @@
|
|||||||
//! address space of the users are backed by frames.
|
//! address space of the users are backed by frames.
|
||||||
|
|
||||||
pub(crate) mod allocator;
|
pub(crate) mod allocator;
|
||||||
pub(in crate::vm) mod meta;
|
pub(in crate::mm) mod meta;
|
||||||
use meta::{mapping, MetaSlot, PageMeta};
|
use meta::{mapping, MetaSlot, PageMeta};
|
||||||
mod frame;
|
mod frame;
|
||||||
pub use frame::{VmFrame, VmFrameRef};
|
pub use frame::{Frame, VmFrameRef};
|
||||||
mod vm_frame_vec;
|
mod vm_frame_vec;
|
||||||
pub use vm_frame_vec::{FrameVecIter, VmFrameVec};
|
pub use vm_frame_vec::{FrameVecIter, VmFrameVec};
|
||||||
mod segment;
|
mod segment;
|
||||||
@ -23,10 +23,10 @@ use core::{
|
|||||||
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
|
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
|
||||||
};
|
};
|
||||||
|
|
||||||
pub use segment::VmSegment;
|
pub use segment::Segment;
|
||||||
|
|
||||||
use super::PAGE_SIZE;
|
use super::PAGE_SIZE;
|
||||||
use crate::vm::{paddr_to_vaddr, Paddr, PagingConsts, Vaddr};
|
use crate::mm::{paddr_to_vaddr, Paddr, PagingConsts, Vaddr};
|
||||||
|
|
||||||
static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
|
static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ pub enum PageHandleError {
|
|||||||
|
|
||||||
impl<M: PageMeta> Page<M> {
|
impl<M: PageMeta> Page<M> {
|
||||||
/// Convert an unused page to a `Page` handle for a specific usage.
|
/// Convert an unused page to a `Page` handle for a specific usage.
|
||||||
pub(in crate::vm) fn from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
|
pub(in crate::mm) fn from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
|
||||||
if paddr % PAGE_SIZE != 0 {
|
if paddr % PAGE_SIZE != 0 {
|
||||||
return Err(PageHandleError::NotAligned);
|
return Err(PageHandleError::NotAligned);
|
||||||
}
|
}
|
||||||
@ -100,7 +100,7 @@ impl<M: PageMeta> Page<M> {
|
|||||||
///
|
///
|
||||||
/// Also, the caller ensures that the usage of the page is correct. There's
|
/// Also, the caller ensures that the usage of the page is correct. There's
|
||||||
/// no checking of the usage in this function.
|
/// no checking of the usage in this function.
|
||||||
pub(in crate::vm) unsafe fn restore(paddr: Paddr) -> Self {
|
pub(in crate::mm) unsafe fn restore(paddr: Paddr) -> Self {
|
||||||
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
|
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
|
||||||
let ptr = vaddr as *const MetaSlot;
|
let ptr = vaddr as *const MetaSlot;
|
||||||
|
|
||||||
@ -118,7 +118,7 @@ impl<M: PageMeta> Page<M> {
|
|||||||
/// # Safety
|
/// # Safety
|
||||||
///
|
///
|
||||||
/// The safety requirements are the same as [`Page::restore`].
|
/// The safety requirements are the same as [`Page::restore`].
|
||||||
pub(in crate::vm) unsafe fn clone_restore(paddr: &Paddr) -> Self {
|
pub(in crate::mm) unsafe fn clone_restore(paddr: &Paddr) -> Self {
|
||||||
let vaddr = mapping::page_to_meta::<PagingConsts>(*paddr);
|
let vaddr = mapping::page_to_meta::<PagingConsts>(*paddr);
|
||||||
let ptr = vaddr as *const MetaSlot;
|
let ptr = vaddr as *const MetaSlot;
|
||||||
|
|
||||||
@ -158,7 +158,7 @@ impl<M: PageMeta> Page<M> {
|
|||||||
/// # Safety
|
/// # Safety
|
||||||
///
|
///
|
||||||
/// The caller should be sure that the page is exclusively owned.
|
/// The caller should be sure that the page is exclusively owned.
|
||||||
pub(in crate::vm) unsafe fn meta_mut(&mut self) -> &mut M {
|
pub(in crate::mm) unsafe fn meta_mut(&mut self) -> &mut M {
|
||||||
unsafe { &mut *(self.ptr as *mut M) }
|
unsafe { &mut *(self.ptr as *mut M) }
|
||||||
}
|
}
|
||||||
|
|
@ -5,19 +5,19 @@ use core::ops::Range;
|
|||||||
use super::{
|
use super::{
|
||||||
allocator,
|
allocator,
|
||||||
meta::{PageMeta, PageUsage, SegmentHeadMeta},
|
meta::{PageMeta, PageUsage, SegmentHeadMeta},
|
||||||
Page, VmFrame,
|
Frame, Page,
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
vm::{HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE},
|
mm::{HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE},
|
||||||
Error, Result,
|
Error, Result,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A handle to a contiguous range of page frames (physical memory pages).
|
/// A handle to a contiguous range of page frames (physical memory pages).
|
||||||
///
|
///
|
||||||
/// The biggest difference between `VmSegment` and `VmFrameVec` is that
|
/// The biggest difference between `Segment` and `VmFrameVec` is that
|
||||||
/// the page frames must be contiguous for `VmSegment`.
|
/// the page frames must be contiguous for `Segment`.
|
||||||
///
|
///
|
||||||
/// A cloned `VmSegment` refers to the same page frames as the original.
|
/// A cloned `Segment` refers to the same page frames as the original.
|
||||||
/// As the original and cloned instances point to the same physical address,
|
/// As the original and cloned instances point to the same physical address,
|
||||||
/// they are treated as equal to each other.
|
/// they are treated as equal to each other.
|
||||||
///
|
///
|
||||||
@ -30,25 +30,25 @@ use crate::{
|
|||||||
/// vm_segment.write_bytes(0, buf)?;
|
/// vm_segment.write_bytes(0, buf)?;
|
||||||
/// ```
|
/// ```
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct VmSegment {
|
pub struct Segment {
|
||||||
head_page: Page<SegmentHeadMeta>,
|
head_page: Page<SegmentHeadMeta>,
|
||||||
range: Range<usize>,
|
range: Range<usize>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HasPaddr for VmSegment {
|
impl HasPaddr for Segment {
|
||||||
fn paddr(&self) -> Paddr {
|
fn paddr(&self) -> Paddr {
|
||||||
self.start_paddr()
|
self.start_paddr()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VmSegment {
|
impl Segment {
|
||||||
/// Creates a new `VmSegment`.
|
/// Creates a new `Segment`.
|
||||||
///
|
///
|
||||||
/// # Safety
|
/// # Safety
|
||||||
///
|
///
|
||||||
/// The given range of page frames must be contiguous and valid for use.
|
/// The given range of page frames must be contiguous and valid for use.
|
||||||
/// The given range of page frames must not have been allocated before,
|
/// The given range of page frames must not have been allocated before,
|
||||||
/// as part of either a `VmFrame` or `VmSegment`.
|
/// as part of either a `Frame` or `Segment`.
|
||||||
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
|
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
|
||||||
let mut head = Page::<SegmentHeadMeta>::from_unused(paddr).unwrap();
|
let mut head = Page::<SegmentHeadMeta>::from_unused(paddr).unwrap();
|
||||||
head.meta_mut().seg_len = (nframes * PAGE_SIZE) as u64;
|
head.meta_mut().seg_len = (nframes * PAGE_SIZE) as u64;
|
||||||
@ -58,11 +58,11 @@ impl VmSegment {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a part of the `VmSegment`.
|
/// Returns a part of the `Segment`.
|
||||||
///
|
///
|
||||||
/// # Panic
|
/// # Panic
|
||||||
///
|
///
|
||||||
/// If `range` is not within the range of this `VmSegment`,
|
/// If `range` is not within the range of this `Segment`,
|
||||||
/// then the method panics.
|
/// then the method panics.
|
||||||
pub fn range(&self, range: Range<usize>) -> Self {
|
pub fn range(&self, range: Range<usize>) -> Self {
|
||||||
let orig_range = &self.range;
|
let orig_range = &self.range;
|
||||||
@ -108,7 +108,7 @@ impl VmSegment {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> VmSegment {
|
impl<'a> Segment {
|
||||||
/// Returns a reader to read data from it.
|
/// Returns a reader to read data from it.
|
||||||
pub fn reader(&'a self) -> VmReader<'a> {
|
pub fn reader(&'a self) -> VmReader<'a> {
|
||||||
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
|
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
|
||||||
@ -122,7 +122,7 @@ impl<'a> VmSegment {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VmIo for VmSegment {
|
impl VmIo for Segment {
|
||||||
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
|
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
|
||||||
// Do bound check with potential integer overflow in mind
|
// Do bound check with potential integer overflow in mind
|
||||||
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
|
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
|
||||||
@ -156,8 +156,8 @@ impl PageMeta for SegmentHeadMeta {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<VmFrame> for VmSegment {
|
impl From<Frame> for Segment {
|
||||||
fn from(frame: VmFrame) -> Self {
|
fn from(frame: Frame) -> Self {
|
||||||
Self {
|
Self {
|
||||||
head_page: frame.page.into(),
|
head_page: frame.page.into(),
|
||||||
range: 0..1,
|
range: 0..1,
|
@ -3,26 +3,26 @@
|
|||||||
use alloc::{vec, vec::Vec};
|
use alloc::{vec, vec::Vec};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
vm::{VmFrame, VmIo, VmReader, VmWriter, PAGE_SIZE},
|
mm::{Frame, VmIo, VmReader, VmWriter, PAGE_SIZE},
|
||||||
Error, Result,
|
Error, Result,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A collection of base page frames (regular physical memory pages).
|
/// A collection of base page frames (regular physical memory pages).
|
||||||
///
|
///
|
||||||
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
|
/// For the most parts, `VmFrameVec` is like `Vec<Frame>`. But the
|
||||||
/// implementation may or may not be based on `Vec`. Having a dedicated
|
/// implementation may or may not be based on `Vec`. Having a dedicated
|
||||||
/// type to represent a series of page frames is convenient because,
|
/// type to represent a series of page frames is convenient because,
|
||||||
/// more often than not, one needs to operate on a batch of frames rather
|
/// more often than not, one needs to operate on a batch of frames rather
|
||||||
/// a single frame.
|
/// a single frame.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct VmFrameVec(pub(crate) Vec<VmFrame>);
|
pub struct VmFrameVec(pub(crate) Vec<Frame>);
|
||||||
|
|
||||||
impl VmFrameVec {
|
impl VmFrameVec {
|
||||||
pub fn get(&self, index: usize) -> Option<&VmFrame> {
|
pub fn get(&self, index: usize) -> Option<&Frame> {
|
||||||
self.0.get(index)
|
self.0.get(index)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// returns an empty VmFrame vec
|
/// returns an empty Frame vec
|
||||||
pub fn empty() -> Self {
|
pub fn empty() -> Self {
|
||||||
Self(Vec::new())
|
Self(Vec::new())
|
||||||
}
|
}
|
||||||
@ -32,17 +32,17 @@ impl VmFrameVec {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Pushs a new frame to the collection.
|
/// Pushs a new frame to the collection.
|
||||||
pub fn push(&mut self, new_frame: VmFrame) {
|
pub fn push(&mut self, new_frame: Frame) {
|
||||||
self.0.push(new_frame);
|
self.0.push(new_frame);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Pop a frame from the collection.
|
/// Pop a frame from the collection.
|
||||||
pub fn pop(&mut self) -> Option<VmFrame> {
|
pub fn pop(&mut self) -> Option<Frame> {
|
||||||
self.0.pop()
|
self.0.pop()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes a frame at a position.
|
/// Removes a frame at a position.
|
||||||
pub fn remove(&mut self, at: usize) -> VmFrame {
|
pub fn remove(&mut self, at: usize) -> Frame {
|
||||||
self.0.remove(at)
|
self.0.remove(at)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -63,7 +63,7 @@ impl VmFrameVec {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator
|
/// Returns an iterator
|
||||||
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
|
pub fn iter(&self) -> core::slice::Iter<'_, Frame> {
|
||||||
self.0.iter()
|
self.0.iter()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -84,13 +84,13 @@ impl VmFrameVec {
|
|||||||
self.0.len() * PAGE_SIZE
|
self.0.len() * PAGE_SIZE
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_one_frame(frame: VmFrame) -> Self {
|
pub fn from_one_frame(frame: Frame) -> Self {
|
||||||
Self(vec![frame])
|
Self(vec![frame])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IntoIterator for VmFrameVec {
|
impl IntoIterator for VmFrameVec {
|
||||||
type Item = VmFrame;
|
type Item = Frame;
|
||||||
|
|
||||||
type IntoIter = alloc::vec::IntoIter<Self::Item>;
|
type IntoIter = alloc::vec::IntoIter<Self::Item>;
|
||||||
|
|
||||||
@ -154,7 +154,7 @@ impl<'a> FrameVecIter<'a> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Iterator for FrameVecIter<'a> {
|
impl<'a> Iterator for FrameVecIter<'a> {
|
||||||
type Item = &'a VmFrame;
|
type Item = &'a Frame;
|
||||||
|
|
||||||
fn next(&mut self) -> Option<Self::Item> {
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
if self.current >= self.frames.0.len() {
|
if self.current >= self.frames.0.len() {
|
@ -9,7 +9,7 @@ use alloc::vec::Vec;
|
|||||||
use super::{pte_index, PageTableEntryTrait};
|
use super::{pte_index, PageTableEntryTrait};
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::mm::{PageTableEntry, PagingConsts},
|
arch::mm::{PageTableEntry, PagingConsts},
|
||||||
vm::{
|
mm::{
|
||||||
paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty, PagingConstsTrait, Vaddr,
|
paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty, PagingConstsTrait, Vaddr,
|
||||||
PAGE_SIZE,
|
PAGE_SIZE,
|
||||||
},
|
},
|
||||||
@ -26,7 +26,7 @@ pub struct BootPageTable<
|
|||||||
> {
|
> {
|
||||||
root_pt: FrameNumber,
|
root_pt: FrameNumber,
|
||||||
// The frames allocated for this page table are not tracked with
|
// The frames allocated for this page table are not tracked with
|
||||||
// metadata [`crate::vm::frame::meta`]. Here is a record of it
|
// metadata [`crate::mm::frame::meta`]. Here is a record of it
|
||||||
// for deallocation.
|
// for deallocation.
|
||||||
frames: Vec<FrameNumber>,
|
frames: Vec<FrameNumber>,
|
||||||
_pretend_to_use: core::marker::PhantomData<(E, C)>,
|
_pretend_to_use: core::marker::PhantomData<(E, C)>,
|
||||||
@ -101,7 +101,7 @@ fn test_boot_pt() {
|
|||||||
use super::page_walk;
|
use super::page_walk;
|
||||||
use crate::{
|
use crate::{
|
||||||
arch::mm::{PageTableEntry, PagingConsts},
|
arch::mm::{PageTableEntry, PagingConsts},
|
||||||
vm::{CachePolicy, PageFlags, VmAllocOptions},
|
mm::{CachePolicy, PageFlags, VmAllocOptions},
|
||||||
};
|
};
|
||||||
|
|
||||||
let root_frame = VmAllocOptions::new(1).alloc_single().unwrap();
|
let root_frame = VmAllocOptions::new(1).alloc_single().unwrap();
|
@ -56,9 +56,9 @@ use align_ext::AlignExt;
|
|||||||
|
|
||||||
use super::{
|
use super::{
|
||||||
page_size, pte_index, Child, KernelMode, PageTable, PageTableEntryTrait, PageTableError,
|
page_size, pte_index, Child, KernelMode, PageTable, PageTableEntryTrait, PageTableError,
|
||||||
PageTableFrame, PageTableMode, PagingConstsTrait, PagingLevel,
|
PageTableMode, PageTableNode, PagingConstsTrait, PagingLevel,
|
||||||
};
|
};
|
||||||
use crate::vm::{Paddr, PageProperty, Vaddr, VmFrame};
|
use crate::mm::{Frame, Paddr, PageProperty, Vaddr};
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub(crate) enum PageTableQueryResult {
|
pub(crate) enum PageTableQueryResult {
|
||||||
@ -68,7 +68,7 @@ pub(crate) enum PageTableQueryResult {
|
|||||||
},
|
},
|
||||||
Mapped {
|
Mapped {
|
||||||
va: Vaddr,
|
va: Vaddr,
|
||||||
frame: VmFrame,
|
frame: Frame,
|
||||||
prop: PageProperty,
|
prop: PageProperty,
|
||||||
},
|
},
|
||||||
MappedUntracked {
|
MappedUntracked {
|
||||||
@ -94,7 +94,7 @@ where
|
|||||||
[(); C::NR_LEVELS as usize]:,
|
[(); C::NR_LEVELS as usize]:,
|
||||||
{
|
{
|
||||||
pt: &'a PageTable<M, E, C>,
|
pt: &'a PageTable<M, E, C>,
|
||||||
guards: [Option<PageTableFrame<E, C>>; C::NR_LEVELS as usize],
|
guards: [Option<PageTableNode<E, C>>; C::NR_LEVELS as usize],
|
||||||
level: PagingLevel, // current level
|
level: PagingLevel, // current level
|
||||||
guard_level: PagingLevel, // from guard_level to level, the locks are held
|
guard_level: PagingLevel, // from guard_level to level, the locks are held
|
||||||
va: Vaddr, // current virtual address
|
va: Vaddr, // current virtual address
|
||||||
@ -246,7 +246,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cur_node(&self) -> &PageTableFrame<E, C> {
|
fn cur_node(&self) -> &PageTableNode<E, C> {
|
||||||
self.guards[(C::NR_LEVELS - self.level) as usize]
|
self.guards[(C::NR_LEVELS - self.level) as usize]
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
@ -267,7 +267,7 @@ where
|
|||||||
|
|
||||||
/// Tell if the current virtual range must contain untracked mappings.
|
/// Tell if the current virtual range must contain untracked mappings.
|
||||||
///
|
///
|
||||||
/// In the kernel mode, this is aligned with the definition in [`crate::vm::kspace`].
|
/// In the kernel mode, this is aligned with the definition in [`crate::mm::kspace`].
|
||||||
/// Only linear mappings in the kernel are considered as untracked mappings.
|
/// Only linear mappings in the kernel are considered as untracked mappings.
|
||||||
///
|
///
|
||||||
/// All mappings in the user mode are tracked. And all mappings in the IOMMU
|
/// All mappings in the user mode are tracked. And all mappings in the IOMMU
|
||||||
@ -275,7 +275,7 @@ where
|
|||||||
fn in_untracked_range(&self) -> bool {
|
fn in_untracked_range(&self) -> bool {
|
||||||
TypeId::of::<M>() == TypeId::of::<crate::arch::iommu::DeviceMode>()
|
TypeId::of::<M>() == TypeId::of::<crate::arch::iommu::DeviceMode>()
|
||||||
|| TypeId::of::<M>() == TypeId::of::<KernelMode>()
|
|| TypeId::of::<M>() == TypeId::of::<KernelMode>()
|
||||||
&& !crate::vm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va)
|
&& !crate::mm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -385,7 +385,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Map the range starting from the current address to a `VmFrame`.
|
/// Map the range starting from the current address to a `Frame`.
|
||||||
///
|
///
|
||||||
/// # Panic
|
/// # Panic
|
||||||
///
|
///
|
||||||
@ -398,7 +398,7 @@ where
|
|||||||
///
|
///
|
||||||
/// The caller should ensure that the virtual range being mapped does
|
/// The caller should ensure that the virtual range being mapped does
|
||||||
/// not affect kernel's memory safety.
|
/// not affect kernel's memory safety.
|
||||||
pub(crate) unsafe fn map(&mut self, frame: VmFrame, prop: PageProperty) {
|
pub(crate) unsafe fn map(&mut self, frame: Frame, prop: PageProperty) {
|
||||||
let end = self.0.va + frame.size();
|
let end = self.0.va + frame.size();
|
||||||
assert!(end <= self.0.barrier_va.end);
|
assert!(end <= self.0.barrier_va.end);
|
||||||
debug_assert!(!self.0.in_untracked_range());
|
debug_assert!(!self.0.in_untracked_range());
|
||||||
@ -599,7 +599,7 @@ where
|
|||||||
/// Consume itself and leak the root guard for the caller if it locked the root level.
|
/// Consume itself and leak the root guard for the caller if it locked the root level.
|
||||||
///
|
///
|
||||||
/// It is useful when the caller wants to keep the root guard while the cursor should be dropped.
|
/// It is useful when the caller wants to keep the root guard while the cursor should be dropped.
|
||||||
pub(super) fn leak_root_guard(mut self) -> Option<PageTableFrame<E, C>> {
|
pub(super) fn leak_root_guard(mut self) -> Option<PageTableNode<E, C>> {
|
||||||
if self.0.guard_level != C::NR_LEVELS {
|
if self.0.guard_level != C::NR_LEVELS {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
@ -616,7 +616,7 @@ where
|
|||||||
/// This method will create a new child frame and go down to it.
|
/// This method will create a new child frame and go down to it.
|
||||||
fn level_down_create(&mut self) {
|
fn level_down_create(&mut self) {
|
||||||
debug_assert!(self.0.level > 1);
|
debug_assert!(self.0.level > 1);
|
||||||
let new_frame = PageTableFrame::<E, C>::alloc(self.0.level - 1);
|
let new_frame = PageTableNode::<E, C>::alloc(self.0.level - 1);
|
||||||
let idx = self.0.cur_idx();
|
let idx = self.0.cur_idx();
|
||||||
let untracked = self.0.in_untracked_range();
|
let untracked = self.0.in_untracked_range();
|
||||||
self.cur_node_mut()
|
self.cur_node_mut()
|
||||||
@ -640,7 +640,7 @@ where
|
|||||||
self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame.lock());
|
self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame.lock());
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cur_node_mut(&mut self) -> &mut PageTableFrame<E, C> {
|
fn cur_node_mut(&mut self) -> &mut PageTableNode<E, C> {
|
||||||
self.0.guards[(C::NR_LEVELS - self.0.level) as usize]
|
self.0.guards[(C::NR_LEVELS - self.0.level) as usize]
|
||||||
.as_mut()
|
.as_mut()
|
||||||
.unwrap()
|
.unwrap()
|
@@ -28,7 +28,7 @@ use core::{marker::PhantomData, mem::ManuallyDrop, ops::Range, panic, sync::atom
 use super::{nr_subpage_per_huge, page_size, PageTableEntryTrait};
 use crate::{
 arch::mm::{PageTableEntry, PagingConsts},
-vm::{
+mm::{
 paddr_to_vaddr,
 page::{
 allocator::FRAME_ALLOCATOR,
@@ -36,7 +36,7 @@ use crate::{
 Page,
 },
 page_prop::PageProperty,
-Paddr, PagingConstsTrait, PagingLevel, VmFrame, PAGE_SIZE,
+Frame, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
 },
 };

@@ -47,9 +47,9 @@ use crate::{
 /// the page table frame and subsequent children will be freed.
 ///
 /// Only the CPU or a PTE can access a page table frame using a raw handle. To access the page
-/// table frame from the kernel code, use the handle [`PageTableFrame`].
+/// table frame from the kernel code, use the handle [`PageTableNode`].
 #[derive(Debug)]
-pub(super) struct RawPageTableFrame<E: PageTableEntryTrait, C: PagingConstsTrait>
+pub(super) struct RawPageTableNode<E: PageTableEntryTrait, C: PagingConstsTrait>
 where
 [(); C::NR_LEVELS as usize]:,
 {
@@ -58,7 +58,7 @@ where
 _phantom: PhantomData<(E, C)>,
 }

-impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableFrame<E, C>
+impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableNode<E, C>
 where
 [(); C::NR_LEVELS as usize]:,
 {
@@ -67,7 +67,7 @@ where
 }

 /// Convert a raw handle to an accessible handle by pertaining the lock.
-pub(super) fn lock(self) -> PageTableFrame<E, C> {
+pub(super) fn lock(self) -> PageTableNode<E, C> {
 let page = unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) };
 debug_assert!(page.meta().level == self.level);
 // Acquire the lock.
@@ -81,7 +81,7 @@ where
 }
 // Prevent dropping the handle.
 let _ = ManuallyDrop::new(self);
-PageTableFrame::<E, C> { page }
+PageTableNode::<E, C> { page }
 }

 /// Create a copy of the handle.
@@ -116,7 +116,7 @@ where

 use crate::{
 arch::mm::{activate_page_table, current_page_table_paddr},
-vm::CachePolicy,
+mm::CachePolicy,
 };

 debug_assert_eq!(self.level, PagingConsts::NR_LEVELS);
@@ -137,7 +137,7 @@ where

 // Decrement the reference count of the last activated page table.

-// Boot page tables are not tracked with [`PageTableFrame`], but
+// Boot page tables are not tracked with [`PageTableNode`], but
 // all page tables after the boot stage are tracked.
 //
 // TODO: the `cpu_local` implementation currently is underpowered,
@@ -158,7 +158,7 @@ where
 }
 }

-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableFrame<E, C>
+impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableNode<E, C>
 where
 [(); C::NR_LEVELS as usize]:,
 {
@@ -175,7 +175,7 @@ where
 /// table frame has no references. You can set the page table frame as a child of another
 /// page table frame.
 #[derive(Debug)]
-pub(super) struct PageTableFrame<
+pub(super) struct PageTableNode<
 E: PageTableEntryTrait = PageTableEntry,
 C: PagingConstsTrait = PagingConsts,
 > where
@@ -190,14 +190,14 @@ pub(super) enum Child<E: PageTableEntryTrait = PageTableEntry, C: PagingConstsTr
 where
 [(); C::NR_LEVELS as usize]:,
 {
-PageTable(RawPageTableFrame<E, C>),
-Frame(VmFrame),
+PageTable(RawPageTableNode<E, C>),
+Frame(Frame),
 /// Frames not tracked by handles.
 Untracked(Paddr),
 None,
 }

-impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableFrame<E, C>
+impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
 where
 [(); C::NR_LEVELS as usize]:,
 {
@@ -227,12 +227,12 @@ where
 }

 /// Convert the handle into a raw handle to be stored in a PTE or CPU.
-pub(super) fn into_raw(self) -> RawPageTableFrame<E, C> {
+pub(super) fn into_raw(self) -> RawPageTableNode<E, C> {
 let level = self.level();
 let raw = self.page.paddr();
 self.page.meta().lock.store(0, Ordering::Release);
 core::mem::forget(self);
-RawPageTableFrame {
+RawPageTableNode {
 raw,
 level,
 _phantom: PhantomData,
@@ -240,9 +240,9 @@ where
 }

 /// Get a raw handle while still preserving the original handle.
-pub(super) fn clone_raw(&self) -> RawPageTableFrame<E, C> {
+pub(super) fn clone_raw(&self) -> RawPageTableNode<E, C> {
 core::mem::forget(self.page.clone());
-RawPageTableFrame {
+RawPageTableNode {
 raw: self.page.paddr(),
 level: self.level(),
 _phantom: PhantomData,
@@ -261,7 +261,7 @@ where
 core::mem::forget(unsafe {
 Page::<PageTablePageMeta<E, C>>::clone_restore(&paddr)
 });
-Child::PageTable(RawPageTableFrame {
+Child::PageTable(RawPageTableNode {
 raw: paddr,
 level: self.level() - 1,
 _phantom: PhantomData,
@@ -269,7 +269,7 @@ where
 } else if tracked {
 let page = unsafe { Page::<FrameMeta>::restore(paddr) };
 core::mem::forget(page.clone());
-Child::Frame(VmFrame { page })
+Child::Frame(Frame { page })
 } else {
 Child::Untracked(paddr)
 }
@@ -335,7 +335,7 @@ where
 pub(super) fn set_child_pt(
 &mut self,
 idx: usize,
-pt: RawPageTableFrame<E, C>,
+pt: RawPageTableNode<E, C>,
 in_untracked_range: bool,
 ) {
 // They should be ensured by the cursor.
@@ -348,7 +348,7 @@ where
 }

 /// Map a frame at a given index.
-pub(super) fn set_child_frame(&mut self, idx: usize, frame: VmFrame, prop: PageProperty) {
+pub(super) fn set_child_frame(&mut self, idx: usize, frame: Frame, prop: PageProperty) {
 // They should be ensured by the cursor.
 debug_assert!(idx < nr_subpage_per_huge::<C>());
 debug_assert_eq!(frame.level(), self.level());
@@ -391,7 +391,7 @@ where
 panic!("`split_untracked_huge` not called on an untracked huge page");
 };
 let prop = self.read_pte_prop(idx);
-let mut new_frame = PageTableFrame::<E, C>::alloc(self.level() - 1);
+let mut new_frame = PageTableNode::<E, C>::alloc(self.level() - 1);
 for i in 0..nr_subpage_per_huge::<C>() {
 let small_pa = pa + i * page_size::<C>(self.level() - 1);
 unsafe { new_frame.set_child_untracked(i, small_pa, prop) };
@@ -467,7 +467,7 @@ where
 }
 }

-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableFrame<E, C>
+impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableNode<E, C>
 where
 [(); C::NR_LEVELS as usize]:,
 {
@@ -18,7 +18,7 @@ pub(crate) use cursor::{Cursor, CursorMut, PageTableQueryResult};
 #[cfg(ktest)]
 mod test;

-pub(in crate::vm) mod boot_pt;
+pub(in crate::mm) mod boot_pt;

 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub enum PageTableError {
@@ -82,7 +82,7 @@ pub(crate) struct PageTable<
 > where
 [(); C::NR_LEVELS as usize]:,
 {
-root: RawPageTableFrame<E, C>,
+root: RawPageTableNode<E, C>,
 _phantom: PhantomData<M>,
 }

@@ -160,7 +160,7 @@ impl PageTable<KernelMode> {
 let mut root_frame = self.root.copy_handle().lock();
 for i in start..end {
 if !root_frame.read_pte(i).is_present() {
-let frame = PageTableFrame::alloc(PagingConsts::NR_LEVELS - 1);
+let frame = PageTableNode::alloc(PagingConsts::NR_LEVELS - 1);
 root_frame.set_child_pt(i, frame.into_raw(), i < NR_PTES_PER_NODE * 3 / 4);
 }
 }
@@ -174,7 +174,7 @@ where
 /// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
 pub(crate) fn empty() -> Self {
 PageTable {
-root: PageTableFrame::<E, C>::alloc(C::NR_LEVELS).into_raw(),
+root: PageTableNode::<E, C>::alloc(C::NR_LEVELS).into_raw(),
 _phantom: PhantomData,
 }
 }
@@ -3,7 +3,7 @@
 use core::mem::ManuallyDrop;

 use super::*;
-use crate::vm::{
+use crate::mm::{
 kspace::LINEAR_MAPPING_BASE_VADDR,
 page_prop::{CachePolicy, PageFlags},
 VmAllocOptions,
@@ -43,7 +43,7 @@ fn test_tracked_map_unmap() {
 #[ktest]
 fn test_untracked_map_unmap() {
 let pt = PageTable::<KernelMode>::empty();
-const UNTRACKED_OFFSET: usize = crate::vm::kspace::LINEAR_MAPPING_BASE_VADDR;
+const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;

 let from_ppn = 13245..512 * 512 + 23456;
 let to_ppn = from_ppn.start - 11010..from_ppn.end - 11010;
@@ -172,7 +172,7 @@ impl PagingConstsTrait for VeryHugePagingConsts {
 #[ktest]
 fn test_untracked_large_protect_query() {
 let pt = PageTable::<KernelMode, PageTableEntry, VeryHugePagingConsts>::empty();
-const UNTRACKED_OFFSET: usize = crate::vm::kspace::LINEAR_MAPPING_BASE_VADDR;
+const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;

 let gmult = 512 * 512;
 let from_ppn = gmult - 512..gmult + gmult + 514;
@@ -13,11 +13,11 @@ use crate::{
 arch::mm::{
 tlb_flush_addr_range, tlb_flush_all_excluding_global, PageTableEntry, PagingConsts,
 },
-prelude::*,
-vm::{
+mm::{
 page_table::{Cursor, PageTableQueryResult as PtQr},
-VmFrame, MAX_USERSPACE_VADDR,
+Frame, MAX_USERSPACE_VADDR,
 },
+prelude::*,
 Error,
 };

@@ -31,7 +31,7 @@ use crate::{
 ///
 /// A newly-created `VmSpace` is not backed by any physical memory pages.
 /// To provide memory pages for a `VmSpace`, one can allocate and map
-/// physical memory (`VmFrame`s) to the `VmSpace`.
+/// physical memory (`Frame`s) to the `VmSpace`.
 #[derive(Debug)]
 pub struct VmSpace {
 pt: PageTable<UserMode>,
@@ -103,7 +103,7 @@ impl VmSpace {
 };

 for frame in frames.into_iter() {
-// SAFETY: mapping in the user space with `VmFrame` is safe.
+// SAFETY: mapping in the user space with `Frame` is safe.
 unsafe {
 cursor.map(frame, prop);
 }
@@ -299,7 +299,7 @@ pub enum VmQueryResult {
 },
 Mapped {
 va: Vaddr,
-frame: VmFrame,
+frame: Frame,
 prop: PageProperty,
 },
 }
@@ -11,6 +11,6 @@ pub use aster_main::aster_main;

 pub use crate::{
 early_print as print, early_println as println,
+mm::{Paddr, Vaddr},
 panicking::abort,
-vm::{Paddr, Vaddr},
 };
@@ -12,10 +12,10 @@ use super::{
 pub(crate) use crate::arch::task::{context_switch, TaskContext};
 use crate::{
 cpu::CpuSet,
+mm::{kspace::KERNEL_PAGE_TABLE, PageFlags, Segment, VmAllocOptions, PAGE_SIZE},
 prelude::*,
 sync::{SpinLock, SpinLockGuard},
 user::UserSpace,
-vm::{kspace::KERNEL_PAGE_TABLE, PageFlags, VmAllocOptions, VmSegment, PAGE_SIZE},
 };

 pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
@@ -35,7 +35,7 @@ pub trait TaskContextApi {
 }

 pub struct KernelStack {
-segment: VmSegment,
+segment: Segment,
 has_guard_page: bool,
 }

@@ -56,7 +56,7 @@ impl KernelStack {
 let page_table = KERNEL_PAGE_TABLE.get().unwrap();
 let guard_page_vaddr = {
 let guard_page_paddr = stack_segment.start_paddr();
-crate::vm::paddr_to_vaddr(guard_page_paddr)
+crate::mm::paddr_to_vaddr(guard_page_paddr)
 };
 // SAFETY: the segment allocated is not used by others so we can protect it.
 unsafe {
@@ -84,7 +84,7 @@ impl Drop for KernelStack {
 let page_table = KERNEL_PAGE_TABLE.get().unwrap();
 let guard_page_vaddr = {
 let guard_page_paddr = self.segment.start_paddr();
-crate::vm::paddr_to_vaddr(guard_page_paddr)
+crate::mm::paddr_to_vaddr(guard_page_paddr)
 };
 // SAFETY: the segment allocated is not used by others so we can protect it.
 unsafe {
@@ -293,7 +293,7 @@ impl TaskOptions {
 // to at least 16 bytes. And a larger alignment is needed if larger arguments
 // are passed to the function. The `kernel_task_entry` function does not
 // have any arguments, so we only need to align the stack pointer to 16 bytes.
-ctx.set_stack_pointer(crate::vm::paddr_to_vaddr(new_task.kstack.end_paddr() - 16));
+ctx.set_stack_pointer(crate::mm::paddr_to_vaddr(new_task.kstack.end_paddr() - 16));

 Ok(Arc::new(new_task))
 }
@@ -4,7 +4,7 @@

 use trapframe::TrapFrame;

-use crate::{cpu::UserContext, prelude::*, task::Task, vm::VmSpace};
+use crate::{cpu::UserContext, mm::VmSpace, prelude::*, task::Task};

 /// A user space.
 ///
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: MPL-2.0

 pub use aster_frame::arch::console;
-use aster_frame::vm::VmReader;
+use aster_frame::mm::VmReader;
 use spin::Once;

 use crate::{
@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;

 use super::{
@@ -3,8 +3,8 @@
 use core::{num::NonZeroUsize, ops::Range, sync::atomic::AtomicU64};

 use aster_block::{bio::BioWaiter, id::BlockId, BlockDevice};
-use aster_frame::vm::VmFrame;
-pub(super) use aster_frame::vm::VmIo;
+use aster_frame::mm::Frame;
+pub(super) use aster_frame::mm::VmIo;
 use hashbrown::HashMap;
 use lru::LruCache;

@@ -361,7 +361,7 @@ impl ExfatFS {
 }

 impl PageCacheBackend for ExfatFS {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 if self.fs_size() < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "invalid read size")
 }
@@ -371,7 +371,7 @@ impl PageCacheBackend for ExfatFS {
 Ok(waiter)
 }

-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 if self.fs_size() < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "invalid write size")
 }
@@ -9,7 +9,7 @@ use aster_block::{
 id::{Bid, BlockId},
 BLOCK_SIZE,
 };
-use aster_frame::vm::{VmAllocOptions, VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmAllocOptions, VmIo};
 use aster_rights::Full;

 use super::{
@@ -132,7 +132,7 @@ struct ExfatInodeInner {
 }

 impl PageCacheBackend for ExfatInode {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let inner = self.inner.read();
 if inner.size < idx * PAGE_SIZE {
 return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@@ -145,7 +145,7 @@ impl PageCacheBackend for ExfatInode {
 Ok(waiter)
 }

-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let inner = self.inner.read();
 let sector_size = inner.fs().sector_size();

@@ -21,7 +21,7 @@ mod test {
 bio::{BioEnqueueError, BioStatus, BioType, SubmittedBio},
 BlockDevice,
 };
-use aster_frame::vm::{VmAllocOptions, VmIo, VmSegment};
+use aster_frame::mm::{Segment, VmAllocOptions, VmIo};
 use rand::{rngs::SmallRng, RngCore, SeedableRng};

 use crate::{
@@ -37,10 +37,10 @@ mod test {

 /// Followings are implementations of memory simulated block device
 pub const SECTOR_SIZE: usize = 512;
-struct ExfatMemoryBioQueue(VmSegment);
+struct ExfatMemoryBioQueue(Segment);

 impl ExfatMemoryBioQueue {
-pub fn new(segment: VmSegment) -> Self {
+pub fn new(segment: Segment) -> Self {
 ExfatMemoryBioQueue(segment)
 }

@@ -54,7 +54,7 @@ mod test {
 }

 impl ExfatMemoryDisk {
-pub fn new(segment: VmSegment) -> Self {
+pub fn new(segment: Segment) -> Self {
 ExfatMemoryDisk {
 queue: ExfatMemoryBioQueue::new(segment),
 }
@@ -100,7 +100,7 @@ mod test {
 static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../../regression/build/exfat.img");

 /// Read exfat disk image
-fn new_vm_segment_from_image() -> VmSegment {
+fn new_vm_segment_from_image() -> Segment {
 let vm_segment = {
 VmAllocOptions::new(EXFAT_IMAGE.len() / PAGE_SIZE)
 .is_contiguous(true)
@@ -28,7 +28,7 @@ struct BlockGroupImpl {
 impl BlockGroup {
 /// Loads and constructs a block group.
 pub fn load(
-group_descriptors_segment: &VmSegment,
+group_descriptors_segment: &Segment,
 idx: usize,
 block_device: &dyn BlockDevice,
 super_block: &SuperBlock,
@@ -318,12 +318,12 @@ impl Debug for BlockGroup {
 }

 impl PageCacheBackend for BlockGroupImpl {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = self.inode_table_bid + idx as Ext2Bid;
 self.fs.upgrade().unwrap().read_block_async(bid, frame)
 }

-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = self.inode_table_bid + idx as Ext2Bid;
 self.fs.upgrade().unwrap().write_block_async(bid, frame)
 }
@@ -21,7 +21,7 @@ pub struct Ext2 {
 blocks_per_group: Ext2Bid,
 inode_size: usize,
 block_size: usize,
-group_descriptors_segment: VmSegment,
+group_descriptors_segment: Segment,
 self_ref: Weak<Self>,
 }

@@ -55,7 +55,7 @@ impl Ext2 {
 // Load the block groups information
 let load_block_groups = |fs: Weak<Ext2>,
 block_device: &dyn BlockDevice,
-group_descriptors_segment: &VmSegment|
+group_descriptors_segment: &Segment|
 -> Result<Vec<BlockGroup>> {
 let block_groups_count = super_block.block_groups_count() as usize;
 let mut block_groups = Vec::with_capacity(block_groups_count);
@@ -291,7 +291,7 @@ impl Ext2 {
 }

 /// Reads contiguous blocks starting from the `bid` synchronously.
-pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+pub(super) fn read_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
 let status = self
 .block_device
 .read_blocks_sync(Bid::new(bid as u64), segment)?;
@@ -302,7 +302,7 @@ impl Ext2 {
 }

 /// Reads one block indicated by the `bid` synchronously.
-pub(super) fn read_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+pub(super) fn read_block(&self, bid: Ext2Bid, frame: &Frame) -> Result<()> {
 let status = self
 .block_device
 .read_block_sync(Bid::new(bid as u64), frame)?;
@@ -313,13 +313,13 @@ impl Ext2 {
 }

 /// Reads one block indicated by the `bid` asynchronously.
-pub(super) fn read_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+pub(super) fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> {
 let waiter = self.block_device.read_block(Bid::new(bid as u64), frame)?;
 Ok(waiter)
 }

 /// Writes contiguous blocks starting from the `bid` synchronously.
-pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &VmSegment) -> Result<()> {
+pub(super) fn write_blocks(&self, bid: Ext2Bid, segment: &Segment) -> Result<()> {
 let status = self
 .block_device
 .write_blocks_sync(Bid::new(bid as u64), segment)?;
@@ -330,7 +330,7 @@ impl Ext2 {
 }

 /// Writes one block indicated by the `bid` synchronously.
-pub(super) fn write_block(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<()> {
+pub(super) fn write_block(&self, bid: Ext2Bid, frame: &Frame) -> Result<()> {
 let status = self
 .block_device
 .write_block_sync(Bid::new(bid as u64), frame)?;
@@ -341,7 +341,7 @@ impl Ext2 {
 }

 /// Writes one block indicated by the `bid` asynchronously.
-pub(super) fn write_block_async(&self, bid: Ext2Bid, frame: &VmFrame) -> Result<BioWaiter> {
+pub(super) fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> {
 let waiter = self.block_device.write_block(Bid::new(bid as u64), frame)?;
 Ok(waiter)
 }
@@ -124,7 +124,7 @@ impl IndirectBlockCache {
 /// Represents a single indirect block buffer cached by the `IndirectCache`.
 #[derive(Clone, Debug)]
 pub struct IndirectBlock {
-frame: VmFrame,
+frame: Frame,
 state: State,
 }

@@ -838,7 +838,7 @@ impl InodeImpl_ {
 self.inode().fs()
 }

-pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn read_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 if bid >= self.desc.blocks_count() {
 return_errno!(Errno::EINVAL);
 }
@@ -852,14 +852,14 @@ impl InodeImpl_ {
 self.fs().read_block_async(device_range.start, block)
 }

-pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn read_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 match self.read_block_async(bid, block)?.wait() {
 Some(BioStatus::Complete) => Ok(()),
 _ => return_errno!(Errno::EIO),
 }
 }

-pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn write_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 if bid >= self.desc.blocks_count() {
 return_errno!(Errno::EINVAL);
 }
@@ -872,7 +872,7 @@ impl InodeImpl_ {
 Ok(waiter)
 }

-pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn write_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 match self.write_block_async(bid, block)?.wait() {
 Some(BioStatus::Complete) => Ok(()),
 _ => return_errno!(Errno::EIO),
@@ -1525,19 +1525,19 @@ impl InodeImpl {
 self.0.read().desc.ctime
 }

-pub fn read_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn read_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 self.0.read().read_block_sync(bid, block)
 }

-pub fn read_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn read_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 self.0.read().read_block_async(bid, block)
 }

-pub fn write_block_sync(&self, bid: Ext2Bid, block: &VmFrame) -> Result<()> {
+pub fn write_block_sync(&self, bid: Ext2Bid, block: &Frame) -> Result<()> {
 self.0.read().write_block_sync(bid, block)
 }

-pub fn write_block_async(&self, bid: Ext2Bid, block: &VmFrame) -> Result<BioWaiter> {
+pub fn write_block_async(&self, bid: Ext2Bid, block: &Frame) -> Result<BioWaiter> {
 self.0.read().write_block_async(bid, block)
 }

@@ -1612,12 +1612,12 @@ impl InodeImpl {
 }

 impl PageCacheBackend for InodeImpl {
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = idx as Ext2Bid;
 self.read_block_async(bid, frame)
 }

-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> {
 let bid = idx as Ext2Bid;
 self.write_block_async(bid, frame)
 }
@@ -12,8 +12,8 @@ pub(super) use aster_block::{
 BlockDevice, BLOCK_SIZE,
 };
 pub(super) use aster_frame::{
+mm::{Frame, Segment, VmAllocOptions, VmIo},
 sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
-vm::{VmAllocOptions, VmFrame, VmIo, VmSegment},
 };
 pub(super) use aster_rights::Full;
 pub(super) use static_assertions::const_assert;
@@ -7,8 +7,8 @@ use core::{

 use aster_block::bio::BioWaiter;
 use aster_frame::{
+mm::{Frame, VmIo},
 sync::RwMutexWriteGuard,
-vm::{VmFrame, VmIo},
 };
 use aster_rights::Full;
 use aster_util::slot_vec::SlotVec;
@@ -436,13 +436,13 @@ impl RamInode {
 }

 impl PageCacheBackend for RamInode {
-fn read_page(&self, _idx: usize, frame: &VmFrame) -> Result<BioWaiter> {
+fn read_page(&self, _idx: usize, frame: &Frame) -> Result<BioWaiter> {
 // Initially, any block/page in a RamFs inode contains all zeros
 frame.writer().fill(0);
 Ok(BioWaiter::new())
 }

-fn write_page(&self, _idx: usize, _frame: &VmFrame) -> Result<BioWaiter> {
+fn write_page(&self, _idx: usize, _frame: &Frame) -> Result<BioWaiter> {
 // do nothing
 Ok(BioWaiter::new())
 }
@@ -3,7 +3,7 @@
 use core::ops::Range;

 use aster_block::bio::{BioStatus, BioWaiter};
-use aster_frame::vm::{VmAllocOptions, VmFrame};
+use aster_frame::mm::{Frame, VmAllocOptions};
 use aster_rights::Full;
 use lru::LruCache;

@@ -151,7 +151,7 @@ impl Debug for PageCacheManager {
 }

 impl Pager for PageCacheManager {
-fn commit_page(&self, idx: usize) -> Result<VmFrame> {
+fn commit_page(&self, idx: usize) -> Result<Frame> {
 if let Some(page) = self.pages.lock().get(&idx) {
 return Ok(page.frame.clone());
 }
@@ -202,7 +202,7 @@ impl Pager for PageCacheManager {

 #[derive(Debug)]
 struct Page {
-frame: VmFrame,
+frame: Frame,
 state: PageState,
 }

@@ -223,7 +223,7 @@ impl Page {
 })
 }

-pub fn frame(&self) -> &VmFrame {
+pub fn frame(&self) -> &Frame {
 &self.frame
 }

@@ -252,16 +252,16 @@ enum PageState {
 /// This trait represents the backend for the page cache.
 pub trait PageCacheBackend: Sync + Send {
 /// Reads a page from the backend asynchronously.
-fn read_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter>;
+fn read_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>;
 /// Writes a page to the backend asynchronously.
-fn write_page(&self, idx: usize, frame: &VmFrame) -> Result<BioWaiter>;
+fn write_page(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>;
 /// Returns the number of pages in the backend.
 fn npages(&self) -> usize;
 }

 impl dyn PageCacheBackend {
 /// Reads a page from the backend synchronously.
-fn read_page_sync(&self, idx: usize, frame: &VmFrame) -> Result<()> {
+fn read_page_sync(&self, idx: usize, frame: &Frame) -> Result<()> {
 let waiter = self.read_page(idx, frame)?;
 match waiter.wait() {
 Some(BioStatus::Complete) => Ok(()),
@@ -269,7 +269,7 @@ impl dyn PageCacheBackend {
 }
 }
 /// Writes a page to the backend synchronously.
-fn write_page_sync(&self, idx: usize, frame: &VmFrame) -> Result<()> {
+fn write_page_sync(&self, idx: usize, frame: &Frame) -> Result<()> {
 let waiter = self.write_page(idx, frame)?;
 match waiter.wait() {
 Some(BioStatus::Complete) => Ok(()),
@@ -14,8 +14,8 @@ pub(crate) use alloc::{
 pub(crate) use core::{any::Any, ffi::CStr, fmt::Debug};

 pub(crate) use aster_frame::{
+mm::{Vaddr, PAGE_SIZE},
 sync::{Mutex, MutexGuard, RwLock, RwMutex, SpinLock, SpinLockGuard},
-vm::{Vaddr, PAGE_SIZE},
 };
 pub(crate) use bitflags::bitflags;
 pub(crate) use int_to_c_enum::TryFromInt;
@@ -4,8 +4,8 @@ use core::sync::atomic::Ordering;

 use aster_frame::{
 cpu::UserContext,
+mm::VmIo,
 user::{UserContextApi, UserSpace},
-vm::VmIo,
 };
 use aster_rights::Full;

@@ -17,7 +17,7 @@ use core::{
 };

 use align_ext::AlignExt;
-use aster_frame::vm::{VmIo, MAX_USERSPACE_VADDR};
+use aster_frame::mm::{VmIo, MAX_USERSPACE_VADDR};
 use aster_rights::{Full, Rights};

 use self::aux_vec::{AuxKey, AuxVec};
@@ -4,7 +4,7 @@
 //! When create a process from elf file, we will use the elf_load_info to construct the VmSpace

 use align_ext::AlignExt;
-use aster_frame::{task::Task, vm::VmIo};
+use aster_frame::{mm::VmIo, task::Task};
 use aster_rights::{Full, Rights};
 use xmas_elf::program::{self, ProgramHeader64};

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;

 use super::SyscallReturn;
 use crate::{
@@ -2,7 +2,7 @@

 use core::mem;

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;

 use crate::{prelude::*, vm::vmar::Vmar};
@@ -2,7 +2,7 @@

 use core::time::Duration;

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;

 use crate::{
@@ -15,8 +15,8 @@ use alloc::{boxed::Box, sync::Arc};
 use core::{mem::ManuallyDrop, time::Duration};

 use aster_frame::{
+mm::{Frame, VmIo, PAGE_SIZE},
 sync::SpinLock,
-vm::{VmFrame, VmIo, PAGE_SIZE},
 };
 use aster_rights::Rights;
 use aster_time::{read_monotonic_time, Instant};
@@ -196,9 +196,9 @@ struct Vdso {
 data: SpinLock<VdsoData>,
 /// The vmo of the entire VDSO, including the library text and the VDSO data.
 vmo: Arc<Vmo>,
-/// The `VmFrame` that contains the VDSO data. This frame is contained in and
+/// The `Frame` that contains the VDSO data. This frame is contained in and
 /// will not be removed from the VDSO vmo.
-data_frame: VmFrame,
+data_frame: Frame,
 }

 /// A `SpinLock` for the `seq` field in `VdsoData`.
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use aster_frame::vm::PageFlags;
+use aster_frame::mm::PageFlags;
 use aster_rights::Rights;
 use bitflags::bitflags;

@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Rights;

 use super::{
@@ -26,8 +26,8 @@ impl Vmar<Rights> {
 /// # Example
 ///
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
 ///
 /// let vmar = Vmar::new().unwrap();
 /// let vmo = VmoOptions::new(PAGE_SIZE).alloc().unwrap();
@@ -11,7 +11,7 @@ pub mod vm_mapping;
 use core::{cmp::min, ops::Range};

 use align_ext::AlignExt;
-use aster_frame::vm::{VmSpace, MAX_USERSPACE_VADDR};
+use aster_frame::mm::{VmSpace, MAX_USERSPACE_VADDR};
 use aster_rights::Rights;

 use self::{
@@ -39,15 +39,6 @@ use crate::{prelude::*, vm::perms::VmPerms};
 ///
 /// VMARs are implemented with two flavors of capabilities:
 /// the dynamic one (`Vmar<Rights>`) and the static one (`Vmar<R: TRights>).
-///
-/// # Implementation
-///
-/// `Vmar` provides high-level APIs for address space management by wrapping
-/// around its low-level counterpart `_frame::vm::VmFrames`.
-/// Compared with `VmFrames`,
-/// `Vmar` is easier to use (by offering more powerful APIs) and
-/// harder to misuse (thanks to its nature of being capability).
-///
 pub struct Vmar<R = Rights>(Arc<Vmar_>, R);

 pub trait VmarRightsOp {
@@ -2,7 +2,7 @@

 //! Options for allocating child VMARs.

-use aster_frame::{vm::PAGE_SIZE, Error, Result};
+use aster_frame::{mm::PAGE_SIZE, Error, Result};

 use super::Vmar;

@@ -14,7 +14,7 @@ use super::Vmar;
 /// A child VMAR created from a parent VMAR of _dynamic_ capability is also a
 /// _dynamic_ capability.
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, Vmar};
+/// use aster_nix::vm::{PAGE_SIZE, Vmar};
 ///
 /// let parent_vmar = Vmar::new();
 /// let child_size = 10 * PAGE_SIZE;
@@ -29,8 +29,8 @@ use super::Vmar;
 /// A child VMAR created from a parent VMAR of _static_ capability is also a
 /// _static_ capability.
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar};
 ///
 /// let parent_vmar: Vmar<Full> = Vmar::new();
 /// let child_size = 10 * PAGE_SIZE;
@@ -135,7 +135,7 @@ impl<R> VmarChildOptions<R> {

 #[cfg(ktest)]
 mod test {
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;

 use super::*;
@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::{Dup, Rights, TRightSet, TRights};
 use aster_rights_proc::require;

@@ -31,8 +31,8 @@ impl<R: TRights> Vmar<TRightSet<R>> {
 /// # Example
 ///
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, Vmar, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, Vmar, VmoOptions};
 ///
 /// let vmar = Vmar::<RightsWrapper<Full>>::new().unwrap();
 /// let vmo = VmoOptions::new(PAGE_SIZE).alloc().unwrap();
@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::{PageFlags, VmFrame, VmFrameVec, VmIo, VmMapOptions, VmSpace};
+use aster_frame::mm::{Frame, PageFlags, VmFrameVec, VmIo, VmMapOptions, VmSpace};

 use super::{interval::Interval, is_intersected, Vmar, Vmar_};
 use crate::{
@@ -143,7 +143,7 @@ impl VmMapping {
 pub(super) fn map_one_page(
 &self,
 page_idx: usize,
-frame: VmFrame,
+frame: Frame,
 is_readonly: bool,
 ) -> Result<()> {
 let parent = self.parent.upgrade().unwrap();
@@ -458,7 +458,7 @@ impl VmMappingInner {
 vmo: &Vmo<Rights>,
 vm_space: &VmSpace,
 page_idx: usize,
-frame: VmFrame,
+frame: Frame,
 is_readonly: bool,
 ) -> Result<()> {
 let map_addr = self.page_map_addr(page_idx);
@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::{VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmIo};
 use aster_rights::{Rights, TRights};

 use super::{
@@ -68,7 +68,7 @@ impl Vmo<Rights> {
 }

 /// commit a page at specific offset
-pub fn commit_page(&self, offset: usize) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize) -> Result<Frame> {
 self.check_rights(Rights::WRITE)?;
 self.0.commit_page(offset, false)
 }
@ -7,7 +7,7 @@ use core::ops::Range;
|
|||||||
use align_ext::AlignExt;
|
use align_ext::AlignExt;
|
||||||
use aster_frame::{
|
use aster_frame::{
|
||||||
collections::xarray::{CursorMut, XArray, XMark},
|
collections::xarray::{CursorMut, XArray, XMark},
|
||||||
vm::{VmAllocOptions, VmFrame, VmReader, VmWriter},
|
mm::{Frame, VmAllocOptions, VmReader, VmWriter},
|
||||||
};
|
};
|
||||||
use aster_rights::Rights;
|
use aster_rights::Rights;
|
||||||
|
|
||||||
@ -139,8 +139,8 @@ pub(super) enum VmoMark {
|
|||||||
/// The VMO whose `pages` is marked as `CowVmo` may require a Copy-On-Write (COW) operation
|
/// The VMO whose `pages` is marked as `CowVmo` may require a Copy-On-Write (COW) operation
|
||||||
/// when performing a write action.
|
/// when performing a write action.
|
||||||
CowVmo,
|
CowVmo,
|
||||||
/// Marks used for the `VmFrame` stored within the pages marked as `CowVmo`,
|
/// Marks used for the `Frame` stored within the pages marked as `CowVmo`,
|
||||||
/// `VmFrame`s marked as `ExclusivePage` are newly created through the COW mechanism
|
/// `Frame`s marked as `ExclusivePage` are newly created through the COW mechanism
|
||||||
/// and do not require further COW operations.
|
/// and do not require further COW operations.
|
||||||
ExclusivePage,
|
ExclusivePage,
|
||||||
}
|
}
|
||||||
@ -154,19 +154,19 @@ impl From<VmoMark> for XMark {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `Pages` is the struct that manages the `VmFrame`s stored in `Vmo_`.
|
/// `Pages` is the struct that manages the `Frame`s stored in `Vmo_`.
|
||||||
pub(super) enum Pages {
|
pub(super) enum Pages {
|
||||||
/// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
|
/// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
|
||||||
Nonresizable(Arc<Mutex<XArray<VmFrame, VmoMark>>>, usize),
|
Nonresizable(Arc<Mutex<XArray<Frame, VmoMark>>>, usize),
|
||||||
/// `Pages` that can be resized and have a variable size, and such `Pages` cannot
|
/// `Pages` that can be resized and have a variable size, and such `Pages` cannot
|
||||||
/// be shared between different VMOs.
|
/// be shared between different VMOs.
|
||||||
Resizable(Mutex<(XArray<VmFrame, VmoMark>, usize)>),
|
Resizable(Mutex<(XArray<Frame, VmoMark>, usize)>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Pages {
|
impl Pages {
|
||||||
fn with<R, F>(&self, func: F) -> R
|
fn with<R, F>(&self, func: F) -> R
|
||||||
where
|
where
|
||||||
F: FnOnce(&mut XArray<VmFrame, VmoMark>, usize) -> R,
|
F: FnOnce(&mut XArray<Frame, VmoMark>, usize) -> R,
|
||||||
{
|
{
|
||||||
match self {
|
match self {
|
||||||
Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
|
Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
|
||||||
@@ -194,14 +194,14 @@ pub(super) struct Vmo_ {
 pages: Pages,
 }

-fn clone_page(page: &VmFrame) -> Result<VmFrame> {
+fn clone_page(page: &Frame) -> Result<Frame> {
 let new_page = VmAllocOptions::new(1).alloc_single()?;
 new_page.copy_from(page);
 Ok(new_page)
 }

 impl Vmo_ {
-/// Prepare a new `VmFrame` for the target index in pages, returning the new page as well as
+/// Prepare a new `Frame` for the target index in pages, returning the new page as well as
 /// whether this page needs to be marked as exclusive.
 ///
 /// Based on the type of VMO and the impending operation on the prepared page, there are 3 conditions:
@@ -216,7 +216,7 @@ impl Vmo_ {
 page_idx: usize,
 is_cow_vmo: bool,
 will_write: bool,
-) -> Result<(VmFrame, bool)> {
+) -> Result<(Frame, bool)> {
 let (page, should_mark_exclusive) = match &self.pager {
 None => {
 // Condition 1. The new anonymous page only need to be marked as `ExclusivePage`
@@ -227,8 +227,8 @@ impl Vmo_ {
 let page = pager.commit_page(page_idx)?;
 // The prerequisite for triggering the COW mechanism here is that the current
 // VMO requires COW and the prepared page is about to undergo a write operation.
-// At this point, the `VmFrame` obtained from the pager needs to be cloned to
-// avoid subsequent modifications affecting the content of the `VmFrame` in the pager.
+// At this point, the `Frame` obtained from the pager needs to be cloned to
+// avoid subsequent modifications affecting the content of the `Frame` in the pager.
 let trigger_cow = is_cow_vmo && will_write;
 if trigger_cow {
 // Condition 3.
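The pager-backed branch above is the heart of the copy-on-write path: the committed page is reused as-is for reads, but cloned into a fresh `Frame` before a write to a COW VMO. A hedged sketch of that decision, written as a fragment inside a `Result`-returning function and using only names that appear in this hunk (`pager`, `page_idx`, `clone_page`, `is_cow_vmo`, `will_write`):

```rust
// Commit the page through the pager, then decide whether to clone it.
let page = pager.commit_page(page_idx)?;
let page = if is_cow_vmo && will_write {
    // COW triggered: copy into a newly allocated `Frame` so that later
    // writes do not leak back into the pager's copy.
    clone_page(&page)?
} else {
    page
};
```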
@@ -244,10 +244,10 @@ impl Vmo_ {

 fn commit_with_cursor(
 &self,
-cursor: &mut CursorMut<'_, VmFrame, VmoMark>,
+cursor: &mut CursorMut<'_, Frame, VmoMark>,
 is_cow_vmo: bool,
 will_write: bool,
-) -> Result<VmFrame> {
+) -> Result<Frame> {
 let (new_page, is_exclusive) = {
 let is_exclusive = cursor.is_marked(VmoMark::ExclusivePage);
 if let Some(committed_page) = cursor.load() {
@@ -276,7 +276,7 @@ impl Vmo_ {
 /// Commit the page corresponding to the target offset in the VMO and return that page.
 /// If the current offset has already been committed, the page will be returned directly.
 /// During the commit process, the Copy-On-Write (COW) mechanism may be triggered depending on the circumstances.
-pub fn commit_page(&self, offset: usize, will_write: bool) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize, will_write: bool) -> Result<Frame> {
 let page_idx = offset / PAGE_SIZE + self.page_idx_offset;
 self.pages.with(|pages, size| {
 let is_cow_vmo = pages.is_marked(VmoMark::CowVmo);
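From a caller's point of view, `commit_page` is the single entry point for both read and write faults; only the `will_write` flag decides whether the COW machinery above may kick in. A minimal usage sketch (a fragment, not part of the diff; it assumes a `Vmo_` value named `vmo` and propagates errors with `?`):

```rust
// Committing for a read returns whatever frame already backs the offset.
let read_frame: Frame = vmo.commit_page(0, false)?;
// Committing for a write may clone the frame first if this is a COW VMO.
let write_frame: Frame = vmo.commit_page(PAGE_SIZE, true)?;
```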
@@ -310,7 +310,7 @@ impl Vmo_ {
 will_write: bool,
 ) -> Result<()>
 where
-F: FnMut(VmFrame),
+F: FnMut(Frame),
 {
 self.pages.with(|pages, size| {
 if range.end > size {
@@ -348,7 +348,7 @@ impl Vmo_ {
 let mut read_offset = offset % PAGE_SIZE;
 let mut buf_writer: VmWriter = buf.into();

-let read = move |page: VmFrame| {
+let read = move |page: Frame| {
 page.reader().skip(read_offset).read(&mut buf_writer);
 read_offset = 0;
 };
@@ -363,7 +363,7 @@ impl Vmo_ {
 let mut write_offset = offset % PAGE_SIZE;
 let mut buf_reader: VmReader = buf.into();

-let write = move |page: VmFrame| {
+let write = move |page: Frame| {
 page.writer().skip(write_offset).write(&mut buf_reader);
 write_offset = 0;
 };
@@ -518,7 +518,7 @@ impl Vmo_ {

 fn decommit_pages(
 &self,
-pages: &mut XArray<VmFrame, VmoMark>,
+pages: &mut XArray<Frame, VmoMark>,
 range: Range<usize>,
 ) -> Result<()> {
 let raw_page_idx_range = get_page_idx_range(&range);
@@ -575,7 +575,7 @@ impl<R> Vmo<R> {
 self.0.is_page_committed(page_idx)
 }

-pub fn get_committed_frame(&self, page_idx: usize, write_page: bool) -> Result<VmFrame> {
+pub fn get_committed_frame(&self, page_idx: usize, write_page: bool) -> Result<Frame> {
 self.0.commit_page(page_idx * PAGE_SIZE, write_page)
 }

@@ -7,7 +7,7 @@ use core::{marker::PhantomData, ops::Range};
 use align_ext::AlignExt;
 use aster_frame::{
 collections::xarray::XArray,
-vm::{VmAllocOptions, VmFrame},
+mm::{Frame, VmAllocOptions},
 };
 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;
@@ -22,7 +22,7 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 ///
 /// Creating a VMO as a _dynamic_ capability with full access rights:
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()
@@ -31,8 +31,8 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 ///
 /// Creating a VMO as a _static_ capability with all access rights:
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let vmo = VmoOptions::<Full>::new(PAGE_SIZE)
 /// .alloc()
@@ -43,7 +43,7 @@ use crate::{prelude::*, vm::vmo::Vmo_};
 /// physically contiguous:
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoFlags};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoFlags};
 ///
 /// let vmo = VmoOptions::new(10 * PAGE_SIZE)
 /// .flags(VmoFlags::RESIZABLE)
@@ -140,7 +140,7 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
 })
 }

-fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<VmFrame, VmoMark>> {
+fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<Frame, VmoMark>> {
 if flags.contains(VmoFlags::CONTIGUOUS) {
 // if the vmo is continuous, we need to allocate frames for the vmo
 let frames_num = size / PAGE_SIZE;
@@ -168,7 +168,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// A child VMO created from a parent VMO of _dynamic_ capability is also a
 /// _dynamic_ capability.
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()
@@ -182,8 +182,8 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// A child VMO created from a parent VMO of _static_ capability is also a
 /// _static_ capability.
 /// ```
-/// use aster_std::prelude::*;
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::prelude::*;
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo: Vmo<Full> = VmoOptions::new(PAGE_SIZE)
 /// .alloc()
@@ -200,7 +200,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// right regardless of whether the parent is writable or not.
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()
@@ -215,7 +215,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// The above rule for COW VMO children also applies to static capabilities.
 ///
 /// ```
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions, VmoChildOptions};
 ///
 /// let parent_vmo = VmoOptions::<TRights![Read, Dup]>::new(PAGE_SIZE)
 /// .alloc()
@@ -231,7 +231,7 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<
 /// Note that a slice VMO child and its parent cannot not be resizable.
 ///
 /// ```rust
-/// use aster_std::vm::{PAGE_SIZE, VmoOptions};
+/// use aster_nix::vm::{PAGE_SIZE, VmoOptions};
 ///
 /// let parent_vmo = VmoOptions::new(PAGE_SIZE)
 /// .alloc()
@@ -474,7 +474,7 @@ impl VmoChildType for VmoCowChild {}

 #[cfg(ktest)]
 mod test {
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Full;

 use super::*;
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use aster_frame::vm::VmFrame;
+use aster_frame::mm::Frame;

 use crate::prelude::*;

@@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
 /// whatever frame that may or may not be the same as the last time.
 ///
 /// It is up to the pager to decide the range of valid indices.
-fn commit_page(&self, idx: usize) -> Result<VmFrame>;
+fn commit_page(&self, idx: usize) -> Result<Frame>;

 /// Notify the pager that the frame at a specified index has been updated.
 ///
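For VMOs that are backed by a pager (for example, page-cache pages), everything ultimately flows through this trait method. A deliberately trivial sketch of a caller, only to show the renamed return type in context (it assumes a `pager: &Arc<dyn Pager>` and the crate's `Result` alias; not part of the diff):

```rust
// Ask the pager for the frame backing page `idx`; per the doc comment above,
// the returned `Frame` may differ from the one returned by an earlier call.
fn fetch_page(pager: &Arc<dyn Pager>, idx: usize) -> Result<Frame> {
    pager.commit_page(idx)
}
```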
@@ -2,7 +2,7 @@

 use core::ops::Range;

-use aster_frame::vm::{VmFrame, VmIo};
+use aster_frame::mm::{Frame, VmIo};
 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;

@@ -68,7 +68,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
 }

 /// commit a page at specific offset
-pub fn commit_page(&self, offset: usize) -> Result<VmFrame> {
+pub fn commit_page(&self, offset: usize) -> Result<Frame> {
 self.check_rights(Rights::WRITE)?;
 self.0.commit_page(offset, false)
 }
@@ -2,8 +2,8 @@

 use align_ext::AlignExt;
 use aster_frame::{
+mm::{Frame, Segment, VmReader, VmWriter},
 sync::WaitQueue,
-vm::{VmFrame, VmReader, VmSegment, VmWriter},
 };
 use int_to_c_enum::TryFromInt;

@@ -359,7 +359,7 @@ pub enum BioStatus {
 #[derive(Debug, Clone)]
 pub struct BioSegment {
 /// The contiguous pages on which this segment resides.
-pages: VmSegment,
+pages: Segment,
 /// The starting offset (in bytes) within the first page.
 /// The offset should always be aligned to the sector size and
 /// must not exceed the size of a single page.
@@ -373,8 +373,8 @@ pub struct BioSegment {
 const SECTOR_SIZE: u16 = super::SECTOR_SIZE as u16;

 impl<'a> BioSegment {
-/// Constructs a new `BioSegment` from `VmSegment`.
-pub fn from_segment(segment: VmSegment, offset: usize, len: usize) -> Self {
+/// Constructs a new `BioSegment` from `Segment`.
+pub fn from_segment(segment: Segment, offset: usize, len: usize) -> Self {
 assert!(offset + len <= segment.nbytes());

 Self {
@@ -384,12 +384,12 @@ impl<'a> BioSegment {
 }
 }

-/// Constructs a new `BioSegment` from `VmFrame`.
-pub fn from_frame(frame: VmFrame, offset: usize, len: usize) -> Self {
+/// Constructs a new `BioSegment` from `Frame`.
+pub fn from_frame(frame: Frame, offset: usize, len: usize) -> Self {
 assert!(offset + len <= super::BLOCK_SIZE);

 Self {
-pages: VmSegment::from(frame),
+pages: Segment::from(frame),
 offset: AlignedUsize::<SECTOR_SIZE>::new(offset).unwrap(),
 len: AlignedUsize::<SECTOR_SIZE>::new(len).unwrap(),
 }
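As the assertions above show, a `BioSegment` built from a single `Frame` must fit within one block, and both the offset and the length must be sector-aligned. A hedged usage sketch with the renamed types (it assumes a `Frame` value named `frame` and the crate's `BLOCK_SIZE` constant; the literal 512 matches the `SECTOR_SIZE` constant defined later in this diff):

```rust
// A segment covering the whole frame/block (offset 0, length BLOCK_SIZE).
let whole = BioSegment::from_frame(frame.clone(), 0, BLOCK_SIZE);
// A segment covering only the first sector (512 bytes) of the frame.
let first_sector = BioSegment::from_frame(frame, 0, 512);
```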
@@ -411,7 +411,7 @@ impl<'a> BioSegment {
 }

 /// Returns the contiguous pages on which this segment resides.
-pub fn pages(&self) -> &VmSegment {
+pub fn pages(&self) -> &Segment {
 &self.pages
 }

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use aster_frame::vm::{VmAllocOptions, VmFrame, VmIo, VmSegment};
+use aster_frame::mm::{Frame, Segment, VmAllocOptions, VmIo};

 use super::{
 bio::{Bio, BioEnqueueError, BioSegment, BioStatus, BioType, BioWaiter, SubmittedBio},
@@ -16,7 +16,7 @@ impl dyn BlockDevice {
 pub fn read_blocks_sync(
 &self,
 bid: Bid,
-segment: &VmSegment,
+segment: &Segment,
 ) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Read, bid, segment);
 let status = bio.submit_sync(self)?;
@@ -24,20 +24,20 @@ impl dyn BlockDevice {
 }

 /// Asynchronously reads contiguous blocks starting from the `bid`.
-pub fn read_blocks(&self, bid: Bid, segment: &VmSegment) -> Result<BioWaiter, BioEnqueueError> {
+pub fn read_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Read, bid, segment);
 bio.submit(self)
 }

 /// Synchronously reads one block indicated by the `bid`.
-pub fn read_block_sync(&self, bid: Bid, frame: &VmFrame) -> Result<BioStatus, BioEnqueueError> {
+pub fn read_block_sync(&self, bid: Bid, frame: &Frame) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Read, bid, frame);
 let status = bio.submit_sync(self)?;
 Ok(status)
 }

 /// Asynchronously reads one block indicated by the `bid`.
-pub fn read_block(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter, BioEnqueueError> {
+pub fn read_block(&self, bid: Bid, frame: &Frame) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Read, bid, frame);
 bio.submit(self)
 }
@@ -46,7 +46,7 @@ impl dyn BlockDevice {
 pub fn write_blocks_sync(
 &self,
 bid: Bid,
-segment: &VmSegment,
+segment: &Segment,
 ) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Write, bid, segment);
 let status = bio.submit_sync(self)?;
@@ -54,28 +54,20 @@ impl dyn BlockDevice {
 }

 /// Asynchronously writes contiguous blocks starting from the `bid`.
-pub fn write_blocks(
-&self,
-bid: Bid,
-segment: &VmSegment,
-) -> Result<BioWaiter, BioEnqueueError> {
+pub fn write_blocks(&self, bid: Bid, segment: &Segment) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_segment(BioType::Write, bid, segment);
 bio.submit(self)
 }

 /// Synchronously writes one block indicated by the `bid`.
-pub fn write_block_sync(
-&self,
-bid: Bid,
-frame: &VmFrame,
-) -> Result<BioStatus, BioEnqueueError> {
+pub fn write_block_sync(&self, bid: Bid, frame: &Frame) -> Result<BioStatus, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Write, bid, frame);
 let status = bio.submit_sync(self)?;
 Ok(status)
 }

 /// Asynchronously writes one block indicated by the `bid`.
-pub fn write_block(&self, bid: Bid, frame: &VmFrame) -> Result<BioWaiter, BioEnqueueError> {
+pub fn write_block(&self, bid: Bid, frame: &Frame) -> Result<BioWaiter, BioEnqueueError> {
 let bio = create_bio_from_frame(BioType::Write, bid, frame);
 bio.submit(self)
 }
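With the rename, the single-block convenience methods take a `&Frame` directly. A hedged round-trip sketch (it assumes `dev: &dyn BlockDevice` and an existing block id `bid: Bid`; `alloc_single` comes from `VmAllocOptions` as used earlier in this diff, and error handling is simplified for illustration):

```rust
// Allocate a single frame to stage one block's worth of data.
let frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();
// Synchronously write the frame to block `bid`, then read it back in place.
// Each call returns a `BioStatus` describing how the bio completed.
let _write_status = dev.write_block_sync(bid, &frame);
let _read_status = dev.read_block_sync(bid, &frame);
```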
@@ -202,7 +194,7 @@ impl dyn BlockDevice {
 }

 // TODO: Maybe we should have a builder for `Bio`.
-fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &VmSegment) -> Bio {
+fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &Segment) -> Bio {
 let bio_segment = BioSegment::from_segment(segment.clone(), 0, segment.nbytes());
 Bio::new(
 type_,
@@ -213,7 +205,7 @@ fn create_bio_from_segment(type_: BioType, bid: Bid, segment: &VmSegment) -> Bio
 }

 // TODO: Maybe we should have a builder for `Bio`.
-fn create_bio_from_frame(type_: BioType, bid: Bid, frame: &VmFrame) -> Bio {
+fn create_bio_from_frame(type_: BioType, bid: Bid, frame: &Frame) -> Bio {
 let bio_segment = BioSegment::from_frame(frame.clone(), 0, BLOCK_SIZE);
 Bio::new(
 type_,
@@ -49,7 +49,7 @@ use self::{
 prelude::*,
 };

-pub const BLOCK_SIZE: usize = aster_frame::vm::PAGE_SIZE;
+pub const BLOCK_SIZE: usize = aster_frame::mm::PAGE_SIZE;
 pub const SECTOR_SIZE: usize = 512;

 pub trait BlockDevice: Send + Sync + Any + Debug {
@@ -10,7 +10,7 @@ extern crate alloc;
 use alloc::{collections::BTreeMap, fmt::Debug, string::String, sync::Arc, vec::Vec};
 use core::any::Any;

-use aster_frame::{sync::SpinLock, vm::VmReader};
+use aster_frame::{mm::VmReader, sync::SpinLock};
 use component::{init_component, ComponentInitError};
 use spin::Once;

@@ -16,8 +16,8 @@ use core::{
 use aster_frame::{
 boot,
 io_mem::IoMem,
+mm::{VmIo, PAGE_SIZE},
 sync::SpinLock,
-vm::{VmIo, PAGE_SIZE},
 };
 use component::{init_component, ComponentInitError};
 use font8x8::UnicodeFonts;
@@ -39,7 +39,7 @@ pub(crate) fn init() {
 let mut writer = {
 let framebuffer = boot::framebuffer_arg();
 let mut size = 0;
-for i in aster_frame::vm::FRAMEBUFFER_REGIONS.get().unwrap().iter() {
+for i in aster_frame::mm::FRAMEBUFFER_REGIONS.get().unwrap().iter() {
 size = i.len();
 }

@@ -4,8 +4,8 @@ use alloc::{collections::LinkedList, sync::Arc};

 use align_ext::AlignExt;
 use aster_frame::{
+mm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 sync::SpinLock,
-vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 };
 use pod::Pod;
 use spin::Once;
@@ -9,8 +9,8 @@ use alloc::{
 use core::ops::Range;

 use aster_frame::{
+mm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 sync::{RwLock, SpinLock},
-vm::{Daddr, DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmReader, VmWriter, PAGE_SIZE},
 };
 use bitvec::{array::BitArray, prelude::Lsb0};
 use ktest::ktest;
@@ -2,7 +2,7 @@

 use alloc::vec;

-use aster_frame::vm::VmWriter;
+use aster_frame::mm::VmWriter;
 use smoltcp::{phy, time::Instant};

 use crate::{buffer::RxBuffer, AnyNetworkDevice};
@@ -9,9 +9,9 @@ use aster_block::{
 };
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmIo},
 sync::SpinLock,
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmIo},
 };
 use aster_util::safe_ptr::SafePtr;
 use id_alloc::IdAlloc;
@@ -6,9 +6,9 @@ use core::hint::spin_loop;
 use aster_console::{AnyConsoleDevice, ConsoleCallback};
 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmReader},
 sync::{RwLock, SpinLock},
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, DmaStreamSlice, VmAllocOptions, VmReader},
 };
 use aster_util::safe_ptr::SafePtr;
 use log::debug;
@@ -10,10 +10,10 @@ use core::{fmt::Debug, iter, mem};

 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmIo, PAGE_SIZE},
 offset_of,
 sync::{RwLock, SpinLock},
 trap::TrapFrame,
-vm::{DmaDirection, DmaStream, HasDaddr, VmAllocOptions, VmIo, PAGE_SIZE},
 };
 use aster_input::{
 key::{Key, KeyStatus},
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use aster_frame::vm::{DmaCoherent, DmaStream, DmaStreamSlice, HasDaddr};
+use aster_frame::mm::{DmaCoherent, DmaStream, DmaStreamSlice, HasDaddr};
 use aster_network::{DmaSegment, RxBuffer, TxBuffer};

 /// A DMA-capable buffer.
@@ -10,8 +10,8 @@ use core::{

 use aster_frame::{
 io_mem::IoMem,
+mm::{DmaCoherent, VmAllocOptions},
 offset_of,
-vm::{DmaCoherent, VmAllocOptions},
 };
 use aster_rights::{Dup, TRightSet, TRights, Write};
 use aster_util::{field_ptr, safe_ptr::SafePtr};
@@ -74,7 +74,7 @@ impl VirtQueue {

 let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
 // FIXME: How about pci legacy?
-// Currently, we use one VmFrame to place the descriptors and avaliable rings, one VmFrame to place used rings
+// Currently, we use one Frame to place the descriptors and avaliable rings, one Frame to place used rings
 // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
 if size > 128 {
 return Err(QueueError::InvalidArgs);
@@ -9,10 +9,10 @@ use aster_frame::{
 device::{MmioCommonDevice, VirtioMmioVersion},
 },
 io_mem::IoMem,
+mm::{DmaCoherent, PAGE_SIZE},
 offset_of,
 sync::RwLock,
 trap::IrqCallbackFunction,
-vm::{DmaCoherent, PAGE_SIZE},
 };
 use aster_rights::{ReadOp, WriteOp};
 use aster_util::{field_ptr, safe_ptr::SafePtr};
@@ -3,7 +3,7 @@
 use alloc::boxed::Box;
 use core::fmt::Debug;

-use aster_frame::{io_mem::IoMem, trap::IrqCallbackFunction, vm::DmaCoherent};
+use aster_frame::{io_mem::IoMem, mm::DmaCoherent, trap::IrqCallbackFunction};
 use aster_util::safe_ptr::SafePtr;

 use self::{mmio::virtio_mmio_init, pci::virtio_pci_init};
@@ -11,9 +11,9 @@ use aster_frame::{
 BusProbeError,
 },
 io_mem::IoMem,
+mm::DmaCoherent,
 offset_of,
 trap::IrqCallbackFunction,
-vm::DmaCoherent,
 };
 use aster_util::{field_ptr, safe_ptr::SafePtr};
 use log::{info, warn};
@@ -3,7 +3,7 @@
 use core::{fmt::Debug, marker::PhantomData};

 use aster_frame::{
-vm::{Daddr, DmaStream, HasDaddr, HasPaddr, Paddr, VmIo},
+mm::{Daddr, DmaStream, HasDaddr, HasPaddr, Paddr, VmIo},
 Result,
 };
 use aster_rights::{Dup, Exec, Full, Read, Signal, TRightSet, TRights, Write};
@@ -56,7 +56,7 @@ pub use typeflags_util::SetContain;
 ///
 /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
 /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
-/// `VmFrame`. The blanket implementations of `VmIo` also include pointer-like
+/// `Frame`. The blanket implementations of `VmIo` also include pointer-like
 /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`,
 /// and `Arc<IoMem>`.
 ///
@@ -382,7 +382,7 @@ impl<T, M: Debug, R> Debug for SafePtr<T, M, R> {
 macro_rules! field_ptr {
 ($ptr:expr, $type:ty, $($field:tt)+) => {{
 use aster_frame::offset_of;
-use aster_frame::vm::VmIo;
+use aster_frame::mm::VmIo;
 use aster_rights::Dup;
 use aster_rights::TRightSet;
 use aster_rights::TRights;