Understand the memory space and move the stack higher

Zhang Junyang
2024-03-13 13:56:04 +08:00
committed by Tate, Hongliang Tian
parent dede22843a
commit 52f07458f7
29 changed files with 155 additions and 125 deletions

View File

@ -15,8 +15,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
config::PHYS_OFFSET,
vm::paddr_to_vaddr,
vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR},
};
static BOOT_PARAMS: Once<BootParams> = Once::new();
@ -71,7 +70,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
let hdr = &BOOT_PARAMS.get().unwrap().hdr;
let ptr = hdr.ramdisk_image as usize;
// We must return a slice composed of VAs since the kernel should read everything via VAs.
let base_va = if ptr < PHYS_OFFSET {
let base_va = if ptr < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(ptr)
} else {
ptr

View File

@ -12,8 +12,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
config::PHYS_OFFSET,
vm::paddr_to_vaddr,
vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR},
};
global_asm!(include_str!("header.S"));
@ -77,7 +76,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
)
};
// We must return a slice composed of VAs since the kernel should read everything via VAs.
let base_va = if start < PHYS_OFFSET {
let base_va = if start < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(start)
} else {
start

View File

@ -17,7 +17,7 @@ use crate::boot::{
global_asm!(include_str!("header.S"));
use crate::{config::PHYS_OFFSET, vm::paddr_to_vaddr};
use crate::vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR};
pub(super) const MULTIBOOT2_ENTRY_MAGIC: u32 = 0x36d76289;
@ -58,7 +58,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
.expect("No Multiboot2 modules found!");
let base_addr = mb2_module_tag.start_address() as usize;
// We must return a slice composed of VAs since the kernel should read everything via VAs.
let base_va = if base_addr < PHYS_OFFSET {
let base_va = if base_addr < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(base_addr)
} else {
base_addr

View File

@ -3,7 +3,7 @@
use pod::Pod;
use crate::{
config::ENTRY_COUNT,
arch::x86::mm::NR_ENTRIES_PER_PAGE,
vm::page_table::{PageTableEntryTrait, PageTableFlagsTrait},
};
@ -153,6 +153,6 @@ impl PageTableEntryTrait for PageTableEntry {
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}
}

View File

@ -6,7 +6,6 @@ use pod::Pod;
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
use crate::{
config::ENTRY_COUNT,
sync::Mutex,
vm::{
page_table::{table_of, PageTableEntryTrait, PageTableFlagsTrait},
@ -14,6 +13,8 @@ use crate::{
},
};
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
@ -210,7 +211,7 @@ impl PageTableEntryTrait for PageTableEntry {
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}
}

View File

@ -12,11 +12,11 @@ use tdx_guest::{
use crate::{
arch::mm::{is_kernel_vaddr, PageTableFlags},
config::PAGE_SIZE,
vm::{
paddr_to_vaddr,
page_table::{PageTableError, KERNEL_PAGE_TABLE},
},
PAGE_SIZE,
};
const SHARED_BIT: u8 = 51;

View File

@ -1,23 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
#![allow(unused)]
use log::Level;
pub const USER_STACK_SIZE: usize = PAGE_SIZE * 4;
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
pub const KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
pub const KERNEL_OFFSET: usize = 0xffffffff80000000;
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;
pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;
pub const DEFAULT_LOG_LEVEL: Level = Level::Error;
pub const REAL_TIME_TASK_PRI: u16 = 100;

View File

@ -28,7 +28,6 @@ extern crate static_assertions;
pub mod arch;
pub mod boot;
pub mod bus;
pub mod config;
pub mod console;
pub mod cpu;
mod error;

View File

@ -1,16 +1,20 @@
// SPDX-License-Identifier: MPL-2.0
use log::{Metadata, Record};
use log::{Level, Metadata, Record};
use crate::{config::DEFAULT_LOG_LEVEL, early_println};
use crate::early_println;
const LOGGER: Logger = Logger {};
/// FIXME: The logs should be able to be read from files in the userspace,
/// and the log level should be configurable.
pub const INIT_LOG_LEVEL: Level = Level::Error;
struct Logger {}
impl log::Log for Logger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= DEFAULT_LOG_LEVEL
metadata.level() <= INIT_LOG_LEVEL
}
fn log(&self, record: &Record) {
@ -24,6 +28,6 @@ impl log::Log for Logger {
pub(crate) fn init() {
log::set_logger(&LOGGER)
.map(|()| log::set_max_level(DEFAULT_LOG_LEVEL.to_level_filter()))
.map(|()| log::set_max_level(INIT_LOG_LEVEL.to_level_filter()))
.unwrap();
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use crate::config::REAL_TIME_TASK_PRI;
pub const REAL_TIME_TASK_PRIORITY: u16 = 100;
/// The priority of a task.
/// Similar to Linux, a larger value represents a lower priority,
@ -44,6 +44,6 @@ impl Priority {
}
pub const fn is_real_time(&self) -> bool {
self.0 < REAL_TIME_TASK_PRI
self.0 < REAL_TIME_TASK_PRIORITY
}
}

View File

@ -9,14 +9,15 @@ use super::{
};
use crate::{
arch::mm::PageTableFlags,
config::{KERNEL_STACK_SIZE, PAGE_SIZE},
cpu::CpuSet,
prelude::*,
sync::{Mutex, MutexGuard},
user::UserSpace,
vm::{page_table::KERNEL_PAGE_TABLE, VmAllocOptions, VmSegment},
vm::{page_table::KERNEL_PAGE_TABLE, VmAllocOptions, VmSegment, PAGE_SIZE},
};
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
core::arch::global_asm!(include_str!("switch.S"));
#[derive(Debug, Default, Clone, Copy)]

View File

@ -10,7 +10,7 @@ pub use dma_stream::{DmaDirection, DmaStream};
use spin::Once;
use super::Paddr;
use crate::{arch::iommu::has_iommu, config::PAGE_SIZE, sync::SpinLock};
use crate::{arch::iommu::has_iommu, sync::SpinLock, vm::PAGE_SIZE};
/// If a device performs DMA to read or write system
/// memory, the addresses used by the device are device addresses.

View File

@ -9,7 +9,7 @@ use core::{
use pod::Pod;
use super::{frame_allocator, HasPaddr, VmIo};
use crate::{config::PAGE_SIZE, prelude::*, Error};
use crate::{prelude::*, vm::PAGE_SIZE, Error};
/// A collection of page frames (physical memory pages).
///

View File

@ -10,8 +10,8 @@ use spin::Once;
use super::{frame::VmFrameFlags, VmFrame, VmFrameVec, VmSegment};
use crate::{
boot::memory_region::{MemoryRegion, MemoryRegionType},
config::PAGE_SIZE,
sync::SpinLock,
vm::PAGE_SIZE,
};
pub(super) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();

View File

@ -11,11 +11,10 @@ use log::debug;
use super::paddr_to_vaddr;
use crate::{
config::{KERNEL_HEAP_SIZE, PAGE_SIZE},
prelude::*,
sync::SpinLock,
trap::disable_local,
vm::frame_allocator::FRAME_ALLOCATOR,
vm::{frame_allocator::FRAME_ALLOCATOR, PAGE_SIZE},
Error,
};
@ -27,12 +26,14 @@ pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
static mut HEAP_SPACE: [u8; INIT_KERNEL_HEAP_SIZE] = [0; INIT_KERNEL_HEAP_SIZE];
pub fn init() {
// Safety: The HEAP_SPACE is a static memory range, so it's always valid.
unsafe {
HEAP_ALLOCATOR.init(HEAP_SPACE.as_ptr(), KERNEL_HEAP_SIZE);
HEAP_ALLOCATOR.init(HEAP_SPACE.as_ptr(), INIT_KERNEL_HEAP_SIZE);
}
}

View File

@ -6,9 +6,11 @@ use core::fmt;
use super::page_table::{PageTable, PageTableConfig, UserMode};
use crate::{
arch::mm::{PageTableEntry, PageTableFlags},
config::{PAGE_SIZE, PHYS_OFFSET},
prelude::*,
vm::{is_page_aligned, VmAllocOptions, VmFrame, VmFrameVec, VmReader, VmWriter},
vm::{
is_page_aligned, VmAllocOptions, VmFrame, VmFrameVec, VmReader, VmWriter,
PHYS_MEM_BASE_VADDR, PAGE_SIZE,
},
Error,
};
@ -148,7 +150,7 @@ impl MemorySet {
if let Entry::Vacant(e) = self.areas.entry(area.start_va) {
let area = e.insert(area);
for (va, frame) in area.mapper.iter() {
debug_assert!(frame.start_paddr() < PHYS_OFFSET);
debug_assert!(frame.start_paddr() < PHYS_MEM_BASE_VADDR);
self.pt.map(*va, frame, area.flags).unwrap();
}
} else {

View File

@ -32,10 +32,38 @@ pub use self::{
page_table::PageTable,
space::{VmMapOptions, VmPerm, VmSpace},
};
use crate::{
boot::memory_region::{MemoryRegion, MemoryRegionType},
config::{KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET},
};
use crate::boot::memory_region::{MemoryRegion, MemoryRegionType};
pub const PAGE_SIZE: usize = 0x1000;
/// The maximum virtual address of user space (exclusive).
///
/// Typically, 64-bit systems have at least a 48-bit virtual address space.
/// A typical way to reserve half of the address space for the kernel is
/// to use the higher half of the 48-bit virtual address space.
///
/// Also, the top page is not regarded as usable since it's a workaround
/// for some x86_64 CPUs' bugs. See
/// <https://github.com/torvalds/linux/blob/480e035fc4c714fb5536e64ab9db04fedc89e910/arch/x86/include/asm/page_64.h#L68-L78>
/// for the rationale.
pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000 - PAGE_SIZE;
/// Start of the kernel address space.
///
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
///
/// This is also the base address of the direct mapping of all physical
/// memory in the kernel address space.
pub(crate) const PHYS_MEM_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000;
/// The kernel code is linearly mapped to this address.
///
/// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer. But we disabled it because the framework
/// doesn't support a relocatable kernel yet.
pub fn kernel_loaded_offset() -> usize {
0xffff_ffff_8000_0000
}
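Taken together, these constants split the canonical x86-64 address space into a user half, a direct mapping of all physical memory, and the linearly mapped kernel image. A minimal, self-contained sketch of that split using the values above; the `classify` helper and the `KERNEL_LOADED_OFFSET` constant name are illustrative only, not part of the crate:

```rust
// Illustrative sketch; `classify` is a hypothetical helper, not aster-frame API.
const PAGE_SIZE: usize = 0x1000;
const MAX_USERSPACE_VADDR: usize = 0x0000_8000_0000_0000 - PAGE_SIZE;
const PHYS_MEM_BASE_VADDR: usize = 0xffff_8000_0000_0000;
const KERNEL_LOADED_OFFSET: usize = 0xffff_ffff_8000_0000; // kernel_loaded_offset()

fn classify(va: usize) -> &'static str {
    if va < MAX_USERSPACE_VADDR {
        "user space"
    } else if (PHYS_MEM_BASE_VADDR..KERNEL_LOADED_OFFSET).contains(&va) {
        "direct mapping of physical memory"
    } else if va >= KERNEL_LOADED_OFFSET {
        "linearly mapped kernel image"
    } else {
        "hole / reserved (unusable)"
    }
}

fn main() {
    assert_eq!(classify(0x0000_0000_0040_0000), "user space");
    assert_eq!(classify(0xffff_8000_1234_5000), "direct mapping of physical memory");
    assert_eq!(classify(0xffff_ffff_8010_0000), "linearly mapped kernel image");
}
```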
/// Get physical address trait
pub trait HasPaddr {
@ -43,9 +71,9 @@ pub trait HasPaddr {
}
pub fn vaddr_to_paddr(va: Vaddr) -> Option<Paddr> {
if (PHYS_OFFSET..=KERNEL_OFFSET).contains(&va) {
if (PHYS_MEM_BASE_VADDR..=kernel_loaded_offset()).contains(&va) {
// can use offset to get the physical address
Some(va - PHYS_OFFSET)
Some(va - PHYS_MEM_BASE_VADDR)
} else {
page_table::vaddr_to_paddr(va)
}
@ -57,7 +85,7 @@ pub const fn is_page_aligned(p: usize) -> bool {
/// Convert physical address to virtual address using offset, only available inside aster-frame
pub(crate) fn paddr_to_vaddr(pa: usize) -> usize {
pa + PHYS_OFFSET
pa + PHYS_MEM_BASE_VADDR
}
/// Only available inside aster-frame
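A self-contained sketch of the offset translation above: physical addresses become virtual by adding PHYS_MEM_BASE_VADDR, and only addresses inside the direct-mapping range can be translated back by subtraction. The constants are the ones introduced in this commit; the free functions are simplified stand-ins, since the crate's vaddr_to_paddr falls back to a page-table walk instead of returning None:

```rust
// Sketch under the assumptions above; not the crate's exact behavior.
const PHYS_MEM_BASE_VADDR: usize = 0xffff_8000_0000_0000;
const KERNEL_LOADED_OFFSET: usize = 0xffff_ffff_8000_0000; // kernel_loaded_offset()

fn paddr_to_vaddr(pa: usize) -> usize {
    // Every physical page is visible through the direct mapping.
    pa + PHYS_MEM_BASE_VADDR
}

fn vaddr_to_paddr(va: usize) -> Option<usize> {
    // Only the direct-mapping range can be translated by pure arithmetic.
    if (PHYS_MEM_BASE_VADDR..KERNEL_LOADED_OFFSET).contains(&va) {
        Some(va - PHYS_MEM_BASE_VADDR)
    } else {
        None // the real function would do a page-table lookup here
    }
}

fn main() {
    let pa = 0x0123_4000;
    let va = paddr_to_vaddr(pa);
    assert_eq!(va, 0xffff_8000_0123_4000);
    assert_eq!(vaddr_to_paddr(va), Some(pa));
}
```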

View File

@ -9,10 +9,9 @@ use spin::Once;
use super::{paddr_to_vaddr, Paddr, Vaddr, VmAllocOptions};
use crate::{
arch::mm::{is_kernel_vaddr, is_user_vaddr, tlb_flush, PageTableEntry},
config::{ENTRY_COUNT, PAGE_SIZE},
arch::mm::{is_kernel_vaddr, is_user_vaddr, tlb_flush, PageTableEntry, NR_ENTRIES_PER_PAGE},
sync::SpinLock,
vm::VmFrame,
vm::{VmFrame, PAGE_SIZE},
};
pub trait PageTableFlagsTrait: Clone + Copy + Sized + Pod + Debug {
@ -77,9 +76,9 @@ pub trait PageTableEntryTrait: Clone + Copy + Sized + Pod + Debug {
/// The index of the next PTE is determined based on the virtual address and the current level, and the level range is [1,5].
///
/// For example, in x86 we use the following expression to get the index (ENTRY_COUNT is 512):
/// For example, in x86 we use the following expression to get the index (NR_ENTRIES_PER_PAGE is 512):
/// ```
/// va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
/// va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
/// ```
///
fn page_index(va: Vaddr, level: usize) -> usize;
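As a concrete illustration of the expression above, here is a standalone sketch that mirrors the x86 implementation (assuming 4 KiB pages and 512-entry tables, so each level consumes 9 address bits):

```rust
// Standalone sketch of the index expression; mirrors the x86 code in this commit.
const NR_ENTRIES_PER_PAGE: usize = 512;

fn page_index(va: usize, level: usize) -> usize {
    debug_assert!((1..=5).contains(&level));
    // `>>` binds tighter than `&`, so this is (va >> shift) & mask.
    va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}

fn main() {
    let va = 0xffff_8000_0004_2000usize;
    assert_eq!(page_index(va, 1), 0x42); // bits 12..21: page-table index
    assert_eq!(page_index(va, 2), 0);    // bits 21..30: page-directory index
    assert_eq!(page_index(va, 4), 256);  // bits 39..48: first higher-half PML4 slot
}
```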
@ -395,7 +394,7 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
}
}
/// Read `ENTRY_COUNT` of PageTableEntry from an address
/// Read `NR_ENTRIES_PER_PAGE` `PageTableEntry`s starting from an address
///
/// # Safety
///
@ -406,7 +405,7 @@ pub unsafe fn table_of<'a, T: PageTableEntryTrait>(pa: Paddr) -> Option<&'a mut
return None;
}
let ptr = super::paddr_to_vaddr(pa) as *mut _;
Some(core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT))
Some(core::slice::from_raw_parts_mut(ptr, NR_ENTRIES_PER_PAGE))
}
/// Translate a virtual address to a physical address for addresses that cannot be translated with the linear offset

View File

@ -5,7 +5,7 @@ use core::ops::Range;
use bitflags::bitflags;
use super::{is_page_aligned, MapArea, MemorySet, VmFrameVec, VmIo};
use crate::{arch::mm::PageTableFlags, config::PAGE_SIZE, prelude::*, sync::Mutex, Error};
use crate::{arch::mm::PageTableFlags, prelude::*, sync::Mutex, vm::PAGE_SIZE, Error};
/// Virtual memory space.
///

View File

@ -14,9 +14,8 @@ pub(crate) use alloc::{
pub(crate) use core::{any::Any, ffi::CStr, fmt::Debug};
pub(crate) use aster_frame::{
config::PAGE_SIZE,
sync::{Mutex, MutexGuard, RwLock, RwMutex, SpinLock, SpinLockGuard},
vm::Vaddr,
vm::{Vaddr, PAGE_SIZE},
};
pub(crate) use bitflags::bitflags;
pub(crate) use int_to_c_enum::TryFromInt;

View File

@ -14,30 +14,40 @@ use user_heap::UserHeap;
use crate::vm::vmar::Vmar;
/*
* The user vm space layout is look like below.
* |-----------------------|-------The highest user vm address
* | |
* | Mmap Areas |
* | |
* | |
* --------------------------------The init stack base
* | |
* | User Stack(Init Stack)|
* | |
* | || |
* ----------||----------------------The user stack top, grows down
* | \/ |
* | |
* | Unmapped Areas |
* | |
* | /\ |
* ----------||---------------------The user heap top, grows up
* | || |
* | |
* | User Heap |
* | |
* ----------------------------------The user heap base
*/
* The user's virtual memory space layout looks like the figure below.
* TODO: The layout of the user heap does not match the current implementation,
* and currently the initial program break is a fixed value.
*
* (high address)
* +---------------------+ <------+ The top of Vmar, which is the highest address usable
* | | Randomly padded pages
* +---------------------+ <------+ The base of the initial user stack
* | User stack |
* | |
* +---------||----------+ <------+ The user stack limit, can be extended lower
* | \/ |
* | ... |
* | |
* | MMAP Spaces |
* | |
* | ... |
* | /\ |
* +---------||----------+ <------+ The current program break
* | User heap |
* | |
* +---------------------+ <------+ The original program break
* | | Randomly padded pages
* +---------------------+ <------+ The end of the program's last segment
* | |
* | Loaded segments |
* | .text, .data, .bss |
* | , etc. |
* | |
* +---------------------+ <------+ The bottom of Vmar at 0x1_0000
* | | 64 KiB unusable space
* +---------------------+
* (low address)
*/
/// The virtual space usage.
/// This struct is used to control brk and mmap now.

View File

@ -7,7 +7,7 @@
use core::mem;
use align_ext::AlignExt;
use aster_frame::vm::{VmIo, VmPerm};
use aster_frame::vm::{VmIo, VmPerm, MAX_USERSPACE_VADDR};
use aster_rights::{Full, Rights};
use super::{
@ -20,15 +20,16 @@ use crate::{
vm::{perms::VmPerms, vmar::Vmar, vmo::VmoOptions},
};
pub const INIT_STACK_BASE: Vaddr = 0x0000_0000_2000_0000;
pub const INIT_STACK_SIZE: usize = 0x1000 * 16; // 64KB
pub const INIT_STACK_SIZE: usize = 64 * 1024; // 64 KiB
/*
* The initial stack of a process looks like below(This figure is from occlum):
* Illustration of the virtual memory space containing the process's init stack:
*
*
* +---------------------+ <------+ Top of stack
* | | (high address)
* (high address)
* +---------------------+ <------+ Highest address
* | | Random stack paddings
* +---------------------+ <------+ The base of stack (stack grows down)
* | |
* | Null-terminated |
* | strings referenced |
* | by variables below |
@ -62,8 +63,10 @@ pub const INIT_STACK_SIZE: usize = 0x1000 * 16; // 64KB
* +---------------------+
* | |
* | |
* + +
*
* +---------------------+
* | |
* +---------------------+ <------+ User stack default rlimit
* (low address)
*/
pub struct InitStack {
/// The high address of init stack
@ -93,9 +96,13 @@ impl InitStack {
}
}
/// This function only works for the first process
pub fn new_default_config(argv: Vec<CString>, envp: Vec<CString>) -> Self {
let init_stack_top = INIT_STACK_BASE - PAGE_SIZE;
let nr_pages_padding = {
let mut random_nr_pages_padding: u8 = 0;
getrandom::getrandom(random_nr_pages_padding.as_bytes_mut()).unwrap();
random_nr_pages_padding as usize
};
let init_stack_top = MAX_USERSPACE_VADDR - PAGE_SIZE * nr_pages_padding;
let init_stack_size = INIT_STACK_SIZE;
InitStack::new(init_stack_top, init_stack_size, argv, envp)
}
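For a sense of scale, the random padding above shaves at most 255 pages (just under 1 MiB) off the top of user space before the stack is placed. A small sketch with the values from this commit; the `padded_stack_top` helper name is illustrative, and the kernel draws the page count from getrandom rather than taking it as a parameter:

```rust
// Illustrative sketch of the stack-top computation in new_default_config.
const PAGE_SIZE: usize = 0x1000;
const MAX_USERSPACE_VADDR: usize = 0x0000_8000_0000_0000 - PAGE_SIZE;

fn padded_stack_top(nr_pages_padding: u8) -> usize {
    MAX_USERSPACE_VADDR - PAGE_SIZE * nr_pages_padding as usize
}

fn main() {
    assert_eq!(padded_stack_top(0), 0x0000_7fff_ffff_f000);
    // With the maximum padding of 255 pages, the stack top moves ~1 MiB lower.
    assert_eq!(padded_stack_top(255), 0x0000_7fff_ffff_f000 - 255 * 0x1000);
}
```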

View File

@ -13,7 +13,10 @@
use alloc::{boxed::Box, sync::Arc};
use aster_frame::{config::PAGE_SIZE, sync::Mutex, vm::VmIo};
use aster_frame::{
sync::Mutex,
vm::{VmIo, PAGE_SIZE},
};
use aster_rights::Rights;
use aster_time::Instant;
use aster_util::coeff::Coeff;

View File

@ -11,7 +11,7 @@ pub mod vm_mapping;
use core::ops::Range;
use align_ext::AlignExt;
use aster_frame::vm::VmSpace;
use aster_frame::vm::{VmSpace, MAX_USERSPACE_VADDR};
use aster_rights::Rights;
use self::{
@ -124,11 +124,8 @@ impl VmarInner {
}
}
// FIXME: How to set the correct root vmar range?
// We should not include addr 0 here(is this right?), since the 0 addr means the null pointer.
// We should include addr 0x0040_0000, since non-pie executables typically are put on 0x0040_0000.
const ROOT_VMAR_LOWEST_ADDR: Vaddr = 0x0010_0000;
const ROOT_VMAR_HIGHEST_ADDR: Vaddr = 0x1000_0000_0000;
const ROOT_VMAR_LOWEST_ADDR: Vaddr = 0x001_0000; // 64 KiB is the Linux configurable default
const ROOT_VMAR_CAP_ADDR: Vaddr = MAX_USERSPACE_VADDR;
impl Interval<usize> for Arc<Vmar_> {
fn range(&self) -> Range<usize> {
@ -161,7 +158,7 @@ impl Vmar_ {
pub fn new_root() -> Arc<Self> {
let mut free_regions = BTreeMap::new();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_HIGHEST_ADDR);
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_CAP_ADDR);
free_regions.insert(root_region.start(), root_region);
let vmar_inner = VmarInner {
is_destroyed: false,
@ -169,7 +166,7 @@ impl Vmar_ {
vm_mappings: BTreeMap::new(),
free_regions,
};
Vmar_::new(vmar_inner, VmSpace::new(), 0, ROOT_VMAR_HIGHEST_ADDR, None)
Vmar_::new(vmar_inner, VmSpace::new(), 0, ROOT_VMAR_CAP_ADDR, None)
}
fn is_root_vmar(&self) -> bool {
@ -279,7 +276,7 @@ impl Vmar_ {
inner.child_vmar_s.clear();
inner.vm_mappings.clear();
inner.free_regions.clear();
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_HIGHEST_ADDR);
let root_region = FreeRegion::new(ROOT_VMAR_LOWEST_ADDR..ROOT_VMAR_CAP_ADDR);
inner.free_regions.insert(root_region.start(), root_region);
Ok(())
}

View File

@ -2,7 +2,7 @@
//! Options for allocating child VMARs.
use aster_frame::{config::PAGE_SIZE, Error, Result};
use aster_frame::{vm::PAGE_SIZE, Error, Result};
use super::Vmar;
@ -142,14 +142,14 @@ mod test {
use crate::vm::{
page_fault_handler::PageFaultHandler,
perms::VmPerms,
vmar::ROOT_VMAR_HIGHEST_ADDR,
vmar::ROOT_VMAR_CAP_ADDR,
vmo::{VmoOptions, VmoRightsOp},
};
#[ktest]
fn root_vmar() {
let vmar = Vmar::<Full>::new_root();
assert!(vmar.size() == ROOT_VMAR_HIGHEST_ADDR);
assert!(vmar.size() == ROOT_VMAR_CAP_ADDR);
}
#[ktest]

View File

@ -49,7 +49,7 @@ use self::{
prelude::*,
};
pub const BLOCK_SIZE: usize = aster_frame::config::PAGE_SIZE;
pub const BLOCK_SIZE: usize = aster_frame::vm::PAGE_SIZE;
pub const SECTOR_SIZE: usize = 512;
pub trait BlockDevice: Send + Sync + Any + Debug {

View File

@ -13,7 +13,12 @@ use core::{
ops::{Index, IndexMut},
};
use aster_frame::{boot, config::PAGE_SIZE, io_mem::IoMem, sync::SpinLock, vm::VmIo};
use aster_frame::{
boot,
io_mem::IoMem,
sync::SpinLock,
vm::{VmIo, PAGE_SIZE},
};
use component::{init_component, ComponentInitError};
use font8x8::UnicodeFonts;
use spin::Once;

View File

@ -4,7 +4,7 @@ use alloc::{boxed::Box, fmt::Debug, string::ToString, sync::Arc, vec::Vec};
use core::hint::spin_loop;
use aster_console::{AnyConsoleDevice, ConsoleCallback};
use aster_frame::{config::PAGE_SIZE, io_mem::IoMem, sync::SpinLock, trap::TrapFrame};
use aster_frame::{io_mem::IoMem, sync::SpinLock, trap::TrapFrame, vm::PAGE_SIZE};
use aster_util::safe_ptr::SafePtr;
use log::debug;

View File

@ -8,12 +8,11 @@ use aster_frame::{
bus::MmioDevice,
device::{MmioCommonDevice, VirtioMmioVersion},
},
config::PAGE_SIZE,
io_mem::IoMem,
offset_of,
sync::RwLock,
trap::IrqCallbackFunction,
vm::DmaCoherent,
vm::{DmaCoherent, PAGE_SIZE},
};
use aster_rights::{ReadOp, WriteOp};
use aster_util::{field_ptr, safe_ptr::SafePtr};