Understand the memory space and move the stack higher

Zhang Junyang
2024-03-13 13:56:04 +08:00
committed by Tate, Hongliang Tian
parent dede22843a
commit 52f07458f7
29 changed files with 155 additions and 125 deletions

View File

@@ -15,8 +15,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
config::PHYS_OFFSET,
vm::paddr_to_vaddr,
vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR},
};
static BOOT_PARAMS: Once<BootParams> = Once::new();
@@ -71,7 +70,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
let hdr = &BOOT_PARAMS.get().unwrap().hdr;
let ptr = hdr.ramdisk_image as usize;
// We must return a slice composed of VAs since the kernel should read everything in VAs.
let base_va = if ptr < PHYS_OFFSET {
let base_va = if ptr < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(ptr)
} else {
ptr
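
The conditional above exists because a bootloader may hand the kernel either a raw physical address or a pointer that already lies in the higher-half direct mapping, and the kernel must always dereference virtual addresses. Below is a minimal, standalone sketch of that normalization; the constant value is taken from this commit, while `normalize_to_vaddr` is a hypothetical helper name, not framework API.

```rust
/// Base of the kernel's linear mapping of all physical memory (value from the diff above).
const PHYS_MEM_BASE_VADDR: usize = 0xffff_8000_0000_0000;

/// Linear-mapping conversion, as in `paddr_to_vaddr` above.
fn paddr_to_vaddr(pa: usize) -> usize {
    pa + PHYS_MEM_BASE_VADDR
}

/// Hypothetical helper: make sure a bootloader-provided pointer is a VA.
fn normalize_to_vaddr(addr: usize) -> usize {
    if addr < PHYS_MEM_BASE_VADDR {
        // Below the direct-mapping base, so it must be a physical address.
        paddr_to_vaddr(addr)
    } else {
        // Already a kernel virtual address; use it as-is.
        addr
    }
}

fn main() {
    assert_eq!(normalize_to_vaddr(0x0100_0000), 0xffff_8000_0100_0000);
    assert_eq!(normalize_to_vaddr(0xffff_8000_0100_0000), 0xffff_8000_0100_0000);
}
```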

View File

@@ -12,8 +12,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
config::PHYS_OFFSET,
vm::paddr_to_vaddr,
vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR},
};
global_asm!(include_str!("header.S"));
@@ -77,7 +76,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
)
};
// We must return a slice composed of VAs since the kernel should read everything in VAs.
let base_va = if start < PHYS_OFFSET {
let base_va = if start < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(start)
} else {
start

View File

@@ -17,7 +17,7 @@ use crate::boot::{
global_asm!(include_str!("header.S"));
use crate::{config::PHYS_OFFSET, vm::paddr_to_vaddr};
use crate::vm::{paddr_to_vaddr, PHYS_MEM_BASE_VADDR};
pub(super) const MULTIBOOT2_ENTRY_MAGIC: u32 = 0x36d76289;
@@ -58,7 +58,7 @@ fn init_initramfs(initramfs: &'static Once<&'static [u8]>) {
.expect("No Multiboot2 modules found!");
let base_addr = mb2_module_tag.start_address() as usize;
// We must return a slice composed of VAs since the kernel should read everything in VAs.
let base_va = if base_addr < PHYS_OFFSET {
let base_va = if base_addr < PHYS_MEM_BASE_VADDR {
paddr_to_vaddr(base_addr)
} else {
base_addr

View File

@@ -3,7 +3,7 @@
use pod::Pod;
use crate::{
config::ENTRY_COUNT,
arch::x86::mm::NR_ENTRIES_PER_PAGE,
vm::page_table::{PageTableEntryTrait, PageTableFlagsTrait},
};
@@ -153,6 +153,6 @@ impl PageTableEntryTrait for PageTableEntry {
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}
}

View File

@@ -6,7 +6,6 @@ use pod::Pod;
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
use crate::{
config::ENTRY_COUNT,
sync::Mutex,
vm::{
page_table::{table_of, PageTableEntryTrait, PageTableFlagsTrait},
@@ -14,6 +13,8 @@ use crate::{
},
};
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
bitflags::bitflags! {
#[derive(Pod)]
#[repr(C)]
@@ -210,7 +211,7 @@ impl PageTableEntryTrait for PageTableEntry {
fn page_index(va: crate::vm::Vaddr, level: usize) -> usize {
debug_assert!((1..=5).contains(&level));
va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}
}

View File

@@ -12,11 +12,11 @@ use tdx_guest::{
use crate::{
arch::mm::{is_kernel_vaddr, PageTableFlags},
config::PAGE_SIZE,
vm::{
paddr_to_vaddr,
page_table::{PageTableError, KERNEL_PAGE_TABLE},
},
PAGE_SIZE,
};
const SHARED_BIT: u8 = 51;
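
For context, `SHARED_BIT` is the guest-physical-address bit that a TDX guest toggles to mark a page as shared with the host VMM (bit 51 when the guest-physical address width is 52 bits). The helpers below are a hedged, standalone illustration of that convention, not the framework's actual TDX API.

```rust
const SHARED_BIT: u8 = 51; // value from the diff above
const SHARED_MASK: u64 = 1 << SHARED_BIT;

/// Hypothetical helper: mark a guest-physical address as shared with the VMM.
fn mark_shared(gpa: u64) -> u64 {
    gpa | SHARED_MASK
}

/// Hypothetical helper: mark a guest-physical address as private to the TD.
fn mark_private(gpa: u64) -> u64 {
    gpa & !SHARED_MASK
}

fn main() {
    let gpa = 0x1234_5000u64;
    assert_ne!(mark_shared(gpa), gpa); // the shared bit is now set
    assert_eq!(mark_private(mark_shared(gpa)), gpa); // and cleared again
}
```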

View File

@@ -1,23 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
#![allow(unused)]
use log::Level;
pub const USER_STACK_SIZE: usize = PAGE_SIZE * 4;
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
pub const KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
pub const KERNEL_OFFSET: usize = 0xffffffff80000000;
pub const PHYS_OFFSET: usize = 0xFFFF800000000000;
pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const KVA_START: usize = (usize::MAX) << PAGE_SIZE_BITS;
pub const DEFAULT_LOG_LEVEL: Level = Level::Error;
pub const REAL_TIME_TASK_PRI: u16 = 100;

View File

@@ -28,7 +28,6 @@ extern crate static_assertions;
pub mod arch;
pub mod boot;
pub mod bus;
pub mod config;
pub mod console;
pub mod cpu;
mod error;

View File

@@ -1,16 +1,20 @@
// SPDX-License-Identifier: MPL-2.0
use log::{Metadata, Record};
use log::{Level, Metadata, Record};
use crate::{config::DEFAULT_LOG_LEVEL, early_println};
use crate::early_println;
const LOGGER: Logger = Logger {};
/// FIXME: The logs should be readable from files in userspace,
/// and the log level should be configurable.
pub const INIT_LOG_LEVEL: Level = Level::Error;
struct Logger {}
impl log::Log for Logger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= DEFAULT_LOG_LEVEL
metadata.level() <= INIT_LOG_LEVEL
}
fn log(&self, record: &Record) {
@@ -24,6 +28,6 @@ impl log::Log for Logger {
pub(crate) fn init() {
log::set_logger(&LOGGER)
.map(|()| log::set_max_level(DEFAULT_LOG_LEVEL.to_level_filter()))
.map(|()| log::set_max_level(INIT_LOG_LEVEL.to_level_filter()))
.unwrap();
}
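
After `init` runs, filtering happens in two places: `set_max_level` lets the `log` macros drop records above `INIT_LOG_LEVEL` cheaply, and `enabled` re-checks the level before anything is printed. A hosted sketch of the same wiring, using `println!` in place of the framework's early console:

```rust
use log::{Level, LevelFilter, Metadata, Record};

const INIT_LOG_LEVEL: Level = Level::Error;

struct Logger;

static LOGGER: Logger = Logger;

impl log::Log for Logger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= INIT_LOG_LEVEL
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("[{}] {}", record.level(), record.args());
        }
    }

    fn flush(&self) {}
}

fn main() {
    log::set_logger(&LOGGER)
        .map(|()| log::set_max_level(INIT_LOG_LEVEL.to_level_filter()))
        .unwrap();

    log::error!("printed: Error passes the Error filter");
    log::info!("dropped: Info is filtered out by the max level");
    assert_eq!(log::max_level(), LevelFilter::Error);
}
```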

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use crate::config::REAL_TIME_TASK_PRI;
pub const REAL_TIME_TASK_PRIORITY: u16 = 100;
/// The priority of a task.
/// Similar to Linux, a larger value represents a lower priority,
@@ -44,6 +44,6 @@ impl Priority {
}
pub const fn is_real_time(&self) -> bool {
self.0 < REAL_TIME_TASK_PRI
self.0 < REAL_TIME_TASK_PRIORITY
}
}
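
The comparison follows the Linux convention described above: the numeric value grows as the priority drops, so anything strictly below `REAL_TIME_TASK_PRIORITY` (100) counts as real time. A standalone sketch of just that check; the real `Priority` type has more constructors and validation than this hypothetical `new`.

```rust
pub const REAL_TIME_TASK_PRIORITY: u16 = 100;

/// Smaller value = higher scheduling priority, as in Linux.
pub struct Priority(u16);

impl Priority {
    pub const fn new(val: u16) -> Self {
        Priority(val)
    }

    pub const fn is_real_time(&self) -> bool {
        self.0 < REAL_TIME_TASK_PRIORITY
    }
}

fn main() {
    assert!(Priority::new(1).is_real_time());    // high priority: real time
    assert!(!Priority::new(120).is_real_time()); // normal task range
}
```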

View File

@@ -9,14 +9,15 @@ use super::{
};
use crate::{
arch::mm::PageTableFlags,
config::{KERNEL_STACK_SIZE, PAGE_SIZE},
cpu::CpuSet,
prelude::*,
sync::{Mutex, MutexGuard},
user::UserSpace,
vm::{page_table::KERNEL_PAGE_TABLE, VmAllocOptions, VmSegment},
vm::{page_table::KERNEL_PAGE_TABLE, VmAllocOptions, VmSegment, PAGE_SIZE},
};
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
core::arch::global_asm!(include_str!("switch.S"));
#[derive(Debug, Default, Clone, Copy)]

View File

@@ -10,7 +10,7 @@ pub use dma_stream::{DmaDirection, DmaStream};
use spin::Once;
use super::Paddr;
use crate::{arch::iommu::has_iommu, config::PAGE_SIZE, sync::SpinLock};
use crate::{arch::iommu::has_iommu, sync::SpinLock, vm::PAGE_SIZE};
/// If a device performs DMA to read or write system
/// memory, the addresses used by the device are device addresses.

View File

@@ -9,7 +9,7 @@ use core::{
use pod::Pod;
use super::{frame_allocator, HasPaddr, VmIo};
use crate::{config::PAGE_SIZE, prelude::*, Error};
use crate::{prelude::*, vm::PAGE_SIZE, Error};
/// A collection of page frames (physical memory pages).
///

View File

@@ -10,8 +10,8 @@ use spin::Once;
use super::{frame::VmFrameFlags, VmFrame, VmFrameVec, VmSegment};
use crate::{
boot::memory_region::{MemoryRegion, MemoryRegionType},
config::PAGE_SIZE,
sync::SpinLock,
vm::PAGE_SIZE,
};
pub(super) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();

View File

@@ -11,11 +11,10 @@ use log::debug;
use super::paddr_to_vaddr;
use crate::{
config::{KERNEL_HEAP_SIZE, PAGE_SIZE},
prelude::*,
sync::SpinLock,
trap::disable_local,
vm::frame_allocator::FRAME_ALLOCATOR,
vm::{frame_allocator::FRAME_ALLOCATOR, PAGE_SIZE},
Error,
};
@@ -27,12 +26,14 @@ pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
const INIT_KERNEL_HEAP_SIZE: usize = PAGE_SIZE * 256;
static mut HEAP_SPACE: [u8; INIT_KERNEL_HEAP_SIZE] = [0; INIT_KERNEL_HEAP_SIZE];
pub fn init() {
// Safety: The HEAP_SPACE is a static memory range, so it's always valid.
unsafe {
HEAP_ALLOCATOR.init(HEAP_SPACE.as_ptr(), KERNEL_HEAP_SIZE);
HEAP_ALLOCATOR.init(HEAP_SPACE.as_ptr(), INIT_KERNEL_HEAP_SIZE);
}
}
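
The point of this hunk is that the initial kernel heap is simply a static byte array baked into the kernel image, so dynamic allocation works before the frame allocator exists; `init` only hands the array's pointer and `INIT_KERNEL_HEAP_SIZE` to the global allocator. The sketch below illustrates the same pattern with a toy bump allocator of our own, not the framework's actual `HEAP_ALLOCATOR`.

```rust
use core::alloc::{GlobalAlloc, Layout};
use core::sync::atomic::{AtomicUsize, Ordering};

const INIT_KERNEL_HEAP_SIZE: usize = 0x1000 * 256; // 1 MiB, as in the diff

// Backing storage lives in the kernel image itself (.bss), so no frame
// allocator is needed before this heap becomes usable.
static mut HEAP_SPACE: [u8; INIT_KERNEL_HEAP_SIZE] = [0; INIT_KERNEL_HEAP_SIZE];

/// A toy bump allocator over `HEAP_SPACE`; it never frees memory.
struct EarlyHeap {
    next: AtomicUsize, // offset of the next free byte within HEAP_SPACE
}

unsafe impl GlobalAlloc for EarlyHeap {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let base = core::ptr::addr_of!(HEAP_SPACE) as usize;
        let mut cur = self.next.load(Ordering::Relaxed);
        loop {
            // Round the allocation start up to the requested alignment.
            let start = (base + cur + layout.align() - 1) & !(layout.align() - 1);
            let end = start + layout.size();
            if end > base + INIT_KERNEL_HEAP_SIZE {
                return core::ptr::null_mut(); // early heap exhausted
            }
            match self
                .next
                .compare_exchange(cur, end - base, Ordering::Relaxed, Ordering::Relaxed)
            {
                Ok(_) => return start as *mut u8,
                Err(observed) => cur = observed, // raced with another CPU; retry
            }
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // Intentionally a no-op: fine for boot-time allocations that live forever.
    }
}

// In a kernel this would be registered with #[global_allocator].
static EARLY_HEAP: EarlyHeap = EarlyHeap { next: AtomicUsize::new(0) };
```

A bump allocator is the simplest thing that satisfies `GlobalAlloc`; the real allocator behind `HEAP_ALLOCATOR` also supports freeing, but the initialization shown in the diff (a raw pointer plus `INIT_KERNEL_HEAP_SIZE`) follows the same idea.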

View File

@@ -6,9 +6,11 @@ use core::fmt;
use super::page_table::{PageTable, PageTableConfig, UserMode};
use crate::{
arch::mm::{PageTableEntry, PageTableFlags},
config::{PAGE_SIZE, PHYS_OFFSET},
prelude::*,
vm::{is_page_aligned, VmAllocOptions, VmFrame, VmFrameVec, VmReader, VmWriter},
vm::{
is_page_aligned, VmAllocOptions, VmFrame, VmFrameVec, VmReader, VmWriter,
PHYS_MEM_BASE_VADDR, PAGE_SIZE,
},
Error,
};
@@ -148,7 +150,7 @@ impl MemorySet {
if let Entry::Vacant(e) = self.areas.entry(area.start_va) {
let area = e.insert(area);
for (va, frame) in area.mapper.iter() {
debug_assert!(frame.start_paddr() < PHYS_OFFSET);
debug_assert!(frame.start_paddr() < PHYS_MEM_BASE_VADDR);
self.pt.map(*va, frame, area.flags).unwrap();
}
} else {

View File

@@ -32,10 +32,38 @@ pub use self::{
page_table::PageTable,
space::{VmMapOptions, VmPerm, VmSpace},
};
use crate::{
boot::memory_region::{MemoryRegion, MemoryRegionType},
config::{KERNEL_OFFSET, PAGE_SIZE, PHYS_OFFSET},
};
use crate::boot::memory_region::{MemoryRegion, MemoryRegionType};
pub const PAGE_SIZE: usize = 0x1000;
/// The maximum virtual address of user space (non-inclusive).
///
/// Typically, 64-bit systems have at least a 48-bit virtual address space.
/// A common way to reserve half of the address space for the kernel is
/// to give the kernel the higher half of the 48-bit virtual address space.
///
/// Also, the top page is not regarded as usable; this exclusion is a
/// workaround for bugs in some x86_64 CPUs. See
/// <https://github.com/torvalds/linux/blob/480e035fc4c714fb5536e64ab9db04fedc89e910/arch/x86/include/asm/page_64.h#L68-L78>
/// for the rationale.
pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000 - PAGE_SIZE;
/// Start of the kernel address space.
///
/// This is the _lowest_ address in x86-64's _high_ canonical address range.
///
/// This is also the base address of the direct mapping of all physical
/// memory in the kernel address space.
pub(crate) const PHYS_MEM_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000;
/// The kernel code is linearly mapped to this address.
///
/// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer, but randomization is disabled for now because
/// the framework doesn't support a relocatable kernel yet.
pub fn kernel_loaded_offset() -> usize {
0xffff_ffff_8000_0000
}
/// Get physical address trait
pub trait HasPaddr {
@@ -43,9 +71,9 @@ pub trait HasPaddr {
}
pub fn vaddr_to_paddr(va: Vaddr) -> Option<Paddr> {
if (PHYS_OFFSET..=KERNEL_OFFSET).contains(&va) {
if (PHYS_MEM_BASE_VADDR..=kernel_loaded_offset()).contains(&va) {
// can use offset to get the physical address
Some(va - PHYS_OFFSET)
Some(va - PHYS_MEM_BASE_VADDR)
} else {
page_table::vaddr_to_paddr(va)
}
@@ -57,7 +85,7 @@ pub const fn is_page_aligned(p: usize) -> bool {
/// Convert physical address to virtual address using offset, only available inside aster-frame
pub(crate) fn paddr_to_vaddr(pa: usize) -> usize {
pa + PHYS_OFFSET
pa + PHYS_MEM_BASE_VADDR
}
/// Only available inside aster-frame
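
Taken together, the new constants pin down the layout: userspace sits below `MAX_USERSPACE_VADDR`, the direct map of all physical memory starts at `PHYS_MEM_BASE_VADDR`, and the kernel image is linked at `kernel_loaded_offset()`. Below is a standalone sketch of the two conversions built on that layout; the constant values are copied from the diff, and `vaddr_to_paddr_linear` is a simplified stand-in for the fallback-to-page-table logic shown above.

```rust
const PAGE_SIZE: usize = 0x1000;
const MAX_USERSPACE_VADDR: usize = 0x0000_8000_0000_0000 - PAGE_SIZE;
const PHYS_MEM_BASE_VADDR: usize = 0xffff_8000_0000_0000;
const KERNEL_LOADED_OFFSET: usize = 0xffff_ffff_8000_0000;

/// Paddr -> Vaddr through the linear (direct) mapping: a single offset.
fn paddr_to_vaddr(pa: usize) -> usize {
    pa + PHYS_MEM_BASE_VADDR
}

/// Vaddr -> Paddr, but only for addresses inside the direct mapping.
/// Anything else (kernel image, userspace) needs a page-table walk.
fn vaddr_to_paddr_linear(va: usize) -> Option<usize> {
    if (PHYS_MEM_BASE_VADDR..=KERNEL_LOADED_OFFSET).contains(&va) {
        Some(va - PHYS_MEM_BASE_VADDR)
    } else {
        None
    }
}

fn main() {
    // Userspace ends just below the lower canonical half; the direct map
    // begins at the bottom of the higher canonical half.
    assert!(MAX_USERSPACE_VADDR < PHYS_MEM_BASE_VADDR);
    assert!(PHYS_MEM_BASE_VADDR < KERNEL_LOADED_OFFSET);

    let pa = 0x20_0000;
    assert_eq!(vaddr_to_paddr_linear(paddr_to_vaddr(pa)), Some(pa));
    assert_eq!(vaddr_to_paddr_linear(0x1000), None); // a userspace VA
}
```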

View File

@@ -9,10 +9,9 @@ use spin::Once;
use super::{paddr_to_vaddr, Paddr, Vaddr, VmAllocOptions};
use crate::{
arch::mm::{is_kernel_vaddr, is_user_vaddr, tlb_flush, PageTableEntry},
config::{ENTRY_COUNT, PAGE_SIZE},
arch::mm::{is_kernel_vaddr, is_user_vaddr, tlb_flush, PageTableEntry, NR_ENTRIES_PER_PAGE},
sync::SpinLock,
vm::VmFrame,
vm::{VmFrame, PAGE_SIZE},
};
pub trait PageTableFlagsTrait: Clone + Copy + Sized + Pod + Debug {
@@ -77,9 +76,9 @@ pub trait PageTableEntryTrait: Clone + Copy + Sized + Pod + Debug {
/// The index of the next PTE is determined based on the virtual address and the current level, and the level range is [1,5].
///
/// For example, in x86 we use the following expression to get the index (ENTRY_COUNT is 512):
/// For example, in x86 we use the following expression to get the index (NR_ENTRIES_PER_PAGE is 512):
/// ```
/// va >> (12 + 9 * (level - 1)) & (ENTRY_COUNT - 1)
/// va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
/// ```
///
fn page_index(va: Vaddr, level: usize) -> usize;
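
A worked example of the indexing expression: the low 12 bits of a VA are the in-page offset, and each paging level above that consumes 9 more bits, because 512 eight-byte entries exactly fill one 4 KiB page-table page. A standalone sketch using the same expression as the doc comment:

```rust
const NR_ENTRIES_PER_PAGE: usize = 512;

fn page_index(va: usize, level: usize) -> usize {
    debug_assert!((1..=5).contains(&level));
    // `>>` binds tighter than `&`, so this is (va >> shift) & 0x1ff.
    va >> (12 + 9 * (level - 1)) & (NR_ENTRIES_PER_PAGE - 1)
}

fn main() {
    let va = 0xffff_8000_1234_5678usize;
    assert_eq!(page_index(va, 1), (va >> 12) & 0x1ff); // PT index, bits 12..21
    assert_eq!(page_index(va, 2), (va >> 21) & 0x1ff); // PD index, bits 21..30
    assert_eq!(page_index(va, 3), (va >> 30) & 0x1ff); // PDPT index, bits 30..39
    assert_eq!(page_index(va, 4), (va >> 39) & 0x1ff); // PML4 index, bits 39..48
}
```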
@@ -395,7 +394,7 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
}
}
/// Read `ENTRY_COUNT` of PageTableEntry from an address
/// Read `NR_ENTRIES_PER_PAGE` of PageTableEntry from an address
///
/// # Safety
///
@@ -406,7 +405,7 @@ pub unsafe fn table_of<'a, T: PageTableEntryTrait>(pa: Paddr) -> Option<&'a mut
return None;
}
let ptr = super::paddr_to_vaddr(pa) as *mut _;
Some(core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT))
Some(core::slice::from_raw_parts_mut(ptr, NR_ENTRIES_PER_PAGE))
}
/// Translate a virtual address to a physical address for addresses that cannot be converted with the linear-mapping offset

View File

@@ -5,7 +5,7 @@ use core::ops::Range;
use bitflags::bitflags;
use super::{is_page_aligned, MapArea, MemorySet, VmFrameVec, VmIo};
use crate::{arch::mm::PageTableFlags, config::PAGE_SIZE, prelude::*, sync::Mutex, Error};
use crate::{arch::mm::PageTableFlags, prelude::*, sync::Mutex, vm::PAGE_SIZE, Error};
/// Virtual memory space.
///