Do mapping in the wrapper

This commit is contained in:
Zhang Junyang
2023-12-24 21:47:16 +08:00
committed by Tate, Hongliang Tian
parent e922eaa428
commit 85d4cfdeb7
12 changed files with 322 additions and 113 deletions

View File

@ -18,7 +18,9 @@ pub fn load_elf(file: &[u8]) {
for ph in elf.program_iter() {
let ProgramHeader::Ph64(program) = ph else {
panic!("[setup] Unexpected program header type! Asterinas should be 64-bit ELF binary.");
panic!(
"[setup] Unexpected program header type! Asterinas should be 64-bit ELF binary."
);
};
if program.get_type().unwrap() == xmas_elf::program::Type::Load {
load_segment(&elf, program);

View File

@ -6,6 +6,8 @@ use uefi::{
use linux_boot_params::BootParams;
use crate::x86::paging::{Ia32eFlags, PageNumber, PageTableCreator};
#[export_name = "efi_stub_entry"]
extern "sysv64" fn efi_stub_entry(handle: Handle, mut system_table: SystemTable<Boot>) -> ! {
unsafe {
@ -100,7 +102,12 @@ fn efi_phase_runtime(
let e820_table = &mut boot_params.e820_table;
let mut e820_entries = 0usize;
for md in memory_map.entries() {
if e820_entries >= e820_table.len() || e820_entries >= 128 {
if e820_entries >= e820_table.len() || e820_entries >= 127 {
unsafe {
crate::console::print_str(
"[EFI stub] Warning: number of E820 entries exceeded 128!\n",
);
}
break;
}
e820_table[e820_entries] = linux_boot_params::BootE820Entry {
@ -120,6 +127,64 @@ fn efi_phase_runtime(
}
boot_params.e820_entries = e820_entries as u8;
unsafe {
crate::console::print_str("[EFI stub] Setting up the page table.\n");
}
// Make a new linear page table. The linear page table will be stored at
// 0x4000000, hoping that the firmware will not use this area.
let mut creator = unsafe {
PageTableCreator::new(
PageNumber::from_addr(0x4000000),
PageNumber::from_addr(0x8000000),
)
};
// Map the following regions:
// - 0x0: identity map the first 4GiB;
// - 0xffff8000_00000000: linear map 4GiB to low 4 GiB;
// - 0xffffffff_80000000: linear map 2GiB to low 2 GiB;
// - 0xffff8008_00000000: linear map 1GiB to 0x00000008_00000000.
let flags = Ia32eFlags::PRESENT | Ia32eFlags::WRITABLE;
for i in 0..4 * 1024 * 1024 * 1024 / 4096 {
let from_vpn = PageNumber::from_addr(i * 4096);
let from_vpn2 = PageNumber::from_addr(i * 4096 + 0xffff8000_00000000);
let to_low_pfn = PageNumber::from_addr(i * 4096);
creator.map(from_vpn, to_low_pfn, flags);
creator.map(from_vpn2, to_low_pfn, flags);
}
for i in 0..2 * 1024 * 1024 * 1024 / 4096 {
let from_vpn = PageNumber::from_addr(i * 4096 + 0xffffffff_80000000);
let to_low_pfn = PageNumber::from_addr(i * 4096);
creator.map(from_vpn, to_low_pfn, flags);
}
for i in 0..1024 * 1024 * 1024 / 4096 {
let from_vpn = PageNumber::from_addr(i * 4096 + 0xffff8008_00000000);
let to_pfn = PageNumber::from_addr(i * 4096 + 0x00000008_00000000);
creator.map(from_vpn, to_pfn, flags);
}
// Mark this as reserved in e820 table.
e820_table[e820_entries] = linux_boot_params::BootE820Entry {
addr: 0x4000000,
size: creator.nr_frames_used() as u64 * 4096,
typ: linux_boot_params::E820Type::Reserved,
};
e820_entries += 1;
boot_params.e820_entries = e820_entries as u8;
#[cfg(feature = "debug_print")]
unsafe {
crate::console::print_str("[EFI stub] Activating the new page table.\n");
}
unsafe {
creator.activate(x86_64::registers::control::Cr3Flags::PAGE_LEVEL_CACHE_DISABLE);
}
#[cfg(feature = "debug_print")]
unsafe {
crate::console::print_str("[EFI stub] Page table activated.\n");
}
unsafe {
use crate::console::{print_hex, print_str};
print_str("[EFI stub] Entering Asterinas entrypoint at ");
@ -127,5 +192,5 @@ fn efi_phase_runtime(
print_str("\n");
}
unsafe { super::call_aster_entrypoint(super::ASTER_ENTRY_POINT, boot_params_ptr as u64) }
unsafe { super::call_aster_entrypoint(super::ASTER_ENTRY_POINT as u64, boot_params_ptr as u64) }
}

View File

@ -8,4 +8,5 @@ cfg_if::cfg_if! {
}
}
pub mod paging;
pub mod relocation;

View File

@ -0,0 +1,198 @@
//! This module provides an abstraction over the Intel IA32E paging mechanism
//! and offers methods to create linear page tables.
//!
//! Notably, the 4-level page table has paging structures named as follows:
//! - Level-4: Page Map Level 4 (PML4), or "the root page table";
//! - Level-3: Page Directory Pointer Table (PDPT);
//! - Level-2: Page Directory (PD);
//! - Level-1: Page Table (PT).
//! We sometimes say "level-n page table" to refer to the paging structures
//! described above, avoiding the more complicated names in the Intel manual.
use x86_64::structures::paging::PhysFrame;
const TABLE_ENTRY_COUNT: usize = 512;
bitflags::bitflags! {
    /// Flag bits common to all levels of IA32E paging-structure entries.
    ///
    /// Bit positions follow the Intel SDM, Vol. 3A, "Paging" chapter.
    #[derive(Clone, Copy)]
    pub struct Ia32eFlags: u64 {
        /// The entry maps a page or references a lower-level table.
        const PRESENT = 1 << 0;
        /// Writes are allowed to the region controlled by this entry.
        const WRITABLE = 1 << 1;
        /// User-mode accesses are allowed.
        const USER = 1 << 2;
        /// Write-through caching applies to the mapped region.
        const WRITE_THROUGH = 1 << 3;
        /// Caching is disabled for the mapped region.
        const NO_CACHE = 1 << 4;
        /// Set by hardware when the entry has been used for a translation.
        const ACCESSED = 1 << 5;
        /// Set by hardware when the mapped page has been written to.
        const DIRTY = 1 << 6;
        /// The entry maps a large page (2 MiB / 1 GiB) instead of a table.
        const HUGE = 1 << 7;
        /// The translation is global (survives CR3 reloads).
        const GLOBAL = 1 << 8;
        /// Instruction fetches from the region are disallowed.
        const NO_EXECUTE = 1 << 63;
    }
}
pub struct Ia32eEntry(u64);
/// The table in the IA32E paging specification that occupies a physical page frame.
///
/// 512 eight-byte entries fill exactly one 4 KiB frame.
pub struct Ia32eTable([Ia32eEntry; TABLE_ENTRY_COUNT]);
/// A page number. It could be either a physical page number or a virtual page number.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct PageNumber(u64);

/// Returns whether `addr` lies exactly on a 4 KiB page boundary.
fn is_4k_page_aligned(addr: u64) -> bool {
    addr % 4096 == 0
}

impl PageNumber {
    /// Creates a new page number from the given address.
    ///
    /// # Panics
    /// Panics if `addr` is not 4 KiB-aligned.
    pub fn from_addr(addr: u64) -> Self {
        assert!(is_4k_page_aligned(addr));
        let number = addr >> 12;
        PageNumber(number)
    }

    /// Returns the address of the page.
    pub fn addr(&self) -> u64 {
        self.0 << 12
    }

    /// Get the physical page frame as slice.
    ///
    /// # Safety
    /// The caller must ensure that the page number is a physical page number and
    /// it is identically mapped when running the code.
    unsafe fn get_page_frame(&self) -> &'static mut [u8] {
        let base = self.addr() as *mut u8;
        core::slice::from_raw_parts_mut(base, 4096)
    }
}
impl core::ops::Add<usize> for PageNumber {
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
Self(self.0 + rhs as u64)
}
}
impl core::ops::AddAssign<usize> for PageNumber {
fn add_assign(&mut self, rhs: usize) {
self.0 += rhs as u64;
}
}
impl core::ops::Sub<PageNumber> for PageNumber {
type Output = u64;
fn sub(self, rhs: PageNumber) -> Self::Output {
self.0 - rhs.0
}
}
/// A creator for a page table.
///
/// It allocates page frames from the given physical memory range. And the first
/// page frame is always used for the PML4 table (root page table).
pub struct PageTableCreator {
    // First frame of the range; always holds the PML4 (root) table.
    first_pfn: PageNumber,
    // Next frame the bump allocator (`allocate`) will hand out.
    next_pfn: PageNumber,
    // One-past-the-end of the usable range; `allocate` asserts against it.
    end_pfn: PageNumber,
}
/// Fills the given slice with the given value.
///
/// TODO: use `slice::fill` instead. But it currently will fail with "invalid opcode".
///
/// # Safety
/// NOTE(review): the asm writes exactly `dst.len()` bytes into the exclusive
/// `&mut [u8]`, so this looks sound for any valid slice — confirm whether the
/// `unsafe` marker is needed beyond the inline asm itself.
unsafe fn memset(dst: &mut [u8], val: u8) {
    // `rep stosb` stores AL into [RDI], RCX times, advancing RDI each step.
    core::arch::asm!(
        "rep stosb",
        inout("rcx") dst.len() => _,
        inout("rdi") dst.as_mut_ptr() => _,
        in("al") val,
        options(nostack),
    );
}
impl PageTableCreator {
    /// Creates a new page table creator.
    ///
    /// The input physical memory range must be at least 4 page frames. New
    /// mappings will be written into the given physical memory range.
    ///
    /// # Safety
    /// The caller must ensure that the given physical memory range is valid.
    pub unsafe fn new(first_pfn: PageNumber, end_pfn: PageNumber) -> Self {
        // 4 frames is the minimum for one table per level (PML4 + PDPT + PD + PT).
        // The subtraction also underflow-panics if `end_pfn < first_pfn`.
        assert!(end_pfn - first_pfn >= 4);
        // Clear the first page for the PML4 table.
        memset(first_pfn.get_page_frame(), 0);
        Self {
            first_pfn,
            next_pfn: first_pfn + 1,
            end_pfn,
        }
    }

    // Bump-allocates the next frame in the range and zero-fills it.
    // Panics if the range supplied to `new` is exhausted.
    fn allocate(&mut self) -> PageNumber {
        assert!(self.next_pfn < self.end_pfn);
        let pfn = self.next_pfn;
        self.next_pfn += 1;
        unsafe {
            memset(pfn.get_page_frame(), 0);
        }
        pfn
    }

    /// Maps the 4 KiB virtual page `from` to the physical page `to` with the
    /// given flags, allocating intermediate tables (PDPT, PD, PT) on demand.
    ///
    /// NOTE(review): if an intermediate entry is already PRESENT its flags are
    /// left untouched, so the first mapping's flags win at the upper levels —
    /// confirm this is acceptable when mixing flags across mappings.
    pub fn map(&mut self, from: PageNumber, to: PageNumber, flags: Ia32eFlags) {
        // The PML4 always lives in the first frame. Tables are dereferenced via
        // their physical addresses, which relies on the identity mapping noted
        // in `PageNumber::get_page_frame`.
        let pml4 = unsafe { &mut *(self.first_pfn.addr() as *mut Ia32eTable) };
        let pml4e = pml4.index(4, from.addr());
        if !pml4e.flags().contains(Ia32eFlags::PRESENT) {
            let pdpt_pfn = self.allocate();
            pml4e.update(pdpt_pfn.addr(), flags);
        }
        let pdpt = unsafe { &mut *(pml4e.paddr() as *mut Ia32eTable) };
        let pdpte = pdpt.index(3, from.addr());
        if !pdpte.flags().contains(Ia32eFlags::PRESENT) {
            let pd_pfn = self.allocate();
            pdpte.update(pd_pfn.addr(), flags);
        }
        let pd = unsafe { &mut *(pdpte.paddr() as *mut Ia32eTable) };
        let pde = pd.index(2, from.addr());
        if !pde.flags().contains(Ia32eFlags::PRESENT) {
            let pt_pfn = self.allocate();
            pde.update(pt_pfn.addr(), flags);
        }
        let pt = unsafe { &mut *(pde.paddr() as *mut Ia32eTable) };
        let pte = pt.index(1, from.addr());
        // The leaf entry is overwritten unconditionally.
        pte.update(to.addr(), flags);
    }

    /// Returns the number of frames consumed so far, including the PML4 frame.
    pub fn nr_frames_used(&self) -> usize {
        (self.next_pfn - self.first_pfn).try_into().unwrap()
    }

    /// Activates the created page table.
    ///
    /// # Safety
    /// The caller must ensure that the page table is valid.
    pub unsafe fn activate(&self, flags: x86_64::registers::control::Cr3Flags) {
        // Loads CR3 with the physical address of the PML4 frame.
        x86_64::registers::control::Cr3::write(
            PhysFrame::from_start_address(x86_64::PhysAddr::new(self.first_pfn.addr())).unwrap(),
            flags,
        );
    }
}
impl Ia32eTable {
    /// Returns the entry of this table that the virtual address `va` selects
    /// at the given paging `level` (1 = PT, 2 = PD, 3 = PDPT, 4 = PML4).
    fn index(&mut self, level: usize, va: u64) -> &mut Ia32eEntry {
        debug_assert!((1..=5).contains(&level));
        // Each level consumes 9 bits of the VA, starting just above the
        // 12-bit in-page offset.
        let shift = 12 + 9 * (level - 1);
        let idx = (va as usize >> shift) & (TABLE_ENTRY_COUNT - 1);
        &mut self.0[idx]
    }
}
impl Ia32eEntry {
/// 51:12
const PHYS_ADDR_MASK: u64 = 0xF_FFFF_FFFF_F000;
fn paddr(&self) -> u64 {
self.0 & Self::PHYS_ADDR_MASK
}
fn flags(&self) -> Ia32eFlags {
Ia32eFlags::from_bits_truncate(self.0)
}
fn update(&mut self, paddr: u64, flags: Ia32eFlags) {
self.0 = (paddr & Self::PHYS_ADDR_MASK) | flags.bits();
}
}