Combine vm and mm modules into one

Yuke Peng 2023-03-16 04:36:48 -07:00 committed by Tate, Hongliang Tian
parent 840fe53845
commit 8dc7bda147
17 changed files with 220 additions and 530 deletions

View File

@@ -5,8 +5,6 @@ use limine::{LimineFramebufferRequest, LimineMemoryMapEntryType};
 use spin::Mutex;
 use volatile::Volatile;

-use crate::mm;
-
 pub(crate) static WRITER: Mutex<Option<Writer>> = Mutex::new(None);
 static FRAMEBUFFER_REUEST: LimineFramebufferRequest = LimineFramebufferRequest::new(0);
@@ -19,7 +17,7 @@ pub(crate) fn init() {
     assert_eq!(response.framebuffer_count, 1);
     let mut writer = None;
     let mut size = 0;
-    for i in mm::MEMORY_REGIONS.get().unwrap().iter() {
+    for i in crate::vm::MEMORY_REGIONS.get().unwrap().iter() {
         if i.typ == LimineMemoryMapEntryType::Framebuffer {
             size = i.len as usize;
         }

View File

@@ -1,6 +1,6 @@
 use core::ptr::NonNull;

-use crate::{config, mm::address::phys_to_virt};
+use crate::{config, vm::phys_to_virt};
 use acpi::{AcpiHandler, AcpiTables};
 use lazy_static::lazy_static;
 use limine::LimineRsdpRequest;

View File

@@ -60,8 +60,7 @@ pub fn init() {
             panic!("Unknown interrupt model")
         }
     };
-    let mut io_apic =
-        unsafe { IoApic::new(crate::mm::address::phys_to_virt(ioapic_address as usize)) };
+    let mut io_apic = unsafe { IoApic::new(crate::vm::phys_to_virt(ioapic_address as usize)) };
     let id = io_apic.id();
     let version = io_apic.version();

View File

@@ -1,3 +1,4 @@
+use crate::vm::phys_to_virt;
 use acpi::{AcpiError, HpetInfo};
 use alloc::vec::Vec;
 use spin::Once;
@@ -35,16 +36,15 @@ struct HPET {
 impl HPET {
     fn new(base_address: usize) -> HPET {
         let information_register_ref = unsafe {
-            &*(crate::mm::address::phys_to_virt(base_address + OFFSET_ID_REGISTER) as *mut usize
-                as *mut u32)
+            &*(phys_to_virt(base_address + OFFSET_ID_REGISTER) as *mut usize as *mut u32)
         };
         let general_configuration_register_ref = unsafe {
-            &mut *(crate::mm::address::phys_to_virt(base_address + OFFSET_CONFIGURATION_REGISTER)
-                as *mut usize as *mut u32)
+            &mut *(phys_to_virt(base_address + OFFSET_CONFIGURATION_REGISTER) as *mut usize
+                as *mut u32)
         };
         let general_interrupt_status_register_ref = unsafe {
-            &mut *(crate::mm::address::phys_to_virt(base_address + OFFSET_INTERRUPT_STATUS_REGISTER)
-                as *mut usize as *mut u32)
+            &mut *(phys_to_virt(base_address + OFFSET_INTERRUPT_STATUS_REGISTER) as *mut usize
+                as *mut u32)
         };
         let information_register = Volatile::new_read_only(information_register_ref);
@@ -58,8 +58,8 @@ impl HPET {
         for i in 0..num_comparator {
             let comp = Volatile::new(unsafe {
-                &mut *(crate::mm::address::phys_to_virt(base_address + 0x100 + i as usize * 0x20)
-                    as *mut usize as *mut HPETTimerRegister)
+                &mut *(phys_to_virt(base_address + 0x100 + i as usize * 0x20) as *mut usize
+                    as *mut HPETTimerRegister)
             });
             comparators.push(comp);
         }
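Note: the recurring pattern in this driver — physical MMIO base plus register offset, converted through the kernel's linear mapping, then wrapped in Volatile — can be seen in isolation. A minimal sketch (the 0x10 offset and u32 width are illustrative, not taken from the HPET register layout):

    use volatile::Volatile;

    /// Map one 32-bit MMIO register into the kernel's linear mapping.
    /// The caller must pass a valid, device-backed physical address.
    unsafe fn mmio_reg_u32(base_pa: usize, offset: usize) -> Volatile<&'static mut u32> {
        let ptr = crate::vm::phys_to_virt(base_pa + offset) as *mut u32;
        // Volatile stops the compiler from caching or reordering the access.
        Volatile::new(&mut *ptr)
    }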

View File

@@ -1,4 +1,4 @@
-use crate::mm;
+use crate::vm;
 use log::debug;
 use spin::{Mutex, Once};
 use x86::apic::xapic;
@@ -47,7 +47,7 @@ pub(crate) fn has_apic() -> bool {
 pub(crate) fn init() {
     super::pic::disable_temp();
-    let mut apic = XAPIC::new(mm::address::phys_to_virt(get_apic_base_address()));
+    let mut apic = XAPIC::new(vm::phys_to_virt(get_apic_base_address()));
     // enable apic
     set_apic_base_address(get_apic_base_address());

View File

@@ -22,7 +22,6 @@ pub mod device;
 mod driver;
 mod error;
 pub mod logger;
-pub mod mm;
 pub mod prelude;
 pub mod sync;
 pub mod task;
@@ -58,7 +57,7 @@ pub fn init() {
     device::serial::init();
     logger::init();
     boot::init();
-    mm::init();
+    vm::init();
     trap::init();
     device::init();
     driver::init();

View File

@@ -1,247 +0,0 @@
-use core::ops::{Add, AddAssign, Sub, SubAssign};
-
-use alloc::fmt;
-
-use crate::config::{PAGE_SIZE, PHYS_OFFSET};
-
-#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
-#[repr(transparent)]
-pub struct PhysAddr(pub usize);
-
-#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
-#[repr(transparent)]
-pub struct VirtAddr(pub usize);
-
-pub const fn phys_to_virt(pa: usize) -> usize {
-    pa + PHYS_OFFSET
-}
-
-pub const fn virt_to_phys(va: usize) -> usize {
-    va - PHYS_OFFSET
-}
-
-impl PhysAddr {
-    pub const fn kvaddr(self) -> VirtAddr {
-        VirtAddr(phys_to_virt(self.0))
-    }
-    pub const fn align_down(self) -> Self {
-        Self(align_down(self.0))
-    }
-    pub const fn align_up(self) -> Self {
-        Self(align_up(self.0))
-    }
-    pub const fn page_offset(self) -> usize {
-        page_offset(self.0)
-    }
-    pub const fn is_aligned(self) -> bool {
-        is_aligned(self.0)
-    }
-}
-
-impl fmt::Debug for PhysAddr {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_tuple("PhysAddr")
-            .field(&format_args!("{:#x}", self.0))
-            .finish()
-    }
-}
-
-impl fmt::Binary for PhysAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Binary::fmt(&self.0, f)
-    }
-}
-
-impl fmt::LowerHex for PhysAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::LowerHex::fmt(&self.0, f)
-    }
-}
-
-impl fmt::Octal for PhysAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Octal::fmt(&self.0, f)
-    }
-}
-
-impl fmt::UpperHex for PhysAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::UpperHex::fmt(&self.0, f)
-    }
-}
-
-impl fmt::Pointer for PhysAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Pointer::fmt(&(self.0 as *const ()), f)
-    }
-}
-
-impl Add<usize> for PhysAddr {
-    type Output = Self;
-    #[inline]
-    fn add(self, rhs: usize) -> Self::Output {
-        PhysAddr(self.0 + rhs)
-    }
-}
-
-impl AddAssign<usize> for PhysAddr {
-    #[inline]
-    fn add_assign(&mut self, rhs: usize) {
-        *self = *self + rhs;
-    }
-}
-
-impl Sub<usize> for PhysAddr {
-    type Output = Self;
-    #[inline]
-    fn sub(self, rhs: usize) -> Self::Output {
-        PhysAddr(self.0 - rhs)
-    }
-}
-
-impl SubAssign<usize> for PhysAddr {
-    #[inline]
-    fn sub_assign(&mut self, rhs: usize) {
-        *self = *self - rhs;
-    }
-}
-
-impl Sub<PhysAddr> for PhysAddr {
-    type Output = u64;
-    #[inline]
-    fn sub(self, rhs: PhysAddr) -> Self::Output {
-        self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
-    }
-}
-
-impl VirtAddr {
-    pub const fn as_ptr(self) -> *mut u8 {
-        self.0 as _
-    }
-    pub const fn align_down(self) -> Self {
-        Self(align_down(self.0))
-    }
-    pub const fn align_up(self) -> Self {
-        Self(align_up(self.0))
-    }
-    pub const fn page_offset(self) -> usize {
-        page_offset(self.0)
-    }
-    pub const fn is_aligned(self) -> bool {
-        is_aligned(self.0)
-    }
-}
-
-impl VirtAddr {
-    pub fn get_bytes_array(&self) -> &'static mut [u8] {
-        unsafe { core::slice::from_raw_parts_mut(self.0 as *mut u8, 4096) }
-    }
-    pub fn get_ref<T>(&self) -> &'static T {
-        unsafe { (self.0 as *const T).as_ref().unwrap() }
-    }
-    pub fn get_mut<T>(&self) -> &'static mut T {
-        unsafe { (self.0 as *mut T).as_mut().unwrap() }
-    }
-}
-
-impl fmt::Debug for VirtAddr {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_tuple("VirtAddr")
-            .field(&format_args!("{:#x}", self.0))
-            .finish()
-    }
-}
-
-impl fmt::Binary for VirtAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Binary::fmt(&self.0, f)
-    }
-}
-
-impl fmt::LowerHex for VirtAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::LowerHex::fmt(&self.0, f)
-    }
-}
-
-impl fmt::Octal for VirtAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Octal::fmt(&self.0, f)
-    }
-}
-
-impl fmt::UpperHex for VirtAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::UpperHex::fmt(&self.0, f)
-    }
-}
-
-impl fmt::Pointer for VirtAddr {
-    #[inline]
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Pointer::fmt(&(self.0 as *const ()), f)
-    }
-}
-
-impl Add<usize> for VirtAddr {
-    type Output = Self;
-    #[inline]
-    fn add(self, rhs: usize) -> Self::Output {
-        VirtAddr(self.0 + rhs)
-    }
-}
-
-impl AddAssign<usize> for VirtAddr {
-    #[inline]
-    fn add_assign(&mut self, rhs: usize) {
-        *self = *self + rhs;
-    }
-}
-
-impl Sub<usize> for VirtAddr {
-    type Output = Self;
-    #[inline]
-    fn sub(self, rhs: usize) -> Self::Output {
-        VirtAddr(self.0 - rhs)
-    }
-}
-
-impl SubAssign<usize> for VirtAddr {
-    #[inline]
-    fn sub_assign(&mut self, rhs: usize) {
-        *self = *self - rhs;
-    }
-}
-
-impl Sub<VirtAddr> for VirtAddr {
-    type Output = u64;
-    #[inline]
-    fn sub(self, rhs: VirtAddr) -> Self::Output {
-        self.0.checked_sub(rhs.0).unwrap().try_into().unwrap()
-    }
-}
-
-pub const fn align_down(p: usize) -> usize {
-    p & !(PAGE_SIZE - 1)
-}
-
-pub const fn align_up(p: usize) -> usize {
-    (p + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
-}
-
-pub const fn page_offset(p: usize) -> usize {
-    p & (PAGE_SIZE - 1)
-}
-
-pub const fn is_aligned(p: usize) -> bool {
-    page_offset(p) == 0
-}

View File

@@ -1,64 +0,0 @@
-//! memory management.
-
-pub mod address;
-mod frame_allocator;
-mod heap_allocator;
-mod memory_set;
-pub(crate) mod page_table;
-
-pub use self::{
-    frame_allocator::PhysFrame,
-    memory_set::{MapArea, MemorySet},
-    page_table::{translate_not_offset_virtual_address, PageTable},
-};
-use address::PhysAddr;
-use address::VirtAddr;
-use alloc::vec::Vec;
-use limine::LimineMemmapRequest;
-use log::debug;
-use spin::Once;
-
-bitflags::bitflags! {
-    /// Possible flags for a page table entry.
-    pub struct PTFlags: usize {
-        /// Specifies whether the mapped frame or page table is loaded in memory.
-        const PRESENT = 1;
-        /// Controls whether writes to the mapped frames are allowed.
-        const WRITABLE = 1 << 1;
-        /// Controls whether accesses from userspace (i.e. ring 3) are permitted.
-        const USER = 1 << 2;
-        /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
-        /// policy is used.
-        const WRITE_THROUGH = 1 << 3;
-        /// Disables caching for the pointed entry is cacheable.
-        const NO_CACHE = 1 << 4;
-        /// Indicates that the mapping is present in all address spaces, so it isn't flushed from
-        /// the TLB on an address space switch.
-        const GLOBAL = 1 << 8;
-        /// Forbid execute codes on the page. The NXE bits in EFER msr must be set.
-        const NO_EXECUTE = 1 << 63;
-    }
-}
-
-/// Only available inside jinux-frame
-pub(crate) static MEMORY_REGIONS: Once<Vec<&limine::LimineMemmapEntry>> = Once::new();
-
-static MEMMAP_REQUEST: LimineMemmapRequest = LimineMemmapRequest::new(0);
-
-pub(crate) fn init() {
-    heap_allocator::init();
-    let mut memory_regions = Vec::new();
-    let response = MEMMAP_REQUEST
-        .get_response()
-        .get()
-        .expect("Not found memory region information");
-    for i in response.memmap() {
-        debug!("Found memory region:{:x?}", **i);
-        memory_regions.push(&**i);
-    }
-    frame_allocator::init(&memory_regions);
-    page_table::init();
-    MEMORY_REGIONS.call_once(|| memory_regions);
-}

View File

@@ -132,7 +132,7 @@ impl Task {
         result.task_inner.lock().task_status = TaskStatus::Runnable;
         result.task_inner.lock().ctx.rip = kernel_task_entry as usize;
         result.task_inner.lock().ctx.regs.rsp =
-            (result.kstack.frame.end_pa().unwrap().kvaddr().0) as u64;
+            (crate::vm::phys_to_virt(result.kstack.frame.end_pa().unwrap())) as u64;
         let arc_self = Arc::new(result);
         switch_to_task(arc_self.clone());
@@ -172,7 +172,7 @@ impl Task {
         result.task_inner.lock().task_status = TaskStatus::Runnable;
         result.task_inner.lock().ctx.rip = kernel_task_entry as usize;
         result.task_inner.lock().ctx.regs.rsp =
-            (result.kstack.frame.end_pa().unwrap().kvaddr().0) as u64;
+            (crate::vm::phys_to_virt(result.kstack.frame.end_pa().unwrap())) as u64;
         Ok(Arc::new(result))
     }
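Note: ctx.regs.rsp must hold a virtual address, while end_pa() now returns a bare Paddr, so the conversion that kvaddr() used to perform becomes an explicit phys_to_virt. A sketch of the expression, with kstack_end_pa standing in for result.kstack.frame.end_pa().unwrap():

    fn initial_rsp(kstack_end_pa: usize) -> u64 {
        // the kernel stack grows down from the top (end) of its frames
        crate::vm::phys_to_virt(kstack_end_pa) as u64
    }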

View File

@@ -1,12 +1,12 @@
 use core::iter::Iterator;

-use crate::{config::PAGE_SIZE, mm::address::PhysAddr, prelude::*, Error};
+use crate::{config::PAGE_SIZE, prelude::*, Error};
 use pod::Pod;

-use super::VmIo;
+use super::{Paddr, VmIo};
 use alloc::vec;

-use crate::mm::PhysFrame;
+use super::frame_allocator::PhysFrame;

 /// A collection of page frames (physical memory pages).
 ///
@@ -66,9 +66,9 @@ impl VmFrameVec {
     }

     /// get the end pa of the collection
-    pub fn end_pa(&self) -> Option<PhysAddr> {
+    pub fn end_pa(&self) -> Option<Paddr> {
         if let Some(frame) = self.0.last() {
-            Some(PhysAddr(frame.paddr() + PAGE_SIZE))
+            Some(frame.paddr() + PAGE_SIZE)
         } else {
             None
         }
@@ -148,7 +148,7 @@ impl VmIo for VmFrameVec {
             } else {
                 let copy_len = (PAGE_SIZE - start).min(remain);
                 let src = &mut buf[processed..processed + copy_len];
-                let dst = &pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
+                let dst = unsafe { &pa.as_slice()[start..src.len() + start] };
                 src.copy_from_slice(dst);
                 processed += copy_len;
                 remain -= copy_len;
@@ -171,7 +171,7 @@ impl VmIo for VmFrameVec {
             } else {
                 let copy_len = (PAGE_SIZE - start).min(remain);
                 let src = &buf[processed..processed + copy_len];
-                let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
+                let dst = unsafe { &mut pa.as_slice()[start..src.len() + start] };
                 dst.copy_from_slice(src);
                 processed += copy_len;
                 remain -= copy_len;
@@ -321,10 +321,17 @@ impl VmFrame {
     /// Allocate a new VmFrame filled with zero
     pub(crate) fn alloc_zero() -> Option<Self> {
-        let phys = PhysFrame::alloc_zero();
+        let phys = PhysFrame::alloc();
         if phys.is_none() {
             return None;
         }
+        unsafe {
+            core::ptr::write_bytes(
+                super::phys_to_virt(phys.as_ref().unwrap().start_pa()) as *mut u8,
+                0,
+                PAGE_SIZE,
+            )
+        };
         Some(Self {
             physical_frame: Arc::new(phys.unwrap()),
         })
@@ -342,18 +349,28 @@ impl VmFrame {
     /// Returns the physical address of the page frame.
     pub fn paddr(&self) -> Paddr {
-        self.physical_frame.start_pa().0
+        self.physical_frame.start_pa()
     }

     /// fill the frame with zero
     pub fn zero(&self) {
-        unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
+        unsafe {
+            core::ptr::write_bytes(
+                super::phys_to_virt(self.start_pa()) as *mut u8,
+                0,
+                PAGE_SIZE,
+            )
+        }
     }

-    pub fn start_pa(&self) -> PhysAddr {
+    pub fn start_pa(&self) -> Paddr {
         self.physical_frame.start_pa()
     }

+    pub fn end_pa(&self) -> Paddr {
+        self.physical_frame.end_pa()
+    }
+
     /// Returns whether the page frame is accessible by DMA.
     ///
     /// In a TEE environment, DMAable pages are untrusted pages shared with
@@ -361,6 +378,10 @@ impl VmFrame {
     pub fn can_dma(&self) -> bool {
         todo!()
     }
+
+    pub unsafe fn as_slice(&self) -> &mut [u8] {
+        core::slice::from_raw_parts_mut(super::phys_to_virt(self.start_pa()) as *mut u8, PAGE_SIZE)
+    }
 }

 impl VmIo for VmFrame {
@@ -368,7 +389,7 @@ impl VmIo for VmFrame {
         if offset >= PAGE_SIZE || buf.len() + offset > PAGE_SIZE {
             Err(Error::InvalidArgs)
         } else {
-            let dst = &self.start_pa().kvaddr().get_bytes_array()[offset..buf.len() + offset];
+            let dst = unsafe { &self.as_slice()[offset..buf.len() + offset] };
             buf.copy_from_slice(dst);
             Ok(())
         }
@@ -378,7 +399,7 @@ impl VmIo for VmFrame {
         if offset >= PAGE_SIZE || buf.len() + offset > PAGE_SIZE {
             Err(Error::InvalidArgs)
         } else {
-            let dst = &mut self.start_pa().kvaddr().get_bytes_array()[offset..buf.len() + offset];
+            let dst = unsafe { &mut self.as_slice()[offset..buf.len() + offset] };
             dst.copy_from_slice(buf);
             Ok(())
         }
@@ -387,14 +408,14 @@ impl VmIo for VmFrame {
     /// Read a value of a specified type at a specified offset.
     fn read_val<T: Pod>(&self, offset: usize) -> Result<T> {
         let paddr = self.paddr() + offset;
-        let val = unsafe { &mut *(crate::mm::address::phys_to_virt(paddr) as *mut T) };
+        let val = unsafe { &mut *(super::phys_to_virt(paddr) as *mut T) };
         Ok(*val)
     }

     /// Write a value of a specified type at a specified offset.
     fn write_val<T: Pod>(&self, offset: usize, new_val: &T) -> Result<()> {
         let paddr = self.paddr() + offset;
-        unsafe { (crate::mm::address::phys_to_virt(paddr) as *mut T).write(*new_val) };
+        unsafe { (super::phys_to_virt(paddr) as *mut T).write(*new_val) };
         Ok(())
     }
 }
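Note: every kvaddr().get_bytes_array() call site above collapses into the new unsafe as_slice() accessor. A usage sketch, assuming allocation succeeds; the unsafe contract is that no other live reference aliases the frame, since as_slice() hands out &mut [u8] from &self:

    let frame = VmFrame::alloc_zero().expect("out of frames");
    // SAFETY: this code path holds the only reference to the frame's memory.
    let bytes = unsafe { frame.as_slice() }; // &mut [u8] of length PAGE_SIZE
    bytes[..4].copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);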

View File

@@ -8,8 +8,6 @@ use spin::{Mutex, Once};
 use crate::{config::PAGE_SIZE, vm::Paddr};

-use super::address::PhysAddr;
-
 static FRAME_ALLOCATOR: Once<Mutex<FrameAllocator>> = Once::new();

 bitflags::bitflags! {
@@ -25,12 +23,12 @@ pub struct PhysFrame {
 }

 impl PhysFrame {
-    pub const fn start_pa(&self) -> PhysAddr {
-        PhysAddr(self.frame_index() * PAGE_SIZE)
+    pub const fn start_pa(&self) -> Paddr {
+        self.frame_index() * PAGE_SIZE
     }

-    pub const fn end_pa(&self) -> PhysAddr {
-        PhysAddr((self.frame_index() + 1) * PAGE_SIZE)
+    pub const fn end_pa(&self) -> Paddr {
+        (self.frame_index() + 1) * PAGE_SIZE
     }

     pub fn alloc() -> Option<Self> {
@@ -68,20 +66,6 @@ impl PhysFrame {
         })
     }

-    pub fn alloc_zero() -> Option<Self> {
-        let mut f = Self::alloc()?;
-        f.zero();
-        Some(f)
-    }
-
-    pub fn zero(&mut self) {
-        unsafe { core::ptr::write_bytes(self.start_pa().kvaddr().as_ptr(), 0, PAGE_SIZE) }
-    }
-
-    pub fn as_slice(&self) -> &mut [u8] {
-        unsafe { core::slice::from_raw_parts_mut(self.start_pa().kvaddr().as_ptr(), PAGE_SIZE) }
-    }
-
     const fn need_dealloc(&self) -> bool {
         (self.frame_index & PhysFrameFlags::NEED_DEALLOC.bits()) != 0
     }

View File

@@ -1,29 +1,24 @@
-use super::{page_table::PageTable, *};
+use super::page_table::{PTFlags, PageTable};
+use crate::prelude::*;
 use crate::{
     config::PAGE_SIZE,
-    mm::address::is_aligned,
+    vm::is_aligned,
     vm::{VmFrame, VmFrameVec},
-    *,
 };
-use crate::{prelude::*, Error};
 use alloc::collections::{btree_map::Entry, BTreeMap};
 use core::fmt;

 pub struct MapArea {
-    /// flags
     pub flags: PTFlags,
-    /// start virtual address
-    pub start_va: VirtAddr,
-    /// the size of these area
+    pub start_va: Vaddr,
     pub size: usize,
-    /// all the map information
-    pub mapper: BTreeMap<VirtAddr, VmFrame>,
+    pub mapper: BTreeMap<Vaddr, VmFrame>,
 }

 pub struct MemorySet {
     pub pt: PageTable,
     /// all the map area, sort by the start virtual address
-    areas: BTreeMap<VirtAddr, MapArea>,
+    areas: BTreeMap<Vaddr, MapArea>,
 }
@@ -34,10 +29,11 @@ impl MapArea {
     pub fn clone(&self) -> Self {
         let mut mapper = BTreeMap::new();
         for (&va, old) in &self.mapper {
-            let new = PhysFrame::alloc().unwrap();
-            new.as_slice()
-                .copy_from_slice(old.physical_frame.as_slice());
-            mapper.insert(va, unsafe { VmFrame::new(new) });
+            let new = VmFrame::alloc().unwrap();
+            unsafe {
+                new.as_slice().copy_from_slice(old.as_slice());
+            }
+            mapper.insert(va, new.clone());
         }
         Self {
             start_va: self.start_va,
@@ -48,16 +44,9 @@
     }

     /// This function will map the vitural address to the given physical frames
-    pub fn new(
-        start_va: VirtAddr,
-        size: usize,
-        flags: PTFlags,
-        physical_frames: VmFrameVec,
-    ) -> Self {
+    pub fn new(start_va: Vaddr, size: usize, flags: PTFlags, physical_frames: VmFrameVec) -> Self {
         assert!(
-            start_va.is_aligned()
-                && is_aligned(size)
-                && physical_frames.len() == (size / PAGE_SIZE)
+            is_aligned(start_va) && is_aligned(size) && physical_frames.len() == (size / PAGE_SIZE)
         );

         let mut map_area = Self {
@@ -79,8 +68,8 @@
         map_area
     }

-    pub fn map_with_physical_address(&mut self, va: VirtAddr, pa: VmFrame) -> PhysAddr {
-        assert!(va.is_aligned());
+    pub fn map_with_physical_address(&mut self, va: Vaddr, pa: VmFrame) -> Paddr {
+        assert!(is_aligned(va));

         match self.mapper.entry(va) {
             Entry::Occupied(e) => panic!("already mapped a input physical address"),
@@ -88,8 +77,9 @@
         }
     }

-    pub fn map(&mut self, va: VirtAddr) -> PhysAddr {
-        assert!(va.is_aligned());
+    pub fn map(&mut self, va: Vaddr) -> Paddr {
+        assert!(is_aligned(va));
+
         match self.mapper.entry(va) {
             Entry::Occupied(e) => e.get().physical_frame.start_pa(),
             Entry::Vacant(e) => e
@@ -99,7 +89,7 @@
         }
     }

-    pub fn unmap(&mut self, va: VirtAddr) -> Option<VmFrame> {
+    pub fn unmap(&mut self, va: Vaddr) -> Option<VmFrame> {
         self.mapper.remove(&va)
     }
@@ -108,19 +98,18 @@
         let mut remain = data.len();
         let mut processed = 0;
         for (va, pa) in self.mapper.iter() {
-            if current_start_address >= va.0 && current_start_address < va.0 + PAGE_SIZE {
-                let offset = current_start_address - va.0;
-                let copy_len = (va.0 + PAGE_SIZE - current_start_address).min(remain);
+            if current_start_address >= *va && current_start_address < va + PAGE_SIZE {
+                let offset = current_start_address - va;
+                let copy_len = (va + PAGE_SIZE - current_start_address).min(remain);
                 let src = &data[processed..processed + copy_len];
-                let dst =
-                    &mut pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
+                let dst = unsafe { &mut pa.as_slice()[offset..(offset + copy_len)] };
                 dst.copy_from_slice(src);
                 processed += copy_len;
                 remain -= copy_len;
                 if remain == 0 {
                     return;
                 }
-                current_start_address = va.0 + PAGE_SIZE;
+                current_start_address = va + PAGE_SIZE;
             }
         }
     }
@@ -130,41 +119,23 @@
         let mut remain = data.len();
         let mut processed = 0;
         for (va, pa) in self.mapper.iter() {
-            if start >= va.0 && start < va.0 + PAGE_SIZE {
-                let offset = start - va.0;
-                let copy_len = (va.0 + PAGE_SIZE - start).min(remain);
+            if start >= *va && start < va + PAGE_SIZE {
+                let offset = start - va;
+                let copy_len = (va + PAGE_SIZE - start).min(remain);
                 let src = &mut data[processed..processed + copy_len];
-                let dst = &pa.start_pa().kvaddr().get_bytes_array()[offset..(offset + copy_len)];
+                let dst = unsafe { &pa.as_slice()[offset..(offset + copy_len)] };
                 src.copy_from_slice(dst);
                 processed += copy_len;
                 remain -= copy_len;
                 if remain == 0 {
                     return;
                 }
-                start = va.0 + PAGE_SIZE;
+                start = va + PAGE_SIZE;
             }
         }
     }
 }

-// impl Clone for MapArea {
-//     fn clone(&self) -> Self {
-//         let mut mapper = BTreeMap::new();
-//         for (&va, old) in &self.mapper {
-//             let new = VmFrame::alloc().unwrap();
-//             new.physical_frame
-//                 .exclusive_access()
-//                 .as_slice()
-//                 .copy_from_slice(old.physical_frame.exclusive_access().as_slice());
-//             mapper.insert(va, new);
-//         }
-//         Self {
-//             flags: self.flags,
-//             mapper,
-//         }
-//     }
-// }
-
 impl MemorySet {
     pub fn map(&mut self, area: MapArea) {
         if area.size > 0 {
@@ -180,8 +151,8 @@ impl MemorySet {
         }
     }

-    /// determine whether a virtaddr is in a mapped area
-    pub fn is_mapped(&self, vaddr: VirtAddr) -> bool {
+    /// determine whether a Vaddr is in a mapped area
+    pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
         for (start_address, map_area) in self.areas.iter() {
             if *start_address > vaddr {
                 break;
@@ -200,7 +171,7 @@
         }
     }

-    pub fn unmap(&mut self, va: VirtAddr) -> Result<()> {
+    pub fn unmap(&mut self, va: Vaddr) -> Result<()> {
         if let Some(area) = self.areas.remove(&va) {
             self.pt.unmap_area(&area);
             Ok(())
@@ -222,11 +193,11 @@
         let start_write = false;
         let mut offset = 0usize;
         for (va, area) in self.areas.iter_mut() {
-            if current_addr >= va.0 && current_addr < area.size + va.0 {
+            if current_addr >= *va && current_addr < area.size + va {
                 if !area.flags.contains(PTFlags::WRITABLE) {
                     return Err(Error::PageFault);
                 }
-                let write_len = remain.min(area.size + va.0 - current_addr);
+                let write_len = remain.min(area.size + va - current_addr);
                 area.write_data(current_addr, &data[offset..(offset + write_len)]);
                 offset += write_len;
                 remain -= write_len;
@@ -234,7 +205,7 @@
                 if remain == 0 {
                     return Ok(());
                 }
-                current_addr = va.0 + area.size;
+                current_addr = va + area.size;
             } else if start_write {
                 return Err(Error::PageFault);
             }
@@ -248,8 +219,8 @@
         let mut offset = 0usize;
         let start_read = false;
         for (va, area) in self.areas.iter() {
-            if current_addr >= va.0 && current_addr < area.size + va.0 {
-                let read_len = remain.min(area.size + va.0 - current_addr);
+            if current_addr >= *va && current_addr < area.size + va {
+                let read_len = remain.min(area.size + va - current_addr);
                 area.read_data(current_addr, &mut data[offset..(offset + read_len)]);
                 remain -= read_len;
                 offset += read_len;
@@ -257,7 +228,7 @@
                 if remain == 0 {
                     return Ok(());
                 }
-                current_addr = va.0 + area.size;
+                current_addr = va + area.size;
             } else if start_read {
                 return Err(Error::PageFault);
             }
@@ -266,7 +237,7 @@
     }

     pub fn protect(&mut self, addr: Vaddr, flags: PTFlags) {
-        let va = VirtAddr(addr);
+        let va = addr;
         self.pt.protect(va, flags)
     }
 }
@@ -303,35 +274,3 @@ impl fmt::Debug for MemorySet {
             .finish()
     }
 }
-
-// pub fn load_app(elf_data: &[u8]) -> (usize, MemorySet) {
-//     let elf = ElfFile::new(elf_data).expect("invalid ELF file");
-//     assert_eq!(elf.header.pt1.class(), header::Class::SixtyFour, "64-bit ELF required");
-//     assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not an executable object");
-//     assert_eq!(elf.header.pt2.machine().as_machine(), header::Machine::X86_64, "invalid ELF arch");
-//     let mut ms = MemorySet::new();
-//     for ph in elf.program_iter() {
-//         if ph.get_type() != Ok(Type::Load) {
-//             continue;
-//         }
-//         let va = VirtAddr(ph.virtual_addr() as _);
-//         let offset = va.page_offset();
-//         let area_start = va.align_down();
-//         let area_end = VirtAddr((ph.virtual_addr() + ph.mem_size()) as _).align_up();
-//         let data = match ph.get_data(&elf).unwrap() {
-//             SegmentData::Undefined(data) => data,
-//             _ => panic!("failed to get ELF segment data"),
-//         };
-//         let mut flags = PTFlags::PRESENT | PTFlags::USER;
-//         if ph.flags().is_write() {
-//             flags |= PTFlags::WRITABLE;
-//         }
-//         let mut area = MapArea::new(area_start, area_end.0 - area_start.0, flags);
-//         area.write_data(offset, data);
-//         ms.insert(area);
-//     }
-//     ms.insert(MapArea::new(VirtAddr(USTACK_TOP - USTACK_SIZE), USTACK_SIZE,
-//         PTFlags::PRESENT | PTFlags::WRITABLE | PTFlags::USER));
-//     (elf.header.pt2.entry_point() as usize, ms)
-// }

View File

@@ -7,10 +7,72 @@ pub type Vaddr = usize;
 pub type Paddr = usize;

 mod frame;
+mod frame_allocator;
+mod heap_allocator;
 mod io;
+mod memory_set;
 mod offset;
+pub(crate) mod page_table;
 mod space;

+use crate::config::{PAGE_SIZE, PHYS_OFFSET};
+
 pub use self::frame::{VmAllocOptions, VmFrame, VmFrameVec, VmFrameVecIter};
 pub use self::io::VmIo;
 pub use self::space::{VmMapOptions, VmPerm, VmSpace};
+pub use self::{
+    memory_set::{MapArea, MemorySet},
+    page_table::{translate_not_offset_virtual_address, PageTable},
+};
+
+use alloc::vec::Vec;
+use limine::LimineMemmapRequest;
+use log::debug;
+use spin::Once;
+
+pub const fn phys_to_virt(pa: usize) -> usize {
+    pa + PHYS_OFFSET
+}
+
+pub const fn virt_to_phys(va: usize) -> usize {
+    va - PHYS_OFFSET
+}
+
+pub const fn align_down(p: usize) -> usize {
+    p & !(PAGE_SIZE - 1)
+}
+
+pub const fn align_up(p: usize) -> usize {
+    (p + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
+}
+
+pub const fn page_offset(p: usize) -> usize {
+    p & (PAGE_SIZE - 1)
+}
+
+pub const fn is_aligned(p: usize) -> bool {
+    page_offset(p) == 0
+}
+
+/// Only available inside jinux-frame
+pub(crate) static MEMORY_REGIONS: Once<Vec<&limine::LimineMemmapEntry>> = Once::new();
+
+static MEMMAP_REQUEST: LimineMemmapRequest = LimineMemmapRequest::new(0);
+
+pub(crate) fn init() {
+    heap_allocator::init();
+    let mut memory_regions = Vec::new();
+    let response = MEMMAP_REQUEST
+        .get_response()
+        .get()
+        .expect("Not found memory region information");
+    for i in response.memmap() {
+        debug!("Found memory region:{:x?}", **i);
+        memory_regions.push(&**i);
+    }
+    frame_allocator::init(&memory_regions);
+    page_table::init();
+    MEMORY_REGIONS.call_once(|| memory_regions);
+}
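Note: the alignment helpers moved here are plain bit masks over PAGE_SIZE. Concrete values, assuming the usual 4 KiB pages (PAGE_SIZE = 4096):

    assert_eq!(align_down(0x1234), 0x1000); // clear the low 12 bits
    assert_eq!(align_up(0x1234), 0x2000);   // round up to the next page boundary
    assert_eq!(page_offset(0x1234), 0x234); // keep only the low 12 bits
    assert!(is_aligned(0x2000));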

View File

@ -1,11 +1,10 @@
@@ -1,11 +1,10 @@
 use super::{
-    address::{PhysAddr, VirtAddr},
+    align_down,
     memory_set::MapArea,
-    PTFlags,
+    {Paddr, Vaddr},
 };
 use crate::{
     config::{ENTRY_COUNT, PAGE_SIZE, PHYS_OFFSET},
-    println,
     vm::VmFrame,
 };
 use alloc::{collections::BTreeMap, vec, vec::Vec};
@@ -18,6 +17,28 @@ lazy_static! {
         Mutex::new(BTreeMap::new());
 }

+bitflags::bitflags! {
+    /// Possible flags for a page table entry.
+    pub struct PTFlags: usize {
+        /// Specifies whether the mapped frame or page table is loaded in memory.
+        const PRESENT = 1;
+        /// Controls whether writes to the mapped frames are allowed.
+        const WRITABLE = 1 << 1;
+        /// Controls whether accesses from userspace (i.e. ring 3) are permitted.
+        const USER = 1 << 2;
+        /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
+        /// policy is used.
+        const WRITE_THROUGH = 1 << 3;
+        /// Disables caching for the pointed entry is cacheable.
+        const NO_CACHE = 1 << 4;
+        /// Indicates that the mapping is present in all address spaces, so it isn't flushed from
+        /// the TLB on an address space switch.
+        const GLOBAL = 1 << 8;
+        /// Forbid execute codes on the page. The NXE bits in EFER msr must be set.
+        const NO_EXECUTE = 1 << 63;
+    }
+}
+
 #[derive(Clone, Copy)]
 #[repr(transparent)]
 pub struct PageTableEntry(usize);
@@ -25,11 +46,11 @@
 impl PageTableEntry {
     const PHYS_ADDR_MASK: usize = !(PAGE_SIZE - 1);

-    pub const fn new_page(pa: PhysAddr, flags: PTFlags) -> Self {
-        Self((pa.0 & Self::PHYS_ADDR_MASK) | flags.bits)
+    pub const fn new_page(pa: Paddr, flags: PTFlags) -> Self {
+        Self((pa & Self::PHYS_ADDR_MASK) | flags.bits)
     }
-    const fn pa(self) -> PhysAddr {
-        PhysAddr(self.0 as usize & Self::PHYS_ADDR_MASK)
+    const fn pa(self) -> Paddr {
+        self.0 as usize & Self::PHYS_ADDR_MASK
     }
     const fn flags(self) -> PTFlags {
         PTFlags::from_bits_truncate(self.0)
@@ -53,7 +74,7 @@ impl fmt::Debug for PageTableEntry {
 }

 pub struct PageTable {
-    pub root_pa: PhysAddr,
+    pub root_pa: Paddr,
     /// store all the physical frame that the page table need to map all the frame e.g. the frame of the root_pa
     tables: Vec<VmFrame>,
 }
@@ -72,26 +93,15 @@
         }
     }

-    pub fn print_kernel(&self) {
-        let p4 = table_of(self.root_pa);
-        for i in 0..(256) {
-            let phys = PhysAddr(i << (12 + 27));
-            let a = p4[p4_index(phys.kvaddr())];
-            if a.is_present() {
-                println!("index:{:?},PTE:{:?}", i, a);
-            }
-        }
-    }
-
-    pub fn map(&mut self, va: VirtAddr, pa: PhysAddr, flags: PTFlags) {
+    pub fn map(&mut self, va: Vaddr, pa: Paddr, flags: PTFlags) {
         let entry = self.get_entry_or_create(va).unwrap();
         if !entry.is_unused() {
             panic!("{:#x?} is mapped before mapping", va);
         }
-        *entry = PageTableEntry::new_page(pa.align_down(), flags);
+        *entry = PageTableEntry::new_page(align_down(pa), flags);
     }

-    pub fn unmap(&mut self, va: VirtAddr) {
+    pub fn unmap(&mut self, va: Vaddr) {
         let entry = get_entry(self.root_pa, va).unwrap();
         if entry.is_unused() {
             panic!("{:#x?} is invalid before unmapping", va);
@@ -99,7 +109,7 @@
         entry.0 = 0;
     }

-    pub fn protect(&mut self, va: VirtAddr, flags: PTFlags) {
+    pub fn protect(&mut self, va: Vaddr, flags: PTFlags) {
         let entry = self.get_entry_or_create(va).unwrap();
         if entry.is_unused() || !entry.is_present() {
             panic!("{:#x?} is invalid before protect", va);
@@ -113,7 +123,7 @@
     pub fn map_area(&mut self, area: &MapArea) {
         for (va, pa) in area.mapper.iter() {
-            assert!(pa.start_pa().0 < PHYS_OFFSET);
+            assert!(pa.start_pa() < PHYS_OFFSET);
             self.map(*va, pa.start_pa(), area.flags);
         }
     }
@@ -126,14 +136,14 @@
 }

 impl PageTable {
-    fn alloc_table(&mut self) -> PhysAddr {
+    fn alloc_table(&mut self) -> Paddr {
         let frame = VmFrame::alloc_zero().unwrap();
         let pa = frame.start_pa();
         self.tables.push(frame);
         pa
     }

-    fn get_entry_or_create(&mut self, va: VirtAddr) -> Option<&mut PageTableEntry> {
+    fn get_entry_or_create(&mut self, va: Vaddr) -> Option<&mut PageTableEntry> {
         let p4 = table_of(self.root_pa);
         let p4e = &mut p4[p4_index(va)];
         let p3 = next_table_or_create(p4e, || self.alloc_table())?;
@@ -146,32 +156,23 @@
     }
 }

-const fn p4_index(va: VirtAddr) -> usize {
-    (va.0 >> (12 + 27)) & (ENTRY_COUNT - 1)
+const fn p4_index(va: Vaddr) -> usize {
+    (va >> (12 + 27)) & (ENTRY_COUNT - 1)
 }

-const fn p3_index(va: VirtAddr) -> usize {
-    (va.0 >> (12 + 18)) & (ENTRY_COUNT - 1)
+const fn p3_index(va: Vaddr) -> usize {
+    (va >> (12 + 18)) & (ENTRY_COUNT - 1)
 }

-const fn p2_index(va: VirtAddr) -> usize {
-    (va.0 >> (12 + 9)) & (ENTRY_COUNT - 1)
+const fn p2_index(va: Vaddr) -> usize {
+    (va >> (12 + 9)) & (ENTRY_COUNT - 1)
 }

-const fn p1_index(va: VirtAddr) -> usize {
-    (va.0 >> 12) & (ENTRY_COUNT - 1)
+const fn p1_index(va: Vaddr) -> usize {
+    (va >> 12) & (ENTRY_COUNT - 1)
 }

-pub fn query(root_pa: PhysAddr, va: VirtAddr) -> Option<(PhysAddr, PTFlags)> {
-    let entry = get_entry(root_pa, va)?;
-    if entry.is_unused() {
-        return None;
-    }
-    let off = va.page_offset();
-    Some((PhysAddr(entry.pa().0 + off), entry.flags()))
-}
-
-fn get_entry(root_pa: PhysAddr, va: VirtAddr) -> Option<&'static mut PageTableEntry> {
+fn get_entry(root_pa: Paddr, va: Vaddr) -> Option<&'static mut PageTableEntry> {
     let p4 = table_of(root_pa);
     let p4e = &mut p4[p4_index(va)];
     let p3 = next_table(p4e)?;
@@ -183,8 +184,8 @@ fn get_entry(root_pa: PhysAddr, va: VirtAddr) -> Option<&'static mut PageTableEntry> {
     Some(p1e)
 }

-fn table_of<'a>(pa: PhysAddr) -> &'a mut [PageTableEntry] {
-    let ptr = pa.kvaddr().as_ptr() as *mut _;
+fn table_of<'a>(pa: Paddr) -> &'a mut [PageTableEntry] {
+    let ptr = super::phys_to_virt(pa) as *mut _;
     unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) }
 }
@@ -198,7 +199,7 @@ fn next_table<'a>(entry: &PageTableEntry) -> Option<&'a mut [PageTableEntry]> {
 fn next_table_or_create<'a>(
     entry: &mut PageTableEntry,
-    mut alloc: impl FnMut() -> PhysAddr,
+    mut alloc: impl FnMut() -> Paddr,
 ) -> Option<&'a mut [PageTableEntry]> {
     if entry.is_unused() {
         let pa = alloc();
@@ -215,9 +216,9 @@ pub fn translate_not_offset_virtual_address(address: usize) -> usize {
     let (cr3, _) = x86_64::registers::control::Cr3::read();
     let cr3 = cr3.start_address().as_u64() as usize;

-    let p4 = table_of(PhysAddr(cr3));
+    let p4 = table_of(cr3);

-    let virtual_address = VirtAddr(address);
+    let virtual_address = address;

     let pte = p4[p4_index(virtual_address)];
     let p3 = table_of(pte.pa());
@@ -229,23 +230,20 @@
     let p1 = table_of(pte.pa());
     let pte = p1[p1_index(virtual_address)];
-    (pte.pa().0 & ((1 << 48) - 1)) + (address & ((1 << 12) - 1))
+    (pte.pa() & ((1 << 48) - 1)) + (address & ((1 << 12) - 1))
 }

 pub(crate) fn init() {
     let (cr3, _) = x86_64::registers::control::Cr3::read();
     let cr3 = cr3.start_address().as_u64() as usize;

-    let p4 = table_of(PhysAddr(cr3));
+    let p4 = table_of(cr3);
     // Cancel mapping in lowest addresses.
     p4[0].0 = 0;
-    // there is mapping where index is 1,2,3, so user may not use these value
     let mut map_pte = ALL_MAPPED_PTE.lock();
     for i in 0..512 {
         if p4[i].flags().contains(PTFlags::PRESENT) {
             map_pte.insert(i, p4[i]);
         }
     }
-    // Cancel mapping in lowest addresses.
-    // p4[0].0 = 0;
 }
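Note: the pN_index helpers slice a canonical x86-64 virtual address into four 9-bit table indices plus a 12-bit page offset. A sketch of the layout they implement, assuming ENTRY_COUNT = 512 (so ENTRY_COUNT - 1 = 0x1ff):

    // va = [ p4: bits 47..39 | p3: 38..30 | p2: 29..21 | p1: 20..12 | offset: 11..0 ]
    fn split(va: usize) -> (usize, usize, usize, usize, usize) {
        (
            (va >> 39) & 0x1ff, // p4_index: shift by 12 + 27
            (va >> 30) & 0x1ff, // p3_index: shift by 12 + 18
            (va >> 21) & 0x1ff, // p2_index: shift by 12 + 9
            (va >> 12) & 0x1ff, // p1_index
            va & 0xfff,         // byte offset within the 4 KiB frame
        )
    }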

View File

@@ -1,12 +1,13 @@
 use crate::config::PAGE_SIZE;
+use crate::vm::page_table::PTFlags;
 use bitflags::bitflags;
 use core::ops::Range;
 use spin::Mutex;
 use x86_64::structures::paging::PhysFrame;

-use crate::mm::address::{is_aligned, VirtAddr};
-use crate::mm::{MapArea, MemorySet, PTFlags};
-use crate::vm::VmFrameVec;
+use super::VmFrameVec;
+use super::{is_aligned, Vaddr};
+use super::{MapArea, MemorySet};
 use crate::{prelude::*, Error};

 use super::VmIo;
@@ -39,7 +40,7 @@ impl VmSpace {
     pub unsafe fn activate(&self) {
         x86_64::registers::control::Cr3::write(
             PhysFrame::from_start_address(x86_64::PhysAddr::new(
-                self.memory_set.lock().pt.root_pa.0 as u64,
+                self.memory_set.lock().pt.root_pa as u64,
             ))
             .unwrap(),
             x86_64::registers::control::Cr3Flags::PAGE_LEVEL_CACHE_DISABLE,
@@ -59,7 +60,7 @@ impl VmSpace {
         }
         // debug!("map to vm space: 0x{:x}", options.addr.unwrap());
         self.memory_set.lock().map(MapArea::new(
-            VirtAddr(options.addr.unwrap()),
+            options.addr.unwrap(),
             frames.len() * PAGE_SIZE,
             flags,
             frames,
@@ -71,7 +72,7 @@ impl VmSpace {
     /// determine whether a vaddr is already mapped
     pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
         let memory_set = self.memory_set.lock();
-        memory_set.is_mapped(VirtAddr(vaddr))
+        memory_set.is_mapped(vaddr)
     }

     /// Unmaps the physical memory pages within the VM address range.
@@ -80,7 +81,7 @@ impl VmSpace {
     /// are mapped.
     pub fn unmap(&self, range: &Range<Vaddr>) -> Result<()> {
         assert!(is_aligned(range.start) && is_aligned(range.end));
-        let mut start_va = VirtAddr(range.start);
+        let mut start_va = range.start;
         let page_size = (range.end - range.start) / PAGE_SIZE;
         let mut inner = self.memory_set.lock();
         for i in 0..page_size {

View File

@@ -338,7 +338,7 @@ struct Descriptor {
 impl Descriptor {
     fn set_buf(&mut self, buf: &[u8]) {
         self.addr =
-            jinux_frame::mm::translate_not_offset_virtual_address(buf.as_ptr() as usize) as u64;
+            jinux_frame::vm::translate_not_offset_virtual_address(buf.as_ptr() as usize) as u64;

         self.len = buf.len() as u32;
     }
@@ -348,9 +348,9 @@ fn set_buf(inframe_ptr: &InFramePtr<Descriptor>, buf: &[u8]) {
     let va = buf.as_ptr() as usize;
     let pa = if va >= jinux_frame::config::PHYS_OFFSET && va <= jinux_frame::config::KERNEL_OFFSET {
         // can use offset
-        jinux_frame::mm::address::virt_to_phys(va)
+        jinux_frame::vm::virt_to_phys(va)
     } else {
-        jinux_frame::mm::translate_not_offset_virtual_address(buf.as_ptr() as usize)
+        jinux_frame::vm::translate_not_offset_virtual_address(buf.as_ptr() as usize)
     };
     debug!("set buf write virt address:{:x}", va);
     debug!("set buf write phys address:{:x}", pa);