diff --git a/framework/aster-frame/src/mm/frame/mod.rs b/framework/aster-frame/src/mm/frame/mod.rs
index e119d5b71..583e40130 100644
--- a/framework/aster-frame/src/mm/frame/mod.rs
+++ b/framework/aster-frame/src/mm/frame/mod.rs
@@ -18,7 +18,6 @@ pub use frame_vec::{FrameVec, FrameVecIter};
 pub use segment::Segment;
 
 use super::page::{
-    allocator,
     meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
     Page,
 };
@@ -41,7 +40,19 @@ use crate::{
 /// will be globally freed.
 #[derive(Debug, Clone)]
 pub struct Frame {
-    pub(in crate::mm) page: Page<FrameMeta>,
+    page: Page<FrameMeta>,
+}
+
+impl From<Page<FrameMeta>> for Frame {
+    fn from(page: Page<FrameMeta>) -> Self {
+        Self { page }
+    }
+}
+
+impl From<Frame> for Page<FrameMeta> {
+    fn from(frame: Frame) -> Self {
+        frame.page
+    }
 }
 
 impl HasPaddr for Frame {
@@ -140,8 +151,9 @@ impl VmIo for Frame {
 impl PageMeta for FrameMeta {
     const USAGE: PageUsage = PageUsage::Frame;
 
-    fn on_drop(page: &mut Page<Self>) {
-        unsafe { allocator::dealloc(page.paddr() / PAGE_SIZE, 1) };
+    fn on_drop(_page: &mut Page<Self>) {
+        // Nothing needs to be done here: dropping the page handle
+        // already takes care of returning the page to the allocator.
     }
 }
 
diff --git a/framework/aster-frame/src/mm/frame/options.rs b/framework/aster-frame/src/mm/frame/options.rs
index 03792509c..a5006a6ba 100644
--- a/framework/aster-frame/src/mm/frame/options.rs
+++ b/framework/aster-frame/src/mm/frame/options.rs
@@ -3,7 +3,14 @@
 //! Options for allocating frames
 
 use super::{Frame, FrameVec, Segment};
-use crate::{mm::page::allocator, prelude::*, Error};
+use crate::{
+    mm::{
+        page::{self, meta::FrameMeta},
+        PAGE_SIZE,
+    },
+    prelude::*,
+    Error,
+};
 
 /// Options for allocating physical memory pages (or frames).
 ///
@@ -49,17 +56,14 @@ impl FrameAllocOptions {
     /// Allocates a collection of page frames according to the given options.
     pub fn alloc(&self) -> Result<FrameVec> {
-        let frames = if self.is_contiguous {
-            allocator::alloc(self.nframes).ok_or(Error::NoMemory)?
+        let pages = if self.is_contiguous {
+            page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE)
+                .ok_or(Error::NoMemory)?
+                .into()
         } else {
-            let mut frame_list = Vec::new();
-            for _ in 0..self.nframes {
-                let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
-                let frame = Frame { page };
-                frame_list.push(frame);
-            }
-            FrameVec(frame_list)
+            page::allocator::alloc(self.nframes * PAGE_SIZE).ok_or(Error::NoMemory)?
         };
+        let frames = FrameVec(pages.into_iter().map(|page| Frame { page }).collect());
         if !self.uninit {
             for frame in frames.iter() {
                 frame.writer().fill(0);
             }
@@ -75,7 +79,7 @@ impl FrameAllocOptions {
             return Err(Error::InvalidArgs);
         }
 
-        let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
+        let page = page::allocator::alloc_single().ok_or(Error::NoMemory)?;
         let frame = Frame { page };
         if !self.uninit {
             frame.writer().fill(0);
@@ -93,7 +97,10 @@ impl FrameAllocOptions {
             return Err(Error::InvalidArgs);
        }
 
-        let segment = allocator::alloc_contiguous(self.nframes).ok_or(Error::NoMemory)?;
+        let segment: Segment =
+            page::allocator::alloc_contiguous::<FrameMeta>(self.nframes * PAGE_SIZE)
+                .ok_or(Error::NoMemory)?
+                .into();
         if !self.uninit {
             segment.writer().fill(0);
         }
diff --git a/framework/aster-frame/src/mm/frame/segment.rs b/framework/aster-frame/src/mm/frame/segment.rs
index 40e07dee2..c49e60360 100644
--- a/framework/aster-frame/src/mm/frame/segment.rs
+++ b/framework/aster-frame/src/mm/frame/segment.rs
@@ -8,7 +8,7 @@ use core::ops::Range;
 use super::Frame;
 use crate::{
     mm::{
-        page::{meta::FrameMeta, Page},
+        page::{cont_pages::ContPages, meta::FrameMeta, Page},
         HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
     Error, Result,
@@ -35,33 +35,10 @@ use crate::{
 /// ```
 #[derive(Debug, Clone)]
 pub struct Segment {
-    inner: Arc<SegmentInner>,
+    inner: Arc<ContPages<FrameMeta>>,
     range: Range<usize>,
 }
 
-/// This behaves like a [`Frame`] that owns a list of frame handles.
-///
-/// The ownership is acheived by the reference counting mechanism of
-/// frames. When constructing a `SegmentInner`, the frame handles are
-/// forgotten. When dropping a `SegmentInner`, the frame handles are
-/// restored and dropped.
-#[derive(Debug)]
-struct SegmentInner {
-    start: Paddr,
-    nframes: usize,
-}
-
-impl Drop for SegmentInner {
-    fn drop(&mut self) {
-        for i in 0..self.nframes {
-            let pa_i = self.start + i * PAGE_SIZE;
-            // SAFETY: for each page there would be a forgotten handle
-            // when creating the `SegmentInner` object.
-            drop(unsafe { Page::<FrameMeta>::from_raw(pa_i) });
-        }
-    }
-}
-
 impl HasPaddr for Segment {
     fn paddr(&self) -> Paddr {
         self.start_paddr()
@@ -69,28 +46,6 @@ impl HasPaddr for Segment {
 }
 
 impl Segment {
-    /// Creates a new `Segment`.
-    ///
-    /// # Safety
-    ///
-    /// The given range of page frames must be contiguous and valid for use.
-    /// The given range of page frames must not have been allocated before,
-    /// as part of either a [`Frame`] or `Segment`.
-    pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
-        for i in 0..nframes {
-            let pa_i = paddr + i * PAGE_SIZE;
-            let page = Page::<FrameMeta>::from_unused(pa_i);
-            core::mem::forget(page);
-        }
-        Self {
-            inner: Arc::new(SegmentInner {
-                start: paddr,
-                nframes,
-            }),
-            range: 0..nframes,
-        }
-    }
-
     /// Returns a part of the `Segment`.
     ///
     /// # Panics
@@ -129,7 +84,7 @@ impl Segment {
     }
 
     fn start_frame_index(&self) -> usize {
-        self.inner.start / PAGE_SIZE + self.range.start
+        self.inner.start_paddr() / PAGE_SIZE + self.range.start
     }
 
     /// Returns a raw pointer to the starting virtual address of the `Segment`.
@@ -183,14 +138,19 @@ impl VmIo for Segment {
 
 impl From<Frame> for Segment {
     fn from(frame: Frame) -> Self {
-        let paddr = frame.paddr();
-        core::mem::forget(frame);
         Self {
-            inner: Arc::new(SegmentInner {
-                start: paddr,
-                nframes: 1,
-            }),
+            inner: Arc::new(Page::<FrameMeta>::from(frame).into()),
             range: 0..1,
         }
     }
 }
+
+impl From<ContPages<FrameMeta>> for Segment {
+    fn from(cont_pages: ContPages<FrameMeta>) -> Self {
+        let len = cont_pages.len();
+        Self {
+            inner: Arc::new(cont_pages),
+            range: 0..len / PAGE_SIZE,
+        }
+    }
+}
diff --git a/framework/aster-frame/src/mm/heap_allocator.rs b/framework/aster-frame/src/mm/heap_allocator.rs
index 02e612fb8..b357e54bd 100644
--- a/framework/aster-frame/src/mm/heap_allocator.rs
+++ b/framework/aster-frame/src/mm/heap_allocator.rs
@@ -11,7 +11,7 @@ use log::debug;
 
 use super::paddr_to_vaddr;
 use crate::{
-    mm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
+    mm::{page::allocator::PAGE_ALLOCATOR, PAGE_SIZE},
     prelude::*,
     sync::SpinLock,
     trap::disable_local,
@@ -105,12 +105,12 @@ fn rescue<const ORDER: usize>(heap: &LockedHeapWithRescue<ORDER>, layout: &Layout) -> Result<()> {
     };
 
     let allocation_start = {
-        let mut frame_allocator = FRAME_ALLOCATOR.get().unwrap().lock();
+        let mut page_allocator = PAGE_ALLOCATOR.get().unwrap().lock();
         if num_frames >= MIN_NUM_FRAMES {
-            frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
+            page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
         } else {
-            match frame_allocator.alloc(MIN_NUM_FRAMES) {
-                None => frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
+            match page_allocator.alloc(MIN_NUM_FRAMES) {
+                None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
                 Some(start) => {
                     num_frames = MIN_NUM_FRAMES;
                     start
diff --git a/framework/aster-frame/src/mm/page/allocator.rs b/framework/aster-frame/src/mm/page/allocator.rs
index 26556237c..4aab59519 100644
--- a/framework/aster-frame/src/mm/page/allocator.rs
+++ b/framework/aster-frame/src/mm/page/allocator.rs
@@ -12,72 +12,53 @@ use buddy_system_allocator::FrameAllocator;
 use log::info;
 use spin::Once;
 
-use super::{
-    meta::{FrameMeta, PageMeta},
-    Page,
-};
-use crate::{
-    boot::memory_region::MemoryRegionType,
-    mm::{Frame, FrameVec, Segment, PAGE_SIZE},
-    sync::SpinLock,
-};
+use super::{cont_pages::ContPages, meta::PageMeta, Page};
+use crate::{boot::memory_region::MemoryRegionType, mm::PAGE_SIZE, sync::SpinLock};
 
-pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
+pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
 
-pub(crate) fn alloc(nframes: usize) -> Option<FrameVec> {
-    FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .alloc(nframes)
-        .map(|start| {
-            let mut vector = Vec::new();
-            for i in 0..nframes {
-                let paddr = (start + i) * PAGE_SIZE;
-                let frame = Frame {
-                    page: Page::<FrameMeta>::from_unused(paddr),
-                };
-                vector.push(frame);
-            }
-            FrameVec(vector)
-        })
-}
-
-pub(crate) fn alloc_single() -> Option<Page<FrameMeta>> {
-    FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
+/// Allocate a single page.
+pub(crate) fn alloc_single<M: PageMeta>() -> Option<Page<M>> {
+    PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
         let paddr = idx * PAGE_SIZE;
-        Page::<FrameMeta>::from_unused(paddr)
+        Page::<M>::from_unused(paddr)
     })
 }
 
-pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
-    FRAME_ALLOCATOR
+/// Allocate a contiguous range of pages of a given length in bytes.
+///
+/// # Panics
+///
+/// The function panics if the length is not base-page-aligned.
+pub(crate) fn alloc_contiguous<M: PageMeta>(len: usize) -> Option<ContPages<M>> {
+    assert!(len % PAGE_SIZE == 0);
+    PAGE_ALLOCATOR
         .get()
         .unwrap()
         .lock()
-        .alloc(nframes)
-        .map(|start|
-        // SAFETY: The range of page frames is contiguous and valid.
-        unsafe {
-            Segment::new(
-                start * PAGE_SIZE,
-                nframes,
-            )
-        })
+        .alloc(len / PAGE_SIZE)
+        .map(|start| ContPages::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len))
 }
 
-/// Deallocates a contiguous range of page frames.
+/// Allocate pages.
 ///
-/// # Safety
+/// The allocated pages are not guaranteed to be contiguous.
+/// The total length of the allocated pages is `len`.
 ///
-/// User should ensure the range of page frames is valid.
+/// # Panics
 ///
-pub(crate) unsafe fn dealloc(start_index: usize, nframes: usize) {
-    FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .dealloc(start_index, nframes);
+/// The function panics if the length is not base-page-aligned.
+pub(crate) fn alloc<M: PageMeta>(len: usize) -> Option<Vec<Page<M>>> {
+    assert!(len % PAGE_SIZE == 0);
+    let nframes = len / PAGE_SIZE;
+    let mut allocator = PAGE_ALLOCATOR.get().unwrap().lock();
+    let mut vector = Vec::new();
+    for _ in 0..nframes {
+        let paddr = allocator.alloc(1)? * PAGE_SIZE;
+        let page = Page::<M>::from_unused(paddr);
+        vector.push(page);
+    }
+    Some(vector)
 }
 
 pub(crate) fn init() {
@@ -101,5 +82,5 @@ pub(crate) fn init() {
             );
         }
     }
-    FRAME_ALLOCATOR.call_once(|| SpinLock::new(allocator));
+    PAGE_ALLOCATOR.call_once(|| SpinLock::new(allocator));
 }
diff --git a/framework/aster-frame/src/mm/page/cont_pages.rs b/framework/aster-frame/src/mm/page/cont_pages.rs
new file mode 100644
index 000000000..4eae13232
--- /dev/null
+++ b/framework/aster-frame/src/mm/page/cont_pages.rs
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! A contiguous range of pages.
+
+use alloc::vec::Vec;
+use core::{mem::ManuallyDrop, ops::Range};
+
+use super::{meta::PageMeta, Page};
+use crate::mm::{Paddr, PAGE_SIZE};
+
+/// A contiguous range of physical memory pages.
+///
+/// This is a handle to many contiguous pages. It is more lightweight
+/// than owning an array of page handles.
+///
+/// The ownership is achieved by the reference counting mechanism of pages.
+/// When constructing a `ContPages`, the page handles are created and then
+/// forgotten, leaving the reference count. When dropping it, the page
+/// handles are restored and dropped, decrementing the reference count.
+#[derive(Debug)]
+pub struct ContPages<M: PageMeta> {
+    range: Range<Paddr>,
+    _marker: core::marker::PhantomData<M>,
+}
+
+impl<M: PageMeta> Drop for ContPages<M> {
+    fn drop(&mut self) {
+        for i in self.range.clone().step_by(PAGE_SIZE) {
+            // SAFETY: for each page there would be a forgotten handle
+            // when creating the `ContPages` object.
+            drop(unsafe { Page::<M>::from_raw(i) });
+        }
+    }
+}
+
+impl<M: PageMeta> ContPages<M> {
+    /// Create a new `ContPages` from unused pages.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if:
+    ///  - the physical address is invalid or not aligned;
+    ///  - any of the pages are already in use.
+    pub fn from_unused(range: Range<Paddr>) -> Self {
+        for i in range.clone().step_by(PAGE_SIZE) {
+            let _ = ManuallyDrop::new(Page::<M>::from_unused(i));
+        }
+        Self {
+            range,
+            _marker: core::marker::PhantomData,
+        }
+    }
+
+    /// Get the start physical address of the contiguous pages.
+    pub fn start_paddr(&self) -> Paddr {
+        self.range.start
+    }
+
+    /// Get the length in bytes of the contiguous pages.
+    pub fn len(&self) -> usize {
+        self.range.end - self.range.start
+    }
+}
+
+impl<M: PageMeta> From<Page<M>> for ContPages<M> {
+    fn from(page: Page<M>) -> Self {
+        let pa = page.paddr();
+        let _ = ManuallyDrop::new(page);
+        Self {
+            range: pa..pa + PAGE_SIZE,
+            _marker: core::marker::PhantomData,
+        }
+    }
+}
+
+impl<M: PageMeta> From<ContPages<M>> for Vec<Page<M>> {
+    fn from(pages: ContPages<M>) -> Self {
+        let vector = pages
+            .range
+            .clone()
+            .step_by(PAGE_SIZE)
+            .map(|i|
+            // SAFETY: for each page there would be a forgotten handle
+            // when creating the `ContPages` object.
+            unsafe { Page::<M>::from_raw(i) })
+            .collect();
+        let _ = ManuallyDrop::new(pages);
+        vector
+    }
+}
diff --git a/framework/aster-frame/src/mm/page/meta.rs b/framework/aster-frame/src/mm/page/meta.rs
index 27d25be82..fef24b05f 100644
--- a/framework/aster-frame/src/mm/page/meta.rs
+++ b/framework/aster-frame/src/mm/page/meta.rs
@@ -50,7 +50,7 @@ use super::Page;
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, page_size,
+        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, page_size,
         page_table::PageTableEntryTrait, CachePolicy, Paddr, PageFlags, PageProperty,
         PagingConstsTrait, PagingLevel, PrivilegedPageFlags, PAGE_SIZE,
     },
@@ -230,13 +230,7 @@ pub(crate) fn init() -> Vec<Range<Paddr>> {
 
 fn alloc_meta_pages(nframes: usize) -> Vec<Paddr> {
     let mut meta_pages = Vec::new();
-    let start_frame = FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .alloc(nframes)
-        .unwrap()
-        * PAGE_SIZE;
+    let start_frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(nframes).unwrap() * PAGE_SIZE;
     // Zero them out as initialization.
     let vaddr = paddr_to_vaddr(start_frame) as *mut u8;
     unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE * nframes) };
diff --git a/framework/aster-frame/src/mm/page/mod.rs b/framework/aster-frame/src/mm/page/mod.rs
index 7480c9427..022598de2 100644
--- a/framework/aster-frame/src/mm/page/mod.rs
+++ b/framework/aster-frame/src/mm/page/mod.rs
@@ -15,6 +15,7 @@
 //! the handle only a pointer to the metadata.
 
 pub(crate) mod allocator;
+pub(in crate::mm) mod cont_pages;
 pub(in crate::mm) mod meta;
 
 use core::{
@@ -40,51 +41,17 @@ pub struct Page<M: PageMeta> {
 unsafe impl<M: PageMeta> Send for Page<M> {}
 
 unsafe impl<M: PageMeta> Sync for Page<M> {}
 
-/// Errors that can occur when getting a page handle.
-#[derive(Debug)]
-pub enum PageHandleError {
-    /// The physical address is out of range.
-    OutOfRange,
-    /// The physical address is not aligned to the page size.
-    NotAligned,
-    /// The page is already in use.
-    InUse,
-}
-
 impl<M: PageMeta> Page<M> {
     /// Get a `Page` handle with a specific usage from a raw, unused page.
     ///
-    /// If the provided physical address is invalid or not aligned, this
-    /// function will panic.
+    /// # Panics
     ///
-    /// If the provided page is already in use this function will block
-    /// until the page is released. This is a workaround since the page
-    /// allocator is decoupled from metadata management and page would be
-    /// reusable in the page allocator before resetting all metadata.
-    ///
-    /// TODO: redesign the page allocator to be aware of metadata management.
+    /// The function panics if:
+    ///  - the physical address is out of bounds or not aligned;
+    ///  - the page is already in use.
     pub fn from_unused(paddr: Paddr) -> Self {
-        loop {
-            match Self::try_from_unused(paddr) {
-                Ok(page) => return page,
-                Err(PageHandleError::InUse) => {
-                    // Wait for the page to be released.
-                    core::hint::spin_loop();
-                }
-                Err(e) => panic!("Failed to get a page handle: {:?}", e),
-            }
-        }
-    }
-
-    /// Get a `Page` handle with a specific usage from a raw, unused page.
-    pub(in crate::mm) fn try_from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
-        if paddr % PAGE_SIZE != 0 {
-            return Err(PageHandleError::NotAligned);
-        }
-        if paddr > MAX_PADDR.load(Ordering::Relaxed) {
-            return Err(PageHandleError::OutOfRange);
-        }
-
+        assert!(paddr % PAGE_SIZE == 0);
+        assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
         let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
@@ -93,18 +60,21 @@ impl<M: PageMeta> Page<M> {
 
         usage
             .compare_exchange(0, M::USAGE as u8, Ordering::SeqCst, Ordering::Relaxed)
-            .map_err(|_| PageHandleError::InUse)?;
+            .expect("page already in use when trying to get a new handle");
 
         let old_get_ref_count = get_ref_count.fetch_add(1, Ordering::Relaxed);
         debug_assert!(old_get_ref_count == 0);
 
         // Initialize the metadata
-        unsafe { (ptr as *mut M).write(M::default()) }
+        // SAFETY: The pointer points to the first byte of the `MetaSlot`
+        // structure, and the layout ensures enough space for `M`. The original
+        // value does not represent any object that needs to be dropped.
+        unsafe { (ptr as *mut M).write(M::default()) };
 
-        Ok(Self {
+        Self {
             ptr,
             _marker: PhantomData,
-        })
+        }
     }
 
     /// Forget the handle to the page.
@@ -191,6 +161,15 @@ impl<M: PageMeta> Drop for Page<M> {
             // No handles means no usage. This also releases the page as unused for further
             // calls to `Page::from_unused`.
             unsafe { &*self.ptr }.usage.store(0, Ordering::Release);
+            // Deallocate the page.
+            // This returns the page to the allocator for further use. It is done
+            // after the metadata is released, to avoid re-allocation before the
+            // metadata has been reset.
+            allocator::PAGE_ALLOCATOR
+                .get()
+                .unwrap()
+                .lock()
+                .dealloc(self.paddr() / PAGE_SIZE, 1);
         };
     }
 }
diff --git a/framework/aster-frame/src/mm/page_table/boot_pt.rs b/framework/aster-frame/src/mm/page_table/boot_pt.rs
index 96c82c9ee..d6739651f 100644
--- a/framework/aster-frame/src/mm/page_table/boot_pt.rs
+++ b/framework/aster-frame/src/mm/page_table/boot_pt.rs
@@ -10,7 +10,7 @@ use super::{pte_index, PageTableEntryTrait};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty,
+        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, PageProperty,
         PagingConstsTrait, Vaddr, PAGE_SIZE,
     },
 };
@@ -144,7 +144,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
     }
 
     fn alloc_frame(&mut self) -> FrameNumber {
-        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
+        let frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
         self.frames.push(frame);
         // Zero it out.
         let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
@@ -156,7 +156,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C>
     fn drop(&mut self) {
         for frame in &self.frames {
-            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
+            PAGE_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
         }
     }
 }
diff --git a/framework/aster-frame/src/mm/page_table/node.rs b/framework/aster-frame/src/mm/page_table/node.rs
index 8234df21b..58f8e98e3 100644
--- a/framework/aster-frame/src/mm/page_table/node.rs
+++ b/framework/aster-frame/src/mm/page_table/node.rs
@@ -33,7 +33,7 @@ use crate::{
     mm::{
         paddr_to_vaddr,
         page::{
-            allocator::FRAME_ALLOCATOR,
+            allocator::PAGE_ALLOCATOR,
             meta::{FrameMeta, PageMeta, PageTablePageMeta, PageUsage},
             Page,
         },
@@ -216,8 +216,8 @@ where
     /// set the lock bit for performance as it is exclusive and unlocking is an
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel) -> Self {
-        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
-        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(frame);
+        let page_paddr = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
+        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(page_paddr);
 
         // The lock is initialized as held.
         page.meta().lock.store(1, Ordering::Relaxed);
@@ -293,7 +293,7 @@ where
             // the reference count so we restore and forget a cloned one.
             let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };
             core::mem::forget(page.clone());
-            Child::Frame(Frame { page })
+            Child::Frame(page.into())
         } else {
             Child::Untracked(paddr)
         }
@@ -552,12 +552,5 @@ where
                 }
             }
         }
-
-        // Recycle this page table node.
-        FRAME_ALLOCATOR
-            .get()
-            .unwrap()
-            .lock()
-            .dealloc(paddr / PAGE_SIZE, 1);
     }
 }