Refactor allocation APIs for pages and frames

Author:    Zhang Junyang, 2024-06-14 10:34:08 +00:00
Committer: Tate, Hongliang Tian
Parent:    a2060039c2
Commit:    40c32b5ff5
10 changed files with 210 additions and 194 deletions

View File

@@ -18,7 +18,6 @@ pub use frame_vec::{FrameVec, FrameVecIter};
 pub use segment::Segment;
 
 use super::page::{
-    allocator,
     meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
     Page,
 };
@@ -41,7 +40,19 @@ use crate::{
 /// will be globally freed.
 #[derive(Debug, Clone)]
 pub struct Frame {
-    pub(in crate::mm) page: Page<FrameMeta>,
+    page: Page<FrameMeta>,
+}
+
+impl From<Page<FrameMeta>> for Frame {
+    fn from(page: Page<FrameMeta>) -> Self {
+        Self { page }
+    }
+}
+
+impl From<Frame> for Page<FrameMeta> {
+    fn from(frame: Frame) -> Self {
+        frame.page
+    }
 }
 
 impl HasPaddr for Frame {
@@ -140,8 +151,9 @@ impl VmIo for Frame {
 impl PageMeta for FrameMeta {
     const USAGE: PageUsage = PageUsage::Frame;
 
-    fn on_drop(page: &mut Page<Self>) {
-        unsafe { allocator::dealloc(page.paddr() / PAGE_SIZE, 1) };
+    fn on_drop(_page: &mut Page<Self>) {
+        // Nothing needs to be done here, since dropping the page handle
+        // itself already takes care of deallocation.
     }
 }
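With the `From` impls above, `Frame` and `Page<FrameMeta>` convert into each other without touching the reference count, and deallocation moves from `FrameMeta::on_drop` into `Page::drop`. A minimal sketch of the round trip; the helper function below is hypothetical, not part of this commit:

// Hypothetical crate-internal helper illustrating the new conversions.
fn round_trip(frame: Frame) -> Frame {
    // `Frame` -> `Page<FrameMeta>`: moves the handle, no refcount change.
    let page: Page<FrameMeta> = frame.into();
    // `Page<FrameMeta>` -> `Frame`: wraps the same handle back up. Dropping
    // the result returns the page to the allocator via `Page::drop`.
    Frame::from(page)
}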

View File

@@ -3,7 +3,14 @@
 
 //! Options for allocating frames
 
 use super::{Frame, FrameVec, Segment};
-use crate::{mm::page::allocator, prelude::*, Error};
+use crate::{
+    mm::{
+        page::{self, meta::FrameMeta},
+        PAGE_SIZE,
+    },
+    prelude::*,
+    Error,
+};
 
 /// Options for allocating physical memory pages (or frames).
 ///
@@ -49,17 +56,14 @@ impl FrameAllocOptions {
     /// Allocates a collection of page frames according to the given options.
     pub fn alloc(&self) -> Result<FrameVec> {
-        let frames = if self.is_contiguous {
-            allocator::alloc(self.nframes).ok_or(Error::NoMemory)?
+        let pages = if self.is_contiguous {
+            page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE)
+                .ok_or(Error::NoMemory)?
+                .into()
         } else {
-            let mut frame_list = Vec::new();
-            for _ in 0..self.nframes {
-                let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
-                let frame = Frame { page };
-                frame_list.push(frame);
-            }
-            FrameVec(frame_list)
+            page::allocator::alloc(self.nframes * PAGE_SIZE).ok_or(Error::NoMemory)?
         };
+        let frames = FrameVec(pages.into_iter().map(|page| Frame { page }).collect());
 
         if !self.uninit {
             for frame in frames.iter() {
                 frame.writer().fill(0);
@@ -75,7 +79,7 @@ impl FrameAllocOptions {
             return Err(Error::InvalidArgs);
         }
-        let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
+        let page = page::allocator::alloc_single().ok_or(Error::NoMemory)?;
         let frame = Frame { page };
         if !self.uninit {
             frame.writer().fill(0);
@@ -93,7 +97,10 @@ impl FrameAllocOptions {
             return Err(Error::InvalidArgs);
         }
-        let segment = allocator::alloc_contiguous(self.nframes).ok_or(Error::NoMemory)?;
+        let segment: Segment =
+            page::allocator::alloc_contiguous::<FrameMeta>(self.nframes * PAGE_SIZE)
+                .ok_or(Error::NoMemory)?
+                .into();
         if !self.uninit {
             segment.writer().fill(0);
         }
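For illustration, a sketch of how a caller might use the reworked options; the `new(nframes)` constructor and the chaining `is_contiguous` setter are assumed from the surrounding code, not shown in this diff:

// A sketch only; builder method names and signatures are assumptions.
fn alloc_two_zeroed_frames() -> Result<FrameVec> {
    FrameAllocOptions::new(2)
        .is_contiguous(false) // page by page, via page::allocator::alloc
        .alloc() // frames are zero-filled unless `uninit` is set
}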

View File

@@ -8,7 +8,7 @@ use core::ops::Range;
 
 use super::Frame;
 use crate::{
     mm::{
-        page::{meta::FrameMeta, Page},
+        page::{cont_pages::ContPages, meta::FrameMeta, Page},
         HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
     Error, Result,
@@ -35,33 +35,10 @@ use crate::{
 /// ```
 #[derive(Debug, Clone)]
 pub struct Segment {
-    inner: Arc<SegmentInner>,
+    inner: Arc<ContPages<FrameMeta>>,
     range: Range<usize>,
 }
 
-/// This behaves like a [`Frame`] that owns a list of frame handles.
-///
-/// The ownership is acheived by the reference counting mechanism of
-/// frames. When constructing a `SegmentInner`, the frame handles are
-/// forgotten. When dropping a `SegmentInner`, the frame handles are
-/// restored and dropped.
-#[derive(Debug)]
-struct SegmentInner {
-    start: Paddr,
-    nframes: usize,
-}
-
-impl Drop for SegmentInner {
-    fn drop(&mut self) {
-        for i in 0..self.nframes {
-            let pa_i = self.start + i * PAGE_SIZE;
-            // SAFETY: for each page there would be a forgotten handle
-            // when creating the `SegmentInner` object.
-            drop(unsafe { Page::<FrameMeta>::from_raw(pa_i) });
-        }
-    }
-}
-
 impl HasPaddr for Segment {
     fn paddr(&self) -> Paddr {
         self.start_paddr()
@@ -69,28 +46,6 @@ impl HasPaddr for Segment {
 }
 
 impl Segment {
-    /// Creates a new `Segment`.
-    ///
-    /// # Safety
-    ///
-    /// The given range of page frames must be contiguous and valid for use.
-    /// The given range of page frames must not have been allocated before,
-    /// as part of either a [`Frame`] or `Segment`.
-    pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
-        for i in 0..nframes {
-            let pa_i = paddr + i * PAGE_SIZE;
-            let page = Page::<FrameMeta>::from_unused(pa_i);
-            core::mem::forget(page);
-        }
-        Self {
-            inner: Arc::new(SegmentInner {
-                start: paddr,
-                nframes,
-            }),
-            range: 0..nframes,
-        }
-    }
-
     /// Returns a part of the `Segment`.
     ///
     /// # Panics
@@ -129,7 +84,7 @@ impl Segment {
     }
 
     fn start_frame_index(&self) -> usize {
-        self.inner.start / PAGE_SIZE + self.range.start
+        self.inner.start_paddr() / PAGE_SIZE + self.range.start
     }
 
     /// Returns a raw pointer to the starting virtual address of the `Segment`.
@@ -183,14 +138,19 @@ impl VmIo for Segment {
 impl From<Frame> for Segment {
     fn from(frame: Frame) -> Self {
-        let paddr = frame.paddr();
-        core::mem::forget(frame);
         Self {
-            inner: Arc::new(SegmentInner {
-                start: paddr,
-                nframes: 1,
-            }),
+            inner: Arc::new(Page::<FrameMeta>::from(frame).into()),
             range: 0..1,
         }
     }
 }
+
+impl From<ContPages<FrameMeta>> for Segment {
+    fn from(cont_pages: ContPages<FrameMeta>) -> Self {
+        let len = cont_pages.len();
+        Self {
+            inner: Arc::new(cont_pages),
+            range: 0..len / PAGE_SIZE,
+        }
+    }
+}
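As a usage sketch, both conversions yield a `Segment` whose pages are owned through `ContPages`; the four-page length below is an arbitrary example and `expect` stands in for real error handling:

// A sketch; assumes `crate::mm::page` and `PAGE_SIZE` are in scope.
fn make_segments(frame: Frame) -> (Segment, Segment) {
    // One frame re-wrapped as a single-page segment.
    let single = Segment::from(frame);
    // A contiguous four-page segment straight from the page allocator.
    let multi: Segment = page::allocator::alloc_contiguous::<FrameMeta>(4 * PAGE_SIZE)
        .expect("no memory")
        .into();
    (single, multi)
}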

View File

@@ -11,7 +11,7 @@ use log::debug;
 
 use super::paddr_to_vaddr;
 use crate::{
-    mm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
+    mm::{page::allocator::PAGE_ALLOCATOR, PAGE_SIZE},
     prelude::*,
     sync::SpinLock,
     trap::disable_local,
@@ -105,12 +105,12 @@ fn rescue<const ORDER: usize>(heap: &LockedHeapWithRescue<ORDER>, layout: &Layout)
     };
 
     let allocation_start = {
-        let mut frame_allocator = FRAME_ALLOCATOR.get().unwrap().lock();
+        let mut page_allocator = PAGE_ALLOCATOR.get().unwrap().lock();
         if num_frames >= MIN_NUM_FRAMES {
-            frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
+            page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
         } else {
-            match frame_allocator.alloc(MIN_NUM_FRAMES) {
-                None => frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
+            match page_allocator.alloc(MIN_NUM_FRAMES) {
+                None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
                 Some(start) => {
                     num_frames = MIN_NUM_FRAMES;
                     start

View File

@@ -12,72 +12,53 @@ use buddy_system_allocator::FrameAllocator;
 use log::info;
 use spin::Once;
 
-use super::{
-    meta::{FrameMeta, PageMeta},
-    Page,
-};
-use crate::{
-    boot::memory_region::MemoryRegionType,
-    mm::{Frame, FrameVec, Segment, PAGE_SIZE},
-    sync::SpinLock,
-};
+use super::{cont_pages::ContPages, meta::PageMeta, Page};
+use crate::{boot::memory_region::MemoryRegionType, mm::PAGE_SIZE, sync::SpinLock};
 
-pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
+pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
 
-pub(crate) fn alloc(nframes: usize) -> Option<FrameVec> {
-    FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .alloc(nframes)
-        .map(|start| {
-            let mut vector = Vec::new();
-            for i in 0..nframes {
-                let paddr = (start + i) * PAGE_SIZE;
-                let frame = Frame {
-                    page: Page::<FrameMeta>::from_unused(paddr),
-                };
-                vector.push(frame);
-            }
-            FrameVec(vector)
-        })
-}
-
-pub(crate) fn alloc_single<T: PageMeta>() -> Option<Page<T>> {
-    FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
+/// Allocate a single page.
+pub(crate) fn alloc_single<M: PageMeta>() -> Option<Page<M>> {
+    PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
         let paddr = idx * PAGE_SIZE;
-        Page::<T>::from_unused(paddr)
+        Page::<M>::from_unused(paddr)
     })
 }
 
-pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
-    FRAME_ALLOCATOR
+/// Allocate a contiguous range of pages of a given length in bytes.
+///
+/// # Panics
+///
+/// The function panics if the length is not base-page-aligned.
+pub(crate) fn alloc_contiguous<M: PageMeta>(len: usize) -> Option<ContPages<M>> {
+    assert!(len % PAGE_SIZE == 0);
+    PAGE_ALLOCATOR
         .get()
         .unwrap()
         .lock()
-        .alloc(nframes)
-        .map(|start|
-            // SAFETY: The range of page frames is contiguous and valid.
-            unsafe {
-                Segment::new(
-                    start * PAGE_SIZE,
-                    nframes,
-                )
-            })
+        .alloc(len / PAGE_SIZE)
+        .map(|start| ContPages::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len))
 }
 
-/// Deallocates a contiguous range of page frames.
+/// Allocate pages.
 ///
-/// # Safety
+/// The allocated pages are not guaranteed to be contiguous.
+/// The total length of the allocated pages is `len`.
 ///
-/// User should ensure the range of page frames is valid.
+/// # Panics
 ///
-pub(crate) unsafe fn dealloc(start_index: usize, nframes: usize) {
-    FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .dealloc(start_index, nframes);
+/// The function panics if the length is not base-page-aligned.
+pub(crate) fn alloc<M: PageMeta>(len: usize) -> Option<Vec<Page<M>>> {
+    assert!(len % PAGE_SIZE == 0);
+    let nframes = len / PAGE_SIZE;
+    let mut allocator = PAGE_ALLOCATOR.get().unwrap().lock();
+    let mut vector = Vec::new();
+    for _ in 0..nframes {
+        let paddr = allocator.alloc(1)? * PAGE_SIZE;
+        let page = Page::<M>::from_unused(paddr);
+        vector.push(page);
+    }
+    Some(vector)
 }
 
 pub(crate) fn init() {
@@ -101,5 +82,5 @@ pub(crate) fn init() {
             );
         }
     }
-    FRAME_ALLOCATOR.call_once(|| SpinLock::new(allocator));
+    PAGE_ALLOCATOR.call_once(|| SpinLock::new(allocator));
 }
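Taken together, the module now exposes three crate-internal entry points and no `dealloc` at all; a sketch of their contracts, assuming `FrameMeta` is imported:

// A sketch; lengths are in bytes and must be PAGE_SIZE-aligned, or the
// functions panic. `FrameMeta` is assumed to be in scope.
fn page_alloc_sketch() -> Option<()> {
    let _one: Page<FrameMeta> = alloc_single()?; // a single page
    let _many: Vec<Page<FrameMeta>> = alloc(3 * PAGE_SIZE)?; // possibly scattered
    let _run: ContPages<FrameMeta> = alloc_contiguous(2 * PAGE_SIZE)?; // contiguous
    // No explicit `dealloc`: dropping the handles returns the pages to
    // PAGE_ALLOCATOR from within `Page::drop`.
    Some(())
}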

View File

@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! A contiguous range of pages.
+
+use alloc::vec::Vec;
+use core::{mem::ManuallyDrop, ops::Range};
+
+use super::{meta::PageMeta, Page};
+use crate::mm::{Paddr, PAGE_SIZE};
+
+/// A contiguous range of physical memory pages.
+///
+/// This is a handle to many contiguous pages. It is more lightweight
+/// than owning an array of page handles.
+///
+/// The ownership is achieved by the reference counting mechanism of pages.
+/// When constructing a `ContPages`, the page handles are created and then
+/// forgotten, leaving the reference counts. When dropping it, the page
+/// handles are restored and dropped, decrementing the reference counts.
+#[derive(Debug)]
+pub struct ContPages<M: PageMeta> {
+    range: Range<Paddr>,
+    _marker: core::marker::PhantomData<M>,
+}
+
+impl<M: PageMeta> Drop for ContPages<M> {
+    fn drop(&mut self) {
+        for i in self.range.clone().step_by(PAGE_SIZE) {
+            // SAFETY: for each page there would be a forgotten handle
+            // when creating the `ContPages` object.
+            drop(unsafe { Page::<M>::from_raw(i) });
+        }
+    }
+}
+
+impl<M: PageMeta> ContPages<M> {
+    /// Create a new `ContPages` from unused pages.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if:
+    /// - the physical address is invalid or not aligned;
+    /// - any of the pages are already in use.
+    pub fn from_unused(range: Range<Paddr>) -> Self {
+        for i in range.clone().step_by(PAGE_SIZE) {
+            let _ = ManuallyDrop::new(Page::<M>::from_unused(i));
+        }
+        Self {
+            range,
+            _marker: core::marker::PhantomData,
+        }
+    }
+
+    /// Get the start physical address of the contiguous pages.
+    pub fn start_paddr(&self) -> Paddr {
+        self.range.start
+    }
+
+    /// Get the length in bytes of the contiguous pages.
+    pub fn len(&self) -> usize {
+        self.range.end - self.range.start
+    }
+}
+
+impl<M: PageMeta> From<Page<M>> for ContPages<M> {
+    fn from(page: Page<M>) -> Self {
+        let pa = page.paddr();
+        let _ = ManuallyDrop::new(page);
+        Self {
+            range: pa..pa + PAGE_SIZE,
+            _marker: core::marker::PhantomData,
+        }
+    }
+}
+
+impl<M: PageMeta> From<ContPages<M>> for Vec<Page<M>> {
+    fn from(pages: ContPages<M>) -> Self {
+        let vector = pages
+            .range
+            .clone()
+            .step_by(PAGE_SIZE)
+            .map(|i|
+                // SAFETY: for each page there would be a forgotten handle
+                // when creating the `ContPages` object.
+                unsafe { Page::<M>::from_raw(i) })
+            .collect();
+        let _ = ManuallyDrop::new(pages);
+        vector
+    }
+}
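The forget-then-restore pattern above is the whole ownership story; a sketch of the life cycle, with a hypothetical page-aligned physical range and `FrameMeta` assumed in scope:

// A sketch; the physical range is hypothetical and must be unused.
fn cont_pages_lifecycle() {
    let start: Paddr = 0x8000_0000;
    let pages = ContPages::<FrameMeta>::from_unused(start..start + 4 * PAGE_SIZE);
    assert_eq!(pages.len(), 4 * PAGE_SIZE);
    // Converting to a vector restores each forgotten handle...
    let split: Vec<Page<FrameMeta>> = pages.into();
    // ...and dropping the handles finally frees the pages.
    drop(split);
}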

View File

@@ -50,7 +50,7 @@ use super::Page;
 
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, page_size,
+        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, page_size,
         page_table::PageTableEntryTrait, CachePolicy, Paddr, PageFlags, PageProperty,
         PagingConstsTrait, PagingLevel, PrivilegedPageFlags, PAGE_SIZE,
     },
@@ -230,13 +230,7 @@ pub(crate) fn init() -> Vec<Range<Paddr>> {
 
 fn alloc_meta_pages(nframes: usize) -> Vec<Paddr> {
     let mut meta_pages = Vec::new();
-    let start_frame = FRAME_ALLOCATOR
-        .get()
-        .unwrap()
-        .lock()
-        .alloc(nframes)
-        .unwrap()
-        * PAGE_SIZE;
+    let start_frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(nframes).unwrap() * PAGE_SIZE;
     // Zero them out as initialization.
     let vaddr = paddr_to_vaddr(start_frame) as *mut u8;
     unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE * nframes) };

View File

@@ -15,6 +15,7 @@
 //! the handle only a pointer to the metadata.
 
 pub(crate) mod allocator;
+pub(in crate::mm) mod cont_pages;
 pub(in crate::mm) mod meta;
 
 use core::{
@@ -40,51 +41,17 @@ pub struct Page<M: PageMeta> {
 unsafe impl<M: PageMeta> Send for Page<M> {}
 unsafe impl<M: PageMeta> Sync for Page<M> {}
 
-/// Errors that can occur when getting a page handle.
-#[derive(Debug)]
-pub enum PageHandleError {
-    /// The physical address is out of range.
-    OutOfRange,
-    /// The physical address is not aligned to the page size.
-    NotAligned,
-    /// The page is already in use.
-    InUse,
-}
-
 impl<M: PageMeta> Page<M> {
     /// Get a `Page` handle with a specific usage from a raw, unused page.
     ///
-    /// If the provided physical address is invalid or not aligned, this
-    /// function will panic.
+    /// # Panics
     ///
-    /// If the provided page is already in use this function will block
-    /// until the page is released. This is a workaround since the page
-    /// allocator is decoupled from metadata management and page would be
-    /// reusable in the page allocator before resetting all metadata.
-    ///
-    /// TODO: redesign the page allocator to be aware of metadata management.
+    /// The function panics if:
+    /// - the physical address is out of bounds or not aligned;
+    /// - the page is already in use.
     pub fn from_unused(paddr: Paddr) -> Self {
-        loop {
-            match Self::try_from_unused(paddr) {
-                Ok(page) => return page,
-                Err(PageHandleError::InUse) => {
-                    // Wait for the page to be released.
-                    core::hint::spin_loop();
-                }
-                Err(e) => panic!("Failed to get a page handle: {:?}", e),
-            }
-        }
-    }
-
-    /// Get a `Page` handle with a specific usage from a raw, unused page.
-    pub(in crate::mm) fn try_from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
-        if paddr % PAGE_SIZE != 0 {
-            return Err(PageHandleError::NotAligned);
-        }
-        if paddr > MAX_PADDR.load(Ordering::Relaxed) {
-            return Err(PageHandleError::OutOfRange);
-        }
+        assert!(paddr % PAGE_SIZE == 0);
+        assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
 
         let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
@@ -93,18 +60,21 @@ impl<M: PageMeta> Page<M> {
         usage
             .compare_exchange(0, M::USAGE as u8, Ordering::SeqCst, Ordering::Relaxed)
-            .map_err(|_| PageHandleError::InUse)?;
+            .expect("page already in use when trying to get a new handle");
 
         let old_get_ref_count = get_ref_count.fetch_add(1, Ordering::Relaxed);
         debug_assert!(old_get_ref_count == 0);
 
         // Initialize the metadata
-        unsafe { (ptr as *mut M).write(M::default()) }
+        // SAFETY: The pointer points to the first byte of the `MetaSlot`
+        // structure, and the layout ensures enough space for `M`. The
+        // original value does not represent any object that needs to be
+        // dropped.
+        unsafe { (ptr as *mut M).write(M::default()) };
 
-        Ok(Self {
+        Self {
             ptr,
             _marker: PhantomData,
-        })
+        }
     }
 
     /// Forget the handle to the page.
@@ -191,6 +161,15 @@ impl<M: PageMeta> Drop for Page<M> {
             // No handles means no usage. This also releases the page as unused for further
             // calls to `Page::from_unused`.
             unsafe { &*self.ptr }.usage.store(0, Ordering::Release);
+
+            // Deallocate the page.
+            // This returns the page to the allocator for further use. It is
+            // done after the release of the metadata to avoid re-allocation
+            // before the metadata is reset.
+            allocator::PAGE_ALLOCATOR
+                .get()
+                .unwrap()
+                .lock()
+                .dealloc(self.paddr() / PAGE_SIZE, 1);
         };
     }
 }
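Putting the panic-on-misuse construction and the deallocating drop together, a sketch of a page handle's life cycle; `paddr` is hypothetical and must be page-aligned, below `MAX_PADDR`, and unused, and `FrameMeta` is assumed in scope:

// A sketch of the handle life cycle after this change.
fn page_lifecycle(paddr: Paddr) {
    let page = Page::<FrameMeta>::from_unused(paddr); // usage: 0 -> Frame
    let extra = page.clone(); // reference count: 1 -> 2
    drop(extra); // reference count: 2 -> 1
    drop(page); // 1 -> 0: metadata dropped, usage reset, page deallocated
}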

View File

@@ -10,7 +10,7 @@ use super::{pte_index, PageTableEntryTrait};
 
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty,
+        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, PageProperty,
         PagingConstsTrait, Vaddr, PAGE_SIZE,
     },
 };
@@ -144,7 +144,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
     }
 
     fn alloc_frame(&mut self) -> FrameNumber {
-        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
+        let frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
         self.frames.push(frame);
         // Zero it out.
         let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
@@ -156,7 +156,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
     fn drop(&mut self) {
         for frame in &self.frames {
-            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
+            PAGE_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
         }
     }
 }

View File

@@ -33,7 +33,7 @@ use crate::{
     mm::{
         paddr_to_vaddr,
         page::{
-            allocator::FRAME_ALLOCATOR,
+            allocator::PAGE_ALLOCATOR,
             meta::{FrameMeta, PageMeta, PageTablePageMeta, PageUsage},
             Page,
         },
@@ -216,8 +216,8 @@ where
     /// set the lock bit for performance as it is exclusive and unlocking is an
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel) -> Self {
-        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
-        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(frame);
+        let page_paddr = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
+        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(page_paddr);
 
         // The lock is initialized as held.
         page.meta().lock.store(1, Ordering::Relaxed);
@@ -293,7 +293,7 @@ where
             // the reference count so we restore and forget a cloned one.
             let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };
             core::mem::forget(page.clone());
-            Child::Frame(Frame { page })
+            Child::Frame(page.into())
         } else {
             Child::Untracked(paddr)
         }
@@ -552,12 +552,5 @@ where
                 }
             }
         }
-
-        // Recycle this page table node.
-        FRAME_ALLOCATOR
-            .get()
-            .unwrap()
-            .lock()
-            .dealloc(paddr / PAGE_SIZE, 1);
     }
 }