Refactor allocation APIs for pages and frames

Committed by: Tate, Hongliang Tian
Parent: a2060039c2
Commit: 40c32b5ff5
@@ -18,7 +18,6 @@ pub use frame_vec::{FrameVec, FrameVecIter};
pub use segment::Segment;

use super::page::{
    allocator,
    meta::{FrameMeta, MetaSlot, PageMeta, PageUsage},
    Page,
};
@@ -41,7 +40,19 @@ use crate::{
/// will be globally freed.
#[derive(Debug, Clone)]
pub struct Frame {
    pub(in crate::mm) page: Page<FrameMeta>,
    page: Page<FrameMeta>,
}

impl From<Page<FrameMeta>> for Frame {
    fn from(page: Page<FrameMeta>) -> Self {
        Self { page }
    }
}

impl From<Frame> for Page<FrameMeta> {
    fn from(frame: Frame) -> Self {
        frame.page
    }
}

impl HasPaddr for Frame {
@@ -140,8 +151,9 @@ impl VmIo for Frame {
impl PageMeta for FrameMeta {
    const USAGE: PageUsage = PageUsage::Frame;

    fn on_drop(page: &mut Page<Self>) {
        unsafe { allocator::dealloc(page.paddr() / PAGE_SIZE, 1) };
    fn on_drop(_page: &mut Page<Self>) {
        // Nothing needs to be done here so far, since dropping the page
        // takes care of everything.
    }
}
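With the `From` impls added above, a `Frame` is a thin wrapper that converts to and from a `Page<FrameMeta>` handle, and `FrameMeta::on_drop` no longer frees anything because dropping the last page handle does. A minimal crate-internal sketch of the round trip (not part of this commit; it relies on the items shown in this diff plus the `alloc_single` entry point defined later in `page/allocator.rs`):

// Sketch only: crate-internal code built from items introduced in this diff.
use crate::mm::{
    page::{allocator, meta::FrameMeta, Page},
    Frame,
};

fn frame_round_trip() -> Option<Frame> {
    // Allocate one page carrying frame metadata ...
    let page: Page<FrameMeta> = allocator::alloc_single()?;
    // ... wrap it as a `Frame` via `From<Page<FrameMeta>>` ...
    let frame: Frame = page.into();
    // ... and unwrap it again via `From<Frame>`. No allocator call happens here;
    // the page is freed only when its last handle is dropped.
    let page: Page<FrameMeta> = frame.into();
    Some(page.into())
}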
@@ -3,7 +3,14 @@
//! Options for allocating frames

use super::{Frame, FrameVec, Segment};
use crate::{mm::page::allocator, prelude::*, Error};
use crate::{
    mm::{
        page::{self, meta::FrameMeta},
        PAGE_SIZE,
    },
    prelude::*,
    Error,
};

/// Options for allocating physical memory pages (or frames).
///
@@ -49,17 +56,14 @@ impl FrameAllocOptions {

    /// Allocates a collection of page frames according to the given options.
    pub fn alloc(&self) -> Result<FrameVec> {
        let frames = if self.is_contiguous {
            allocator::alloc(self.nframes).ok_or(Error::NoMemory)?
        let pages = if self.is_contiguous {
            page::allocator::alloc(self.nframes * PAGE_SIZE).ok_or(Error::NoMemory)?
        } else {
            let mut frame_list = Vec::new();
            for _ in 0..self.nframes {
                let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
                let frame = Frame { page };
                frame_list.push(frame);
            }
            FrameVec(frame_list)
            page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE)
                .ok_or(Error::NoMemory)?
                .into()
        };
        let frames = FrameVec(pages.into_iter().map(|page| Frame { page }).collect());
        if !self.uninit {
            for frame in frames.iter() {
                frame.writer().fill(0);
@@ -75,7 +79,7 @@ impl FrameAllocOptions {
            return Err(Error::InvalidArgs);
        }

        let page = allocator::alloc_single().ok_or(Error::NoMemory)?;
        let page = page::allocator::alloc_single().ok_or(Error::NoMemory)?;
        let frame = Frame { page };
        if !self.uninit {
            frame.writer().fill(0);
@@ -93,7 +97,10 @@ impl FrameAllocOptions {
            return Err(Error::InvalidArgs);
        }

        let segment = allocator::alloc_contiguous(self.nframes).ok_or(Error::NoMemory)?;
        let segment: Segment =
            page::allocator::alloc_contiguous::<FrameMeta>(self.nframes * PAGE_SIZE)
                .ok_or(Error::NoMemory)?
                .into();
        if !self.uninit {
            segment.writer().fill(0);
        }
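The options above now delegate to `page::allocator` and convert the returned pages into `Frame`s. A hypothetical usage sketch (not part of this commit; `FrameAllocOptions::new(nframes)` is an assumed constructor, only `alloc()` itself appears in the hunks above):

// Sketch only: `FrameAllocOptions::new` is an assumption; `alloc()` is shown above.
use crate::{
    mm::{FrameAllocOptions, FrameVec},
    prelude::*,
};

fn four_zeroed_frames() -> Result<FrameVec> {
    // Unless the `uninit` flag is set, the frames come back zero-filled,
    // matching the `!self.uninit` branch above.
    let frames: FrameVec = FrameAllocOptions::new(4).alloc()?;
    Ok(frames)
}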
@@ -8,7 +8,7 @@ use core::ops::Range;
use super::Frame;
use crate::{
    mm::{
        page::{meta::FrameMeta, Page},
        page::{cont_pages::ContPages, meta::FrameMeta, Page},
        HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE,
    },
    Error, Result,
@@ -35,33 +35,10 @@ use crate::{
/// ```
#[derive(Debug, Clone)]
pub struct Segment {
    inner: Arc<SegmentInner>,
    inner: Arc<ContPages<FrameMeta>>,
    range: Range<usize>,
}

/// This behaves like a [`Frame`] that owns a list of frame handles.
///
/// The ownership is achieved by the reference counting mechanism of
/// frames. When constructing a `SegmentInner`, the frame handles are
/// forgotten. When dropping a `SegmentInner`, the frame handles are
/// restored and dropped.
#[derive(Debug)]
struct SegmentInner {
    start: Paddr,
    nframes: usize,
}

impl Drop for SegmentInner {
    fn drop(&mut self) {
        for i in 0..self.nframes {
            let pa_i = self.start + i * PAGE_SIZE;
            // SAFETY: for each page there would be a forgotten handle
            // when creating the `SegmentInner` object.
            drop(unsafe { Page::<FrameMeta>::from_raw(pa_i) });
        }
    }
}

impl HasPaddr for Segment {
    fn paddr(&self) -> Paddr {
        self.start_paddr()
@@ -69,28 +46,6 @@ impl HasPaddr for Segment {
}

impl Segment {
    /// Creates a new `Segment`.
    ///
    /// # Safety
    ///
    /// The given range of page frames must be contiguous and valid for use.
    /// The given range of page frames must not have been allocated before,
    /// as part of either a [`Frame`] or `Segment`.
    pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
        for i in 0..nframes {
            let pa_i = paddr + i * PAGE_SIZE;
            let page = Page::<FrameMeta>::from_unused(pa_i);
            core::mem::forget(page);
        }
        Self {
            inner: Arc::new(SegmentInner {
                start: paddr,
                nframes,
            }),
            range: 0..nframes,
        }
    }

    /// Returns a part of the `Segment`.
    ///
    /// # Panics
@@ -129,7 +84,7 @@ impl Segment {
    }

    fn start_frame_index(&self) -> usize {
        self.inner.start / PAGE_SIZE + self.range.start
        self.inner.start_paddr() / PAGE_SIZE + self.range.start
    }

    /// Returns a raw pointer to the starting virtual address of the `Segment`.
@@ -183,14 +138,19 @@ impl VmIo for Segment {

impl From<Frame> for Segment {
    fn from(frame: Frame) -> Self {
        let paddr = frame.paddr();
        core::mem::forget(frame);
        Self {
            inner: Arc::new(SegmentInner {
                start: paddr,
                nframes: 1,
            }),
            inner: Arc::new(Page::<FrameMeta>::from(frame).into()),
            range: 0..1,
        }
    }
}

impl From<ContPages<FrameMeta>> for Segment {
    fn from(cont_pages: ContPages<FrameMeta>) -> Self {
        let len = cont_pages.len();
        Self {
            inner: Arc::new(cont_pages),
            range: 0..len / PAGE_SIZE,
        }
    }
}
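Both `From` impls above forget the incoming handles and let the shared `ContPages` restore and drop them once the last `Segment` clone goes away. A crate-internal sketch of building a `Segment` either way (not part of this commit; it uses the allocator entry points defined later in `page/allocator.rs`):

// Sketch only: crate-internal code built from items introduced in this diff.
use crate::mm::{
    page::{allocator, cont_pages::ContPages, meta::FrameMeta},
    Frame, Segment, PAGE_SIZE,
};

fn build_segments() -> Option<(Segment, Segment)> {
    // A one-frame segment, via `From<Frame> for Segment`.
    let frame: Frame = allocator::alloc_single::<FrameMeta>()?.into();
    let one_frame: Segment = frame.into();

    // A four-frame segment, wrapping a `ContPages` range directly.
    let pages: ContPages<FrameMeta> = allocator::alloc_contiguous(4 * PAGE_SIZE)?;
    let four_frames: Segment = pages.into();

    Some((one_frame, four_frames))
}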
@@ -11,7 +11,7 @@ use log::debug;

use super::paddr_to_vaddr;
use crate::{
    mm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
    mm::{page::allocator::PAGE_ALLOCATOR, PAGE_SIZE},
    prelude::*,
    sync::SpinLock,
    trap::disable_local,
@@ -105,12 +105,12 @@ fn rescue<const ORDER: usize>(heap: &LockedHeapWithRescue<ORDER>, layout: &Layou
    };

    let allocation_start = {
        let mut frame_allocator = FRAME_ALLOCATOR.get().unwrap().lock();
        let mut page_allocator = PAGE_ALLOCATOR.get().unwrap().lock();
        if num_frames >= MIN_NUM_FRAMES {
            frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
            page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
        } else {
            match frame_allocator.alloc(MIN_NUM_FRAMES) {
                None => frame_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
            match page_allocator.alloc(MIN_NUM_FRAMES) {
                None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?,
                Some(start) => {
                    num_frames = MIN_NUM_FRAMES;
                    start
@@ -12,72 +12,53 @@ use buddy_system_allocator::FrameAllocator;
use log::info;
use spin::Once;

use super::{
    meta::{FrameMeta, PageMeta},
    Page,
};
use crate::{
    boot::memory_region::MemoryRegionType,
    mm::{Frame, FrameVec, Segment, PAGE_SIZE},
    sync::SpinLock,
};
use super::{cont_pages::ContPages, meta::PageMeta, Page};
use crate::{boot::memory_region::MemoryRegionType, mm::PAGE_SIZE, sync::SpinLock};

pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();

pub(crate) fn alloc(nframes: usize) -> Option<FrameVec> {
    FRAME_ALLOCATOR
        .get()
        .unwrap()
        .lock()
        .alloc(nframes)
        .map(|start| {
            let mut vector = Vec::new();
            for i in 0..nframes {
                let paddr = (start + i) * PAGE_SIZE;
                let frame = Frame {
                    page: Page::<FrameMeta>::from_unused(paddr),
                };
                vector.push(frame);
            }
            FrameVec(vector)
        })
}

pub(crate) fn alloc_single<T: PageMeta>() -> Option<Page<T>> {
    FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
/// Allocate a single page.
pub(crate) fn alloc_single<M: PageMeta>() -> Option<Page<M>> {
    PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
        let paddr = idx * PAGE_SIZE;
        Page::<T>::from_unused(paddr)
        Page::<M>::from_unused(paddr)
    })
}

pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
    FRAME_ALLOCATOR
/// Allocate a contiguous range of pages of a given length in bytes.
///
/// # Panics
///
/// The function panics if the length is not base-page-aligned.
pub(crate) fn alloc_contiguous<M: PageMeta>(len: usize) -> Option<ContPages<M>> {
    assert!(len % PAGE_SIZE == 0);
    PAGE_ALLOCATOR
        .get()
        .unwrap()
        .lock()
        .alloc(nframes)
        .map(|start|
            // SAFETY: The range of page frames is contiguous and valid.
            unsafe {
                Segment::new(
                    start * PAGE_SIZE,
                    nframes,
                )
            })
        .alloc(len / PAGE_SIZE)
        .map(|start| ContPages::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len))
}

/// Deallocates a contiguous range of page frames.
/// Allocate pages.
///
/// # Safety
/// The allocated pages are not guaranteed to be contiguous.
/// The total length of the allocated pages is `len`.
///
/// User should ensure the range of page frames is valid.
/// # Panics
///
pub(crate) unsafe fn dealloc(start_index: usize, nframes: usize) {
    FRAME_ALLOCATOR
        .get()
        .unwrap()
        .lock()
        .dealloc(start_index, nframes);
/// The function panics if the length is not base-page-aligned.
pub(crate) fn alloc<M: PageMeta>(len: usize) -> Option<Vec<Page<M>>> {
    assert!(len % PAGE_SIZE == 0);
    let nframes = len / PAGE_SIZE;
    let mut allocator = PAGE_ALLOCATOR.get().unwrap().lock();
    let mut vector = Vec::new();
    for _ in 0..nframes {
        let paddr = allocator.alloc(1)? * PAGE_SIZE;
        let page = Page::<M>::from_unused(paddr);
        vector.push(page);
    }
    Some(vector)
}

pub(crate) fn init() {
@@ -101,5 +82,5 @@ pub(crate) fn init() {
            );
        }
    }
    FRAME_ALLOCATOR.call_once(|| SpinLock::new(allocator));
    PAGE_ALLOCATOR.call_once(|| SpinLock::new(allocator));
}
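The module now exposes three crate-internal entry points that take lengths in bytes and panic on unaligned lengths, while deallocation moves into the `Drop` impl of `Page`. A sketch of how the three relate (not part of this commit):

// Sketch only: exercising the entry points defined above from inside the crate.
use alloc::vec::Vec;

use crate::mm::{
    page::{allocator, cont_pages::ContPages, meta::FrameMeta, Page},
    PAGE_SIZE,
};

fn exercise_allocators() -> Option<()> {
    // One page, with whatever metadata type the caller needs.
    let single: Page<FrameMeta> = allocator::alloc_single()?;

    // Two pages that need not be physically contiguous; the length must be
    // a multiple of PAGE_SIZE or the call panics.
    let scattered: Vec<Page<FrameMeta>> = allocator::alloc(2 * PAGE_SIZE)?;

    // Four physically contiguous pages behind a single lightweight handle.
    let contiguous: ContPages<FrameMeta> = allocator::alloc_contiguous(4 * PAGE_SIZE)?;

    // Dropping the handles is what returns the pages to PAGE_ALLOCATOR.
    drop((single, scattered, contiguous));
    Some(())
}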
framework/aster-frame/src/mm/page/cont_pages.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
// SPDX-License-Identifier: MPL-2.0

//! A contiguous range of pages.

use alloc::vec::Vec;
use core::{mem::ManuallyDrop, ops::Range};

use super::{meta::PageMeta, Page};
use crate::mm::{Paddr, PAGE_SIZE};

/// A contiguous range of physical memory pages.
///
/// This is a handle to many contiguous pages. It will be more lightweight
/// than owning an array of page handles.
///
/// The ownership is achieved by the reference counting mechanism of pages.
/// When constructing a `ContPages`, the page handles are created then
/// forgotten, leaving the reference count. When dropping it, the page
/// handles are restored and dropped, decrementing the reference count.
#[derive(Debug)]
pub struct ContPages<M: PageMeta> {
    range: Range<Paddr>,
    _marker: core::marker::PhantomData<M>,
}

impl<M: PageMeta> Drop for ContPages<M> {
    fn drop(&mut self) {
        for i in self.range.clone().step_by(PAGE_SIZE) {
            // SAFETY: for each page there would be a forgotten handle
            // when creating the `ContPages` object.
            drop(unsafe { Page::<M>::from_raw(i) });
        }
    }
}

impl<M: PageMeta> ContPages<M> {
    /// Create a new `ContPages` from unused pages.
    ///
    /// # Panics
    ///
    /// The function panics if:
    /// - the physical address is invalid or not aligned;
    /// - any of the pages are already in use.
    pub fn from_unused(range: Range<Paddr>) -> Self {
        for i in range.clone().step_by(PAGE_SIZE) {
            let _ = ManuallyDrop::new(Page::<M>::from_unused(i));
        }
        Self {
            range,
            _marker: core::marker::PhantomData,
        }
    }

    /// Get the start physical address of the contiguous pages.
    pub fn start_paddr(&self) -> Paddr {
        self.range.start
    }

    /// Get the length in bytes of the contiguous pages.
    pub fn len(&self) -> usize {
        self.range.end - self.range.start
    }
}

impl<M: PageMeta> From<Page<M>> for ContPages<M> {
    fn from(page: Page<M>) -> Self {
        let pa = page.paddr();
        let _ = ManuallyDrop::new(page);
        Self {
            range: pa..pa + PAGE_SIZE,
            _marker: core::marker::PhantomData,
        }
    }
}

impl<M: PageMeta> From<ContPages<M>> for Vec<Page<M>> {
    fn from(pages: ContPages<M>) -> Self {
        let vector = pages
            .range
            .clone()
            .step_by(PAGE_SIZE)
            .map(|i|
                // SAFETY: for each page there would be a forgotten handle
                // when creating the `ContPages` object.
                unsafe { Page::<M>::from_raw(i) })
            .collect();
        let _ = ManuallyDrop::new(pages);
        vector
    }
}
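The forget-on-construct / restore-on-drop pattern above lets a `ContPages` own an arbitrary number of pages through nothing more than an address range. A crate-internal sketch of claiming a range and splitting it back into individual handles (not part of this commit; the caller must supply a page-aligned, unused start address):

// Sketch only: crate-internal code; `start` must be page-aligned and unused,
// or `from_unused` panics.
use alloc::vec::Vec;

use crate::mm::{
    page::{cont_pages::ContPages, meta::FrameMeta, Page},
    Paddr, PAGE_SIZE,
};

fn claim_and_split(start: Paddr) -> Vec<Page<FrameMeta>> {
    // Claim four unused pages; their handles are created and then forgotten,
    // leaving only the reference counts behind.
    let pages = ContPages::<FrameMeta>::from_unused(start..start + 4 * PAGE_SIZE);
    assert_eq!(pages.len(), 4 * PAGE_SIZE);

    // Converting into `Vec<Page<_>>` restores the forgotten handles, so each
    // page is afterwards owned (and eventually freed) individually.
    pages.into()
}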
@@ -50,7 +50,7 @@ use super::Page;
use crate::{
    arch::mm::{PageTableEntry, PagingConsts},
    mm::{
        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, page_size,
        kspace::BOOT_PAGE_TABLE, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, page_size,
        page_table::PageTableEntryTrait, CachePolicy, Paddr, PageFlags, PageProperty,
        PagingConstsTrait, PagingLevel, PrivilegedPageFlags, PAGE_SIZE,
    },
@@ -230,13 +230,7 @@ pub(crate) fn init() -> Vec<Range<Paddr>> {

fn alloc_meta_pages(nframes: usize) -> Vec<Paddr> {
    let mut meta_pages = Vec::new();
    let start_frame = FRAME_ALLOCATOR
        .get()
        .unwrap()
        .lock()
        .alloc(nframes)
        .unwrap()
        * PAGE_SIZE;
    let start_frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(nframes).unwrap() * PAGE_SIZE;
    // Zero them out as initialization.
    let vaddr = paddr_to_vaddr(start_frame) as *mut u8;
    unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE * nframes) };
@@ -15,6 +15,7 @@
//! the handle only a pointer to the metadata.

pub(crate) mod allocator;
pub(in crate::mm) mod cont_pages;
pub(in crate::mm) mod meta;

use core::{
@@ -40,51 +41,17 @@ pub struct Page<M: PageMeta> {
unsafe impl<M: PageMeta> Send for Page<M> {}
unsafe impl<M: PageMeta> Sync for Page<M> {}

/// Errors that can occur when getting a page handle.
#[derive(Debug)]
pub enum PageHandleError {
    /// The physical address is out of range.
    OutOfRange,
    /// The physical address is not aligned to the page size.
    NotAligned,
    /// The page is already in use.
    InUse,
}

impl<M: PageMeta> Page<M> {
    /// Get a `Page` handle with a specific usage from a raw, unused page.
    ///
    /// If the provided physical address is invalid or not aligned, this
    /// function will panic.
    /// # Panics
    ///
    /// If the provided page is already in use this function will block
    /// until the page is released. This is a workaround since the page
    /// allocator is decoupled from metadata management and page would be
    /// reusable in the page allocator before resetting all metadata.
    ///
    /// TODO: redesign the page allocator to be aware of metadata management.
    /// The function panics if:
    /// - the physical address is out of bound or not aligned;
    /// - the page is already in use.
    pub fn from_unused(paddr: Paddr) -> Self {
        loop {
            match Self::try_from_unused(paddr) {
                Ok(page) => return page,
                Err(PageHandleError::InUse) => {
                    // Wait for the page to be released.
                    core::hint::spin_loop();
                }
                Err(e) => panic!("Failed to get a page handle: {:?}", e),
            }
        }
    }

    /// Get a `Page` handle with a specific usage from a raw, unused page.
    pub(in crate::mm) fn try_from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
        if paddr % PAGE_SIZE != 0 {
            return Err(PageHandleError::NotAligned);
        }
        if paddr > MAX_PADDR.load(Ordering::Relaxed) {
            return Err(PageHandleError::OutOfRange);
        }

        assert!(paddr % PAGE_SIZE == 0);
        assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
        let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
        let ptr = vaddr as *const MetaSlot;

@@ -93,18 +60,21 @@ impl<M: PageMeta> Page<M> {

        usage
            .compare_exchange(0, M::USAGE as u8, Ordering::SeqCst, Ordering::Relaxed)
            .map_err(|_| PageHandleError::InUse)?;
            .expect("page already in use when trying to get a new handle");

        let old_get_ref_count = get_ref_count.fetch_add(1, Ordering::Relaxed);
        debug_assert!(old_get_ref_count == 0);

        // Initialize the metadata
        unsafe { (ptr as *mut M).write(M::default()) }
        // SAFETY: The pointer points to the first byte of the `MetaSlot`
        // structure, and the layout ensures enough space for `M`. The original
        // value does not represent any object that needs to be dropped.
        unsafe { (ptr as *mut M).write(M::default()) };

        Ok(Self {
        Self {
            ptr,
            _marker: PhantomData,
        })
        }
    }

    /// Forget the handle to the page.
@@ -191,6 +161,15 @@ impl<M: PageMeta> Drop for Page<M> {
            // No handles means no usage. This also releases the page as unused for further
            // calls to `Page::from_unused`.
            unsafe { &*self.ptr }.usage.store(0, Ordering::Release);
            // Deallocate the page.
            // It would return the page to the allocator for further use. This would be done
            // after the release of the metadata to avoid re-allocation before the metadata
            // is reset.
            allocator::PAGE_ALLOCATOR
                .get()
                .unwrap()
                .lock()
                .dealloc(self.paddr() / PAGE_SIZE, 1);
        };
    }
}
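The drop path above resets the metadata, clears the usage flag, and only then returns the page to `PAGE_ALLOCATOR`, so a physical page cannot be re-allocated while stale metadata is still attached to it. A crate-internal sketch of the forget/`from_raw` pattern that `ContPages`, `Segment`, and the page-table code build on (not part of this commit):

// Sketch only: crate-internal code; `paddr` must be a page-aligned, unused
// physical address.
use core::mem::ManuallyDrop;

use crate::mm::{
    page::{meta::FrameMeta, Page},
    Paddr,
};

fn leak_then_reclaim(paddr: Paddr) {
    // Claim the page; this sets its usage flag and reference count ...
    let page = Page::<FrameMeta>::from_unused(paddr);
    // ... then deliberately leak the handle, keeping only the count.
    let _ = ManuallyDrop::new(page);

    // Later, turn the raw address back into an owning handle.
    // SAFETY: exactly one forgotten handle exists for this page.
    let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };

    // Dropping the last handle resets the metadata, marks the page unused,
    // and hands it back to PAGE_ALLOCATOR, in that order.
    drop(page);
}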
@@ -10,7 +10,7 @@ use super::{pte_index, PageTableEntryTrait};
use crate::{
    arch::mm::{PageTableEntry, PagingConsts},
    mm::{
        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty,
        nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, PageProperty,
        PagingConstsTrait, Vaddr, PAGE_SIZE,
    },
};
@@ -144,7 +144,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
    }

    fn alloc_frame(&mut self) -> FrameNumber {
        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
        let frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
        self.frames.push(frame);
        // Zero it out.
        let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
@@ -156,7 +156,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
    fn drop(&mut self) {
        for frame in &self.frames {
            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
            PAGE_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
        }
    }
}
@@ -33,7 +33,7 @@ use crate::{
    mm::{
        paddr_to_vaddr,
        page::{
            allocator::FRAME_ALLOCATOR,
            allocator::PAGE_ALLOCATOR,
            meta::{FrameMeta, PageMeta, PageTablePageMeta, PageUsage},
            Page,
        },
@@ -216,8 +216,8 @@ where
    /// set the lock bit for performance as it is exclusive and unlocking is an
    /// extra unnecessary expensive operation.
    pub(super) fn alloc(level: PagingLevel) -> Self {
        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(frame);
        let page_paddr = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap() * PAGE_SIZE;
        let mut page = Page::<PageTablePageMeta<E, C>>::from_unused(page_paddr);

        // The lock is initialized as held.
        page.meta().lock.store(1, Ordering::Relaxed);
@@ -293,7 +293,7 @@ where
            // the reference count so we restore and forget a cloned one.
            let page = unsafe { Page::<FrameMeta>::from_raw(paddr) };
            core::mem::forget(page.clone());
            Child::Frame(Frame { page })
            Child::Frame(page.into())
        } else {
            Child::Untracked(paddr)
        }
@@ -552,12 +552,5 @@ where
                }
            }
        }

        // Recycle this page table node.
        FRAME_ALLOCATOR
            .get()
            .unwrap()
            .lock()
            .dealloc(paddr / PAGE_SIZE, 1);
    }
}