diff --git a/kernel/src/vm/vmo/options.rs b/kernel/src/vm/vmo/options.rs
index 0455122d4..b669ba02a 100644
--- a/kernel/src/vm/vmo/options.rs
+++ b/kernel/src/vm/vmo/options.rs
@@ -141,12 +141,12 @@ fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result
diff --git a/ostd/src/mm/frame/options.rs b/ostd/src/mm/frame/options.rs
--- a/ostd/src/mm/frame/options.rs
+++ b/ostd/src/mm/frame/options.rs
@@ ... @@ impl FrameAllocOptions {
-    pub fn alloc(&self) -> Result<Vec<Frame>> {
-        let pages = if self.is_contiguous {
-            page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| FrameMeta::default())
-                .ok_or(Error::NoMemory)?
-                .into()
-        } else {
-            page::allocator::alloc(self.nframes * PAGE_SIZE, |_| FrameMeta::default())
-                .ok_or(Error::NoMemory)?
-        };
-        let frames: Vec<_> = pages.into_iter().map(|page| Frame { page }).collect();
-        if !self.uninit {
-            for frame in frames.iter() {
-                frame.writer().fill(0);
-            }
-        }
-
-        Ok(frames)
-    }
-
     /// Allocates a single page frame according to the given options.
     pub fn alloc_single(&self) -> Result<Frame> {
         if self.nframes != 1 {
@@ -113,7 +93,6 @@ fn test_alloc_dealloc() {
     // Here we allocate and deallocate frames in random orders to test the allocator.
     // We expect the test to fail if the underlying implementation panics.
     let single_options = FrameAllocOptions::new(1);
-    let multi_options = FrameAllocOptions::new(10);
     let mut contiguous_options = FrameAllocOptions::new(10);
     contiguous_options.is_contiguous(true);
     let mut remember_vec = Vec::new();
@@ -126,8 +105,6 @@ fn test_alloc_dealloc() {
         }
         let contiguous_segment = contiguous_options.alloc_contiguous().unwrap();
         drop(contiguous_segment);
-        let multi_frames = multi_options.alloc().unwrap();
-        remember_vec.extend(multi_frames.into_iter());
         remember_vec.pop();
     }
 }
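With the multi-frame `alloc` gone from `FrameAllocOptions`, a caller that needs several frames either requests a physically contiguous segment via `alloc_contiguous()` or allocates single frames in a loop. Below is a minimal sketch of the loop pattern, assuming it sits in a module where `Frame`, `FrameAllocOptions`, `Vec`, and the crate's `Result` alias are in scope; the helper name is illustrative and not part of this patch.

    /// Illustrative only: gather `nframes` frames with no contiguity guarantee
    /// by repeating single-frame allocations, standing in for the removed `alloc()`.
    fn collect_single_frames(nframes: usize) -> Result<Vec<Frame>> {
        // `FrameAllocOptions::new(1)` + `alloc_single()` is the remaining
        // single-frame path, as exercised by `test_alloc_dealloc` above.
        let single_options = FrameAllocOptions::new(1);
        let mut frames = Vec::with_capacity(nframes);
        for _ in 0..nframes {
            frames.push(single_options.alloc_single()?);
        }
        Ok(frames)
    }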
diff --git a/ostd/src/mm/page/allocator.rs b/ostd/src/mm/page/allocator.rs
index 5b7660676..d563a1ba1 100644
--- a/ostd/src/mm/page/allocator.rs
+++ b/ostd/src/mm/page/allocator.rs
@@ -5,8 +5,6 @@
 //! TODO: Decouple it with the frame allocator in [`crate::mm::frame::options`] by
 //! allocating pages rather untyped memory from this module.
 
-use alloc::vec::Vec;
-
 use align_ext::AlignExt;
 use buddy_system_allocator::FrameAllocator;
 use log::info;
@@ -102,34 +100,6 @@ where
     })
 }
 
-/// Allocate pages.
-///
-/// The allocated pages are not guaranteed to be contiguous.
-/// The total length of the allocated pages is `len`.
-///
-/// The caller must provide a closure to initialize metadata for all the pages.
-/// The closure receives the physical address of the page and returns the
-/// metadata, which is similar to [`core::array::from_fn`].
-///
-/// # Panics
-///
-/// The function panics if the length is not base-page-aligned.
-pub(crate) fn alloc<M: PageMeta, F>(len: usize, mut metadata_fn: F) -> Option<Vec<Page<M>>>
-where
-    F: FnMut(Paddr) -> M,
-{
-    assert!(len % PAGE_SIZE == 0);
-    let nframes = len / PAGE_SIZE;
-    let mut allocator = PAGE_ALLOCATOR.get().unwrap().disable_irq().lock();
-    let mut vector = Vec::new();
-    for _ in 0..nframes {
-        let paddr = allocator.alloc(1)? * PAGE_SIZE;
-        let page = Page::<M>::from_unused(paddr, metadata_fn(paddr));
-        vector.push(page);
-    }
-    Some(vector)
-}
-
 pub(crate) fn init() {
     let regions = crate::boot::memory_regions();
     let mut total: usize = 0;
diff --git a/ostd/src/mm/page_table/test.rs b/ostd/src/mm/page_table/test.rs
index 6e27aa1e7..4de0ea862 100644
--- a/ostd/src/mm/page_table/test.rs
+++ b/ostd/src/mm/page_table/test.rs
@@ -173,12 +173,12 @@ fn test_base_protect_query() {
 
     let from_ppn = 1..1000;
     let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
-    let to = allocator::alloc(999 * PAGE_SIZE, |_| FrameMeta::default()).unwrap();
+    let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| FrameMeta::default()).unwrap();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe {
         let mut cursor = pt.cursor_mut(&from).unwrap();
         for page in to {
-            cursor.map(page.clone().into(), prop);
+            cursor.map(page.into(), prop);
         }
     }
     for (item, i) in pt.cursor(&from).unwrap().zip(from_ppn) {
diff --git a/ostd/src/task/kernel_stack.rs b/ostd/src/task/kernel_stack.rs
index 9d916c198..7efd19cfe 100644
--- a/ostd/src/task/kernel_stack.rs
+++ b/ostd/src/task/kernel_stack.rs
@@ -47,13 +47,14 @@ impl KernelStack {
         let mut new_kvirt_area = KVirtArea::<Tracked>::new(KERNEL_STACK_SIZE + 4 * PAGE_SIZE);
         let mapped_start = new_kvirt_area.range().start + 2 * PAGE_SIZE;
         let mapped_end = mapped_start + KERNEL_STACK_SIZE;
-        let pages = allocator::alloc(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
+        let pages =
+            allocator::alloc_contiguous(KERNEL_STACK_SIZE, |_| KernelStackMeta::default()).unwrap();
         let prop = PageProperty {
             flags: PageFlags::RW,
             cache: CachePolicy::Writeback,
             priv_flags: PrivilegedPageFlags::empty(),
         };
-        new_kvirt_area.map_pages(mapped_start..mapped_end, pages.iter().cloned(), prop);
+        new_kvirt_area.map_pages(mapped_start..mapped_end, pages, prop);
         Ok(Self {
             kvirt_area: new_kvirt_area,
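For ostd-internal callers, `alloc_contiguous` is now the only multi-page entry point left in `mm::page::allocator`: it takes a length in bytes plus a closure that builds each page's metadata from its physical address, and it returns `None` when memory is exhausted. The sketch below mirrors the kernel-stack call site above; the helper name is hypothetical, and the `ContPages` return type is an assumption that does not appear in this patch.

    /// Hypothetical helper: allocate one physically contiguous run of pages for
    /// a kernel stack, each page tagged with `KernelStackMeta`.
    fn alloc_kernel_stack_pages() -> Option<ContPages<KernelStackMeta>> {
        // The closure receives each page's physical address and returns its
        // metadata; `None` means the underlying buddy allocator ran out of memory.
        allocator::alloc_contiguous(KERNEL_STACK_SIZE, |_paddr| KernelStackMeta::default())
    }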