diff --git a/Cargo.lock b/Cargo.lock index 2649f50d9..f31be35e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,6 +211,7 @@ dependencies = [ "libflate", "log", "lru", + "osdk-frame-allocator", "ostd", "paste", "rand", @@ -1254,6 +1255,14 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "osdk-frame-allocator" +version = "0.1.0" +dependencies = [ + "log", + "ostd", +] + [[package]] name = "osdk-test-kernel" version = "0.11.3" diff --git a/Cargo.toml b/Cargo.toml index f48c5dac3..d4f85f477 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "2" members = [ + "osdk/deps/frame-allocator", "osdk/deps/test-kernel", "ostd", "ostd/libs/align_ext", diff --git a/Makefile b/Makefile index 8df4b826e..478bca236 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,7 @@ NON_OSDK_CRATES := \ # In contrast, OSDK crates depend on OSTD (or being `ostd` itself) # and need to be built or tested with OSDK. OSDK_CRATES := \ + osdk/deps/frame-allocator \ osdk/deps/test-kernel \ ostd \ ostd/libs/linux-bzimage/setup \ diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 06767c03c..58d494e2c 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -19,6 +19,7 @@ aster-virtio = { path = "comps/virtio" } aster-rights = { path = "libs/aster-rights" } component = { path = "libs/comp-sys/component" } controlled = { path = "libs/comp-sys/controlled" } +osdk-frame-allocator = { path = "../osdk/deps/frame-allocator" } ostd = { path = "../ostd" } typeflags = { path = "libs/typeflags" } typeflags-util = { path = "libs/typeflags-util" } diff --git a/kernel/src/fs/procfs/meminfo.rs b/kernel/src/fs/procfs/meminfo.rs index 1ceef60cc..3783ee381 100644 --- a/kernel/src/fs/procfs/meminfo.rs +++ b/kernel/src/fs/procfs/meminfo.rs @@ -8,8 +8,6 @@ use alloc::format; -use ostd::mm::stat; - use crate::{ fs::{ procfs::template::{FileOps, ProcFileBuilder}, @@ -27,21 +25,17 @@ impl MemInfoFileOps { } } -/// Total memory in the entire system in bytes. -fn mem_total() -> usize { - stat::mem_total() -} - -/// An estimation of how much memory is available for starting new -/// applications, without disk operations. -fn mem_available() -> usize { - stat::mem_available() -} - impl FileOps for MemInfoFileOps { fn data(&self) -> Result> { - let total = mem_total() / 1024; - let available = mem_available() / 1024; + // The total amount of physical memory available to the system. + let total = crate::vm::mem_total(); + // An estimation of how much memory is available for starting new + // applications, without disk operations. + let available = osdk_frame_allocator::load_total_free_size(); + + // Convert the values to KiB. 
+ let total = total / 1024; + let available = available / 1024; let free = total - available; let output = format!( "MemTotal:\t{} kB\nMemFree:\t{} kB\nMemAvailable:\t{} kB\n", diff --git a/kernel/src/syscall/sysinfo.rs b/kernel/src/syscall/sysinfo.rs index b3e6c6f7e..dad7ed620 100644 --- a/kernel/src/syscall/sysinfo.rs +++ b/kernel/src/syscall/sysinfo.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: MPL-2.0 use aster_time::read_monotonic_time; -use ostd::mm::stat::{mem_available, mem_total}; use super::SyscallReturn; use crate::prelude::*; @@ -26,8 +25,8 @@ pub struct sysinfo { pub fn sys_sysinfo(sysinfo_addr: Vaddr, ctx: &Context) -> Result { let info = sysinfo { uptime: read_monotonic_time().as_secs() as i64, - totalram: mem_total() as u64, - freeram: mem_available() as u64, + totalram: crate::vm::mem_total() as u64, + freeram: osdk_frame_allocator::load_total_free_size() as u64, ..Default::default() // TODO: add other system information }; ctx.user_space().write_val(sysinfo_addr, &info)?; diff --git a/kernel/src/vm/mod.rs b/kernel/src/vm/mod.rs index 6a932007f..d29e02503 100644 --- a/kernel/src/vm/mod.rs +++ b/kernel/src/vm/mod.rs @@ -16,8 +16,27 @@ //! In Asterinas, VMARs and VMOs, as well as other capabilities, are implemented //! as zero-cost capabilities. +use osdk_frame_allocator::FrameAllocator; + pub mod page_fault_handler; pub mod perms; pub mod util; pub mod vmar; pub mod vmo; + +#[ostd::global_frame_allocator] +static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator; + +/// Total physical memory in the entire system in bytes. +pub fn mem_total() -> usize { + use ostd::boot::{boot_info, memory_region::MemoryRegionType}; + + let regions = &boot_info().memory_regions; + let total = regions + .iter() + .filter(|region| region.typ() == MemoryRegionType::Usable) + .map(|region| region.len()) + .sum::(); + + total +} diff --git a/osdk/deps/frame-allocator/Cargo.toml b/osdk/deps/frame-allocator/Cargo.toml new file mode 100644 index 000000000..de281c676 --- /dev/null +++ b/osdk/deps/frame-allocator/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "osdk-frame-allocator" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = "0.4" +ostd = { version = "0.11.1", path = "../../../ostd" } + +[lints] +workspace = true diff --git a/osdk/deps/frame-allocator/src/allocator.rs b/osdk/deps/frame-allocator/src/allocator.rs new file mode 100644 index 000000000..9334634f1 --- /dev/null +++ b/osdk/deps/frame-allocator/src/allocator.rs @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: MPL-2.0 + +use core::{ + alloc::Layout, + cell::RefCell, + ops::DerefMut, + sync::atomic::{AtomicUsize, Ordering}, +}; + +use ostd::{ + cpu::{all_cpus, PinCurrentCpu}, + cpu_local, + mm::{frame::GlobalFrameAllocator, Paddr, PAGE_SIZE}, + sync::{LocalIrqDisabled, SpinLock}, + trap, +}; + +use crate::chunk::{size_of_order, BuddyOrder}; + +use super::set::BuddySet; + +/// The global free buddies. +static GLOBAL_POOL: SpinLock, LocalIrqDisabled> = + SpinLock::new(BuddySet::new_empty()); +static GLOBAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0); + +// CPU-local free buddies. +cpu_local! { + static LOCAL_POOL: RefCell> = RefCell::new(BuddySet::new_empty()); + static LOCAL_POOL_SIZE: AtomicUsize = AtomicUsize::new(0); +} + +/// Maximum supported order of the buddy system. +/// +/// i.e., it is the number of classes of free blocks. It determines the +/// maximum size of each allocation. +/// +/// A maximum buddy order of 32 supports up to 4KiB*2^31 = 8 TiB of chunks. 
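+///
+/// Equivalently, since an order-`o` chunk spans `(1 << o) * PAGE_SIZE` bytes
+/// (see `size_of_order` in `chunk.rs`), the largest class here is order 31.
+/// A small illustrative sketch, assuming the 4 KiB base pages stated above:
+///
+/// ```ignore
+/// assert_eq!(size_of_order(0), 4096);             // a single frame
+/// assert_eq!(size_of_order(31), 4096usize << 31); // 8 TiB
+/// ```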
+const MAX_BUDDY_ORDER: BuddyOrder = 32; + +/// Maximum supported order of the buddy system for CPU-local buddy system. +/// +/// Since large blocks are rarely allocated, caching such blocks will lead +/// to much fragmentation. +/// +/// Lock guards are also allocated on stack. We can limit the stack usage +/// for common paths in this way. +/// +/// A maximum local buddy order of 18 supports up to 4KiB*2^17 = 512 MiB of +/// chunks. +const MAX_LOCAL_BUDDY_ORDER: BuddyOrder = 18; + +/// The global frame allocator provided by OSDK. +/// +/// It is a singleton that provides frame allocation for the kernel. If +/// multiple instances of this struct are created, all the member functions +/// will eventually access the same allocator. +pub struct FrameAllocator; + +impl GlobalFrameAllocator for FrameAllocator { + fn alloc(&self, layout: Layout) -> Option { + let irq_guard = trap::disable_local(); + let local_pool_cell = LOCAL_POOL.get_with(&irq_guard); + let mut local_pool = local_pool_cell.borrow_mut(); + + let size_order = greater_order_of(layout.size()); + let align_order = greater_order_of(layout.align()); + + let order = size_order.max(align_order); + let mut chunk_addr = None; + + if order < MAX_LOCAL_BUDDY_ORDER { + chunk_addr = local_pool.alloc_chunk(order); + } + + // Fall back to the global free lists if the local free lists are empty. + if chunk_addr.is_none() { + chunk_addr = alloc_from_global_pool(order); + } + // TODO: On memory pressure the global pool may be not enough. We may need + // to merge all buddy chunks from the local pools to the global pool and + // try again. + + // If the alignment order is larger than the size order, we need to split + // the chunk and return the rest part back to the free lists. + if align_order > size_order { + if let Some(chunk_addr) = chunk_addr { + let addr = chunk_addr + size_of_order(size_order); + let size = size_of_order(align_order) - size_of_order(size_order); + self.add_free_memory(addr, size); + } + } else { + balancing::balance(local_pool.deref_mut()); + } + + LOCAL_POOL_SIZE + .get_on_cpu(irq_guard.current_cpu()) + .store(local_pool.total_size(), Ordering::Relaxed); + + chunk_addr + } + + fn add_free_memory(&self, mut addr: Paddr, mut size: usize) { + let irq_guard = trap::disable_local(); + let local_pool_cell = LOCAL_POOL.get_with(&irq_guard); + let mut local_pool = local_pool_cell.borrow_mut(); + + // Split the range into chunks and return them to the local free lists + // respectively. + while size > 0 { + let next_chunk_order = max_order_from(addr).min(lesser_order_of(size)); + + if next_chunk_order >= MAX_LOCAL_BUDDY_ORDER { + dealloc_to_global_pool(addr, next_chunk_order); + } else { + local_pool.insert_chunk(addr, next_chunk_order); + } + + size -= size_of_order(next_chunk_order); + addr += size_of_order(next_chunk_order); + } + + balancing::balance(local_pool.deref_mut()); + LOCAL_POOL_SIZE + .get_on_cpu(irq_guard.current_cpu()) + .store(local_pool.total_size(), Ordering::Relaxed); + } +} + +fn alloc_from_global_pool(order: BuddyOrder) -> Option { + let mut lock_guard = GLOBAL_POOL.lock(); + let res = lock_guard.alloc_chunk(order); + GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed); + res +} + +fn dealloc_to_global_pool(addr: Paddr, order: BuddyOrder) { + let mut lock_guard = GLOBAL_POOL.lock(); + lock_guard.insert_chunk(addr, order); + GLOBAL_POOL_SIZE.store(lock_guard.total_size(), Ordering::Relaxed); +} + +/// Loads the total size (in bytes) of free memory in the allocator. 
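+///
+/// The value is the sum of the global pool counter and all CPU-local pool
+/// counters (all maintained with relaxed atomics), so it is an estimate
+/// rather than an exact snapshot under concurrent allocation.
+///
+/// A minimal usage sketch (this is how the kernel's `/proc/meminfo` and
+/// `sysinfo` code in this patch consume it):
+///
+/// ```ignore
+/// let free_bytes = osdk_frame_allocator::load_total_free_size();
+/// let free_kib = free_bytes / 1024;
+/// ```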
+pub fn load_total_free_size() -> usize { + let mut total = 0; + total += GLOBAL_POOL_SIZE.load(Ordering::Relaxed); + for cpu in all_cpus() { + total += LOCAL_POOL_SIZE.get_on_cpu(cpu).load(Ordering::Relaxed); + } + total +} + +/// Returns an order that covers at least the given size. +fn greater_order_of(size: usize) -> BuddyOrder { + let size = size / PAGE_SIZE; + size.next_power_of_two().trailing_zeros() as BuddyOrder +} + +/// Returns a order that covers at most the given size. +fn lesser_order_of(size: usize) -> BuddyOrder { + let size = size / PAGE_SIZE; + (usize::BITS - size.leading_zeros() - 1) as BuddyOrder +} + +/// Returns the maximum order starting from the address. +/// +/// If the start address is not aligned to the order, the address/order pair +/// cannot form a buddy chunk. +/// +/// # Panics +/// +/// Panics if the address is not page-aligned in debug mode. +fn max_order_from(addr: Paddr) -> BuddyOrder { + (addr.trailing_zeros() - PAGE_SIZE.trailing_zeros()) as BuddyOrder +} + +pub mod balancing { + //! Controlling the balancing between CPU-local free pools and the global free pool. + + use core::sync::atomic::Ordering; + + use ostd::cpu::num_cpus; + + use super::{ + lesser_order_of, BuddyOrder, BuddySet, GLOBAL_POOL, GLOBAL_POOL_SIZE, MAX_LOCAL_BUDDY_ORDER, + }; + + use crate::chunk::size_of_order; + + /// Controls the expected size of cache for each CPU-local free pool. + /// + /// The expected size will be the size of `GLOBAL_POOL` divided by the number + /// of the CPUs, and then divided by this constant. + const CACHE_EXPECTED_PORTION: usize = 2; + + /// Returns the expected size of cache for each CPU-local free pool. + /// + /// It depends on the size of the global free pool. + fn cache_expected_size(global_size: usize) -> usize { + global_size / num_cpus() / CACHE_EXPECTED_PORTION + } + + /// Controls the minimal size of cache for each CPU-local free pool. + /// + /// The minimal will be the expected size divided by this constant. + const CACHE_MINIMAL_PORTION: usize = 8; + + /// Returns the minimal size of cache for each CPU-local free pool. + /// + /// It depends on the size of the global free pool. + fn cache_minimal_size(global_size: usize) -> usize { + cache_expected_size(global_size) / CACHE_MINIMAL_PORTION + } + + /// Controls the maximal size of cache for each CPU-local free pool. + /// + /// The maximal will be the expected size multiplied by this constant. + const CACHE_MAXIMAL_MULTIPLIER: usize = 2; + + /// Returns the maximal size of cache for each CPU-local free pool. + /// + /// It depends on the size of the global free pool. + fn cache_maximal_size(global_size: usize) -> usize { + cache_expected_size(global_size) * CACHE_MAXIMAL_MULTIPLIER + } + + /// Balances a local cache and the global free pool. + pub fn balance(local: &mut BuddySet) { + let global_size = GLOBAL_POOL_SIZE.load(Ordering::Relaxed); + + let minimal_local_size = cache_minimal_size(global_size); + let expected_local_size = cache_expected_size(global_size); + let maximal_local_size = cache_maximal_size(global_size); + + let local_size = local.total_size(); + + if local_size >= maximal_local_size { + // Move local frames to the global pool. 
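+            // (If the global pool is empty, all three thresholds above are
+            // zero, so this branch is also reached with an empty local pool;
+            // the early return below handles that case.)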
+ if local_size == 0 { + return; + } + + let expected_removal = local_size - expected_local_size; + let lesser_order = lesser_order_of(expected_removal); + let mut global_pool_lock = GLOBAL_POOL.lock(); + + balance_to(local, &mut *global_pool_lock, lesser_order); + + GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed); + } else if local_size < minimal_local_size { + // Move global frames to the local pool. + if global_size == 0 { + return; + } + + let expected_allocation = expected_local_size - local_size; + let lesser_order = lesser_order_of(expected_allocation); + let mut global_pool_lock = GLOBAL_POOL.lock(); + + balance_to(&mut *global_pool_lock, local, lesser_order); + + GLOBAL_POOL_SIZE.store(global_pool_lock.total_size(), Ordering::Relaxed); + } + } + + /// Balances from `a` to `b`. + fn balance_to( + a: &mut BuddySet, + b: &mut BuddySet, + order: BuddyOrder, + ) { + let allocated_from_a = a.alloc_chunk(order); + + if let Some(addr) = allocated_from_a { + if order >= MAX_ORDER2 { + let inserted_order = MAX_ORDER2 - 1; + for i in 0..(1 << (order - inserted_order)) as usize { + let split_addr = addr + size_of_order(inserted_order) * i; + b.insert_chunk(split_addr, inserted_order); + } + } else { + b.insert_chunk(addr, order); + } + } else { + // Maybe the chunk size is too large. + // Try to reduce the order and balance again. + if order > 1 { + balance_to(a, b, order - 1); + balance_to(a, b, order - 1); + } + } + } +} diff --git a/osdk/deps/frame-allocator/src/chunk.rs b/osdk/deps/frame-allocator/src/chunk.rs new file mode 100644 index 000000000..ecc1017c4 --- /dev/null +++ b/osdk/deps/frame-allocator/src/chunk.rs @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: MPL-2.0 + +use ostd::{ + impl_frame_meta_for, + mm::{frame::linked_list::Link, Paddr, UniqueFrame, PAGE_SIZE}, +}; + +/// The order of a buddy chunk. +/// +/// The size of a buddy chunk is `(1 << order) * PAGE_SIZE`. +pub(crate) type BuddyOrder = usize; + +/// Returns the size of a buddy chunk of the given order. +pub(crate) const fn size_of_order(order: BuddyOrder) -> usize { + (1 << order) * PAGE_SIZE +} + +/// The metadata of the head frame in a free buddy chunk. +#[derive(Debug)] +pub(crate) struct FreeHeadMeta { + /// The order of the buddy chunk. + order: BuddyOrder, +} + +impl_frame_meta_for!(FreeHeadMeta); + +impl FreeHeadMeta { + /// Returns the order of the buddy chunk. + pub(crate) fn order(&self) -> BuddyOrder { + self.order + } +} + +/// A free buddy chunk. +#[derive(Debug)] +pub(crate) struct FreeChunk { + head: UniqueFrame>, +} + +impl FreeChunk { + /// Gets a buddy chunk from the head frame. + /// + /// The caller must ensure that the head frame should be uniquely free. + /// Otherwise it waits indefinitely. + /// + /// We need a unique ownership of this chunk. Other threads may be + /// deallocating it's buddy and inspecting this chunk (see + /// [`Self::buddy`]). So we may spuriously fail to acquire it. But + /// they will soon release it so we can acquire it ultimately. + pub(crate) fn from_free_head(head: UniqueFrame>) -> FreeChunk { + FreeChunk { head } + } + + /// Gets a buddy chunk from unused frames. + /// + /// # Panics + /// + /// Panics if: + /// - the range is not actually unused; + /// - the address is not aligned to the order. 
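+    ///
+    /// For illustration (the addresses are hypothetical, and the frames must
+    /// of course be unused as stated above): an order-2 chunk covers
+    /// `4 * PAGE_SIZE` bytes, so its start address must be a multiple of
+    /// `4 * PAGE_SIZE`:
+    ///
+    /// ```ignore
+    /// let ok = FreeChunk::from_unused(0x10_0000, 2);  // 0x10_0000 % 0x4000 == 0
+    /// let bad = FreeChunk::from_unused(0x10_1000, 2); // panics: misaligned
+    /// ```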
+ pub(crate) fn from_unused(addr: Paddr, order: BuddyOrder) -> FreeChunk { + assert!(addr % size_of_order(order) == 0); + + let head = UniqueFrame::from_unused(addr, Link::new(FreeHeadMeta { order })) + .expect("The head frame is not unused"); + + #[cfg(debug_assertions)] + { + use ostd::mm::{ + frame::meta::{AnyFrameMeta, GetFrameError}, + Frame, + }; + + let end = addr + size_of_order(order); + for paddr in (addr + PAGE_SIZE..end).step_by(PAGE_SIZE) { + let Err(GetFrameError::Unused) = Frame::::from_in_use(paddr) + else { + panic!("The range is not actually unused"); + }; + } + } + + FreeChunk { head } + } + + /// Turns the free chunk into a pointer to the head frame. + pub(crate) fn into_unique_head(self) -> UniqueFrame> { + self.head + } + + /// Returns the order of the buddy chunk. + pub(crate) fn order(&self) -> BuddyOrder { + self.head.meta().order() + } + + /// Returns the address of the buddy chunk. + pub(crate) fn addr(&self) -> Paddr { + self.head.start_paddr() + } + + /// Gets the address of the buddy of this chunk. + pub(crate) fn buddy(&self) -> Paddr { + let addr = self.addr(); + let order = self.order(); + addr ^ size_of_order(order) + } + + /// Splits the buddy chunk into two smaller buddies. + /// + /// # Panics + /// + /// Panics if the buddy chunk is not uniquely free. + pub(crate) fn split_free(self) -> (FreeChunk, FreeChunk) { + let order = self.order(); + let addr = self.addr(); + let new_order = order - 1; + let left_child_addr = addr; + let right_child_addr = addr ^ size_of_order(new_order); + + let mut unique_head = self.into_unique_head(); + debug_assert_eq!(unique_head.start_paddr(), left_child_addr); + unique_head.meta_mut().order = new_order; + + let left_child = FreeChunk { head: unique_head }; + let right_child = FreeChunk { + head: UniqueFrame::from_unused( + right_child_addr, + Link::new(FreeHeadMeta { order: new_order }), + ) + .expect("Tail frames are not unused"), + }; + (left_child, right_child) + } + + /// Merges the buddy chunk with the sibling buddy. + /// + /// # Panics + /// + /// Panics if either the buddy chunks are not free or not buddies. + pub(crate) fn merge_free(mut self, mut buddy: FreeChunk) -> FreeChunk { + if self.addr() > buddy.addr() { + core::mem::swap(&mut self, &mut buddy); + } + + let order = self.order(); + let addr = self.addr(); + let buddy_order = buddy.order(); + let buddy_addr = buddy.addr(); + + buddy.into_unique_head().reset_as_unused(); // This will "drop" the frame without up-calling us. 
+ + assert_eq!(order, buddy_order); + assert_eq!(addr ^ size_of_order(order), buddy_addr); + let new_order = order + 1; + let mut unique_head = self.into_unique_head(); + unique_head.meta_mut().order = new_order; + FreeChunk { head: unique_head } + } +} + +#[cfg(ktest)] +mod test { + use super::*; + use crate::test::MockMemoryRegion; + use ostd::prelude::ktest; + + #[ktest] + fn test_free_chunk_ops() { + let order = 3; + let size = size_of_order(order); + let region = MockMemoryRegion::alloc(size); + let addr1 = region.start_paddr(); + let addr2 = addr1 + size_of_order(order - 2); + let addr3 = addr1 + size_of_order(order - 2) * 2; + + let chunk = FreeChunk::from_unused(addr1, order); + assert_eq!(chunk.order(), order); + assert_eq!(chunk.addr(), addr1); + assert_eq!(chunk.buddy(), addr1 ^ size); + + let (left, right) = chunk.split_free(); + + assert_eq!(left.order(), order - 1); + assert_eq!(left.addr(), addr1); + assert_eq!(left.buddy(), addr3); + + assert_eq!(right.order(), order - 1); + assert_eq!(right.addr(), addr3); + assert_eq!(right.buddy(), addr1); + + let (r1, r2) = left.split_free(); + + assert_eq!(r1.order(), order - 2); + assert_eq!(r1.addr(), addr1); + assert_eq!(r1.buddy(), addr2); + + assert_eq!(r2.order(), order - 2); + assert_eq!(r2.addr(), addr2); + assert_eq!(r2.buddy(), addr1); + + let left = r1.merge_free(r2); + let chunk = left.merge_free(right); + assert_eq!(chunk.order(), order); + assert_eq!(chunk.addr(), addr1); + + chunk.into_unique_head().reset_as_unused(); + } +} diff --git a/osdk/deps/frame-allocator/src/lib.rs b/osdk/deps/frame-allocator/src/lib.rs new file mode 100644 index 000000000..fc7ff5bd0 --- /dev/null +++ b/osdk/deps/frame-allocator/src/lib.rs @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MPL-2.0 + +#![no_std] +#![deny(unsafe_code)] + +//! An implementation of the global physical memory frame allocator for +//! [OSTD](https://crates.io/crates/ostd) based kernels. +//! +//! # Background +//! +//! `OSTD` has provided a page allocator interface, namely [`GlobalFrameAllocator`] +//! and [`global_frame_allocator`] procedure macro, allowing users to plug in +//! their own frame allocator into the kernel safely. You can refer to the +//! [`ostd::mm::frame::allocator`] module for detailed introduction. +//! +//! # Introduction +//! +//! This crate is an implementation of a scalable and efficient global frame +//! allocator based on the buddy system. It is by default shipped with OSDK +//! for users that don't have special requirements on the frame allocator. +//! +//! [`GlobalFrameAllocator`]: ostd::mm::GlobalFrameAllocator +//! [`global_frame_allocator`]: ostd::global_frame_allocator + +mod allocator; +mod chunk; +mod set; + +#[cfg(ktest)] +mod test; + +pub use allocator::{load_total_free_size, FrameAllocator}; diff --git a/osdk/deps/frame-allocator/src/set.rs b/osdk/deps/frame-allocator/src/set.rs new file mode 100644 index 000000000..4e576a201 --- /dev/null +++ b/osdk/deps/frame-allocator/src/set.rs @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MPL-2.0 + +use ostd::mm::{frame::linked_list::LinkedList, Paddr}; + +use crate::chunk::{size_of_order, BuddyOrder, FreeChunk, FreeHeadMeta}; + +/// A set of free buddy chunks. +pub(crate) struct BuddySet { + /// The sum of the sizes of all free chunks. + total_size: usize, + /// The lists of free buddy chunks for each orders. + lists: [LinkedList; MAX_ORDER], +} + +impl BuddySet { + /// Creates a new empty set of free lists. 
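+    ///
+    /// A short usage sketch (mirroring the unit test below; `region_start`
+    /// stands for the base address of a free, suitably aligned region):
+    ///
+    /// ```ignore
+    /// let mut set = BuddySet::<5>::new_empty();
+    /// set.insert_chunk(region_start, 4);       // insert a 16-frame chunk
+    /// let paddr = set.alloc_chunk(0).unwrap(); // carve one frame out of it
+    /// ```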
+ pub(crate) const fn new_empty() -> Self { + Self { + total_size: 0, + lists: [const { LinkedList::new() }; MAX_ORDER], + } + } + + /// Gets the total size of free chunks. + pub(crate) fn total_size(&self) -> usize { + self.total_size + } + + /// Inserts a free chunk into the set. + pub(crate) fn insert_chunk(&mut self, addr: Paddr, order: BuddyOrder) { + debug_assert!(order < MAX_ORDER); + + let inserted_size = size_of_order(order); + let mut chunk = FreeChunk::from_unused(addr, order); + + let order = chunk.order(); + // Coalesce the chunk with its buddy whenever possible. + for (i, list) in self.lists.iter_mut().enumerate().skip(order) { + if i + 1 >= MAX_ORDER { + // The chunk is already the largest one. + break; + } + let buddy_addr = chunk.buddy(); + let Some(mut cursor) = list.cursor_mut_at(buddy_addr) else { + // The buddy is not in this free list, so we can't coalesce. + break; + }; + let taken = cursor.take_current().unwrap(); + debug_assert_eq!(buddy_addr, taken.start_paddr()); + chunk = chunk.merge_free(FreeChunk::from_free_head(taken)); + } + // Insert the coalesced chunk into the free lists. + let order = chunk.order(); + self.lists[order].push_front(chunk.into_unique_head()); + + self.total_size += inserted_size; + } + + /// Allocates a chunk from the set. + /// + /// The function will choose and remove a buddy chunk of the given order + /// from the set. The address of the chunk will be returned. + pub(crate) fn alloc_chunk(&mut self, order: BuddyOrder) -> Option { + // Find the first non-empty size class larger than the requested order. + let mut non_empty = None; + for (i, list) in self.lists.iter_mut().enumerate().skip(order) { + if !list.is_empty() { + non_empty = Some(i); + break; + } + } + let non_empty = non_empty?; + let mut chunk = { + let head = self.lists[non_empty].pop_front().unwrap(); + debug_assert_eq!(head.meta().order(), non_empty as BuddyOrder); + + Some(FreeChunk::from_free_head(head)) + }; + + // Split the chunk. + for i in (order + 1..=non_empty).rev() { + let (left_sub, right_sub) = chunk.take().unwrap().split_free(); + // Push the right sub-chunk back to the free lists. + let right_sub = right_sub.into_unique_head(); + debug_assert_eq!(right_sub.meta().order(), (i - 1) as BuddyOrder); + self.lists[i - 1].push_front(right_sub); + // Pass the left sub-chunk to the next iteration. + chunk = Some(left_sub); + } + + let allocated_size = size_of_order(order); + + self.total_size -= allocated_size; + + // The remaining chunk is the one we want. + let head_frame = chunk.take().unwrap().into_unique_head(); + let paddr = head_frame.start_paddr(); + head_frame.reset_as_unused(); // It will "drop" the frame without up-calling us. + Some(paddr) + } +} + +#[cfg(ktest)] +mod test { + use super::*; + use crate::test::MockMemoryRegion; + use ostd::prelude::ktest; + + #[ktest] + fn test_buddy_set_insert_alloc() { + let region_order = 4; + let region_size = size_of_order(region_order); + let region = MockMemoryRegion::alloc(region_size); + let region_start = region.start_paddr(); + + let mut set = BuddySet::<5>::new_empty(); + set.insert_chunk(region_start, region_order); + assert!(set.total_size() == region_size); + + // Allocating chunks of orders of 0, 0, 1, 2, 3 should be okay. 
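+        // (The region has order 4, i.e. 16 frames; the five chunks below take
+        // 1 + 1 + 2 + 4 + 8 = 16 frames in total, which is why the set ends
+        // up empty afterwards.)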
+ let chunk1 = set.alloc_chunk(0).unwrap(); + assert!(set.total_size() == region_size - size_of_order(0)); + let chunk2 = set.alloc_chunk(0).unwrap(); + assert!(set.total_size() == region_size - size_of_order(1)); + let chunk3 = set.alloc_chunk(1).unwrap(); + assert!(set.total_size() == region_size - size_of_order(2)); + let chunk4 = set.alloc_chunk(2).unwrap(); + assert!(set.total_size() == region_size - size_of_order(3)); + let chunk5 = set.alloc_chunk(3).unwrap(); + assert!(set.total_size() == 0); + + // Putting them back should enable us to allocate the original region. + set.insert_chunk(chunk3, 1); + assert!(set.total_size() == size_of_order(1)); + set.insert_chunk(chunk1, 0); + assert!(set.total_size() == size_of_order(0) + size_of_order(1)); + set.insert_chunk(chunk5, 3); + assert!(set.total_size() == size_of_order(0) + size_of_order(1) + size_of_order(3)); + set.insert_chunk(chunk2, 0); + assert!(set.total_size() == size_of_order(2) + size_of_order(3)); + set.insert_chunk(chunk4, 2); + assert!(set.total_size() == size_of_order(4)); + + let chunk = set.alloc_chunk(region_order).unwrap(); + assert!(chunk == region_start); + assert!(set.total_size() == 0); + } +} diff --git a/osdk/deps/frame-allocator/src/test.rs b/osdk/deps/frame-allocator/src/test.rs new file mode 100644 index 000000000..70ec8ed86 --- /dev/null +++ b/osdk/deps/frame-allocator/src/test.rs @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Providing test utilities and high-level test cases for the frame allocator. + +use core::alloc::Layout; + +use ostd::{ + mm::{frame::GlobalFrameAllocator, FrameAllocOptions, Paddr, Segment, UniqueFrame, PAGE_SIZE}, + prelude::ktest, +}; + +use super::FrameAllocator; + +#[ktest] +fn frame_allocator_alloc_layout_match() { + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 3, PAGE_SIZE).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE).unwrap()); + + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 2, PAGE_SIZE * 2).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 4, PAGE_SIZE * 4).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 8, PAGE_SIZE * 8).unwrap()); + assert_allocation_well_formed(Layout::from_size_align(PAGE_SIZE * 16, PAGE_SIZE * 16).unwrap()); +} + +#[track_caller] +fn assert_allocation_well_formed(layout: Layout) { + let instance = FrameAllocator; + + let allocated = instance.alloc(layout).unwrap(); + assert_eq!( + allocated % layout.align(), + 0, + "Allocation alignment mismatch" + ); + + for offset in (0..layout.size()).step_by(PAGE_SIZE) { + let frame = allocated + offset; + let frame = UniqueFrame::from_unused(frame, ()).unwrap_or_else(|e| { + panic!( + "Metadata not well-formed after allocation at offset {:#x}: {:#?}", + offset, e + ) + }); + frame.reset_as_unused(); + } + + instance.add_free_memory(allocated, layout.size()); +} + +/// A mocked memory region for testing. +/// +/// All the frames in the returned memory region will be marked as used. +/// When the region is dropped, all the frames will be returned to the global +/// frame allocator. If any frame is not unused by that time, the drop will panic. +pub(crate) struct MockMemoryRegion { + addr: Paddr, + size: usize, +} + +impl MockMemoryRegion { + /// Gets a memory region for testing. 
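+    ///
+    /// `size` is in bytes and is expected to be a multiple of `PAGE_SIZE`,
+    /// since it is converted to a frame count (`size / PAGE_SIZE`) below.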
+ pub(crate) fn alloc(size: usize) -> Self { + let seg = FrameAllocOptions::new() + .alloc_segment(size / PAGE_SIZE) + .unwrap(); + let addr = seg.start_paddr(); + for frame in seg { + UniqueFrame::try_from(frame).unwrap().reset_as_unused(); + } + Self { addr, size } + } + + /// Gets the start address of the memory region. + pub(crate) fn start_paddr(&self) -> Paddr { + self.addr + } +} + +impl Drop for MockMemoryRegion { + fn drop(&mut self) { + let seg = Segment::from_unused(self.addr..self.addr + self.size, |_| ()).unwrap(); + drop(seg); + } +} diff --git a/osdk/src/base_crate/main.rs.template b/osdk/src/base_crate/main.rs.template index 9b5503858..fe31d285c 100644 --- a/osdk/src/base_crate/main.rs.template +++ b/osdk/src/base_crate/main.rs.template @@ -1,6 +1,8 @@ #![no_std] #![no_main] +#![feature(linkage)] + extern crate #TARGET_NAME#; #[panic_handler] @@ -10,3 +12,12 @@ fn panic(info: &core::panic::PanicInfo) -> ! { } unsafe { __ostd_panic_handler(info); } } + +use ostd::mm::frame::GlobalFrameAllocator; + +use osdk_frame_allocator::FrameAllocator; +static FRAME_ALLOCATOR: FrameAllocator = FrameAllocator; + +#[no_mangle] +#[linkage = "weak"] +static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator = &FRAME_ALLOCATOR; diff --git a/osdk/src/base_crate/mod.rs b/osdk/src/base_crate/mod.rs index 43f7d1267..4d32913f9 100644 --- a/osdk/src/base_crate/mod.rs +++ b/osdk/src/base_crate/mod.rs @@ -57,8 +57,8 @@ pub enum BaseCrateType { /// Create a new base crate that will be built by cargo. /// /// The dependencies of the base crate will be the target crate. If -/// `link_unit_test_runner` is set to true, the base crate will also depend on -/// the `ostd-test-runner` crate. +/// `link_unit_test_kernel` is set to true, the base crate will also depend on +/// the `ostd-test-kernel` crate. /// /// It returns the path to the base crate. 
pub fn new_base_crate( @@ -66,7 +66,7 @@ pub fn new_base_crate( base_crate_path_stem: impl AsRef, dep_crate_name: &str, dep_crate_path: impl AsRef, - link_unit_test_runner: bool, + link_unit_test_kernel: bool, ) -> PathBuf { let base_crate_path: PathBuf = PathBuf::from( (base_crate_path_stem.as_ref().as_os_str().to_string_lossy() @@ -85,7 +85,7 @@ pub fn new_base_crate( &base_crate_tmp_path, dep_crate_name, &dep_crate_path, - link_unit_test_runner, + link_unit_test_kernel, ); let cargo_result = are_files_identical( &base_crate_path.join("Cargo.toml"), @@ -105,7 +105,7 @@ pub fn new_base_crate( &base_crate_path, dep_crate_name, dep_crate_path, - link_unit_test_runner, + link_unit_test_kernel, ); base_crate_path @@ -115,7 +115,7 @@ fn do_new_base_crate( base_crate_path: impl AsRef, dep_crate_name: &str, dep_crate_path: impl AsRef, - link_unit_test_runner: bool, + link_unit_test_kernel: bool, ) { let workspace_root = { let meta = get_cargo_metadata(None::<&str>, None::<&[&str]>).unwrap(); @@ -182,7 +182,7 @@ fn do_new_base_crate( fs::write("src/main.rs", main_rs).unwrap(); // Add dependencies to the Cargo.toml - add_manifest_dependency(dep_crate_name, dep_crate_path, link_unit_test_runner); + add_manifest_dependency(dep_crate_name, dep_crate_path, link_unit_test_kernel); // Copy the manifest configurations from the target crate to the base crate copy_profile_configurations(workspace_root); @@ -197,7 +197,7 @@ fn do_new_base_crate( fn add_manifest_dependency( crate_name: &str, crate_path: impl AsRef, - link_unit_test_runner: bool, + link_unit_test_kernel: bool, ) { let manifest_path = "Cargo.toml"; @@ -224,31 +224,47 @@ fn add_manifest_dependency( .unwrap(); dependencies.as_table_mut().unwrap().extend(target_dep); - if link_unit_test_runner { - let dep_str = match option_env!("OSDK_LOCAL_DEV") { - Some("1") => { - let crate_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - let test_kernel_dir = crate_dir.join("deps").join("test-kernel"); - format!( - "osdk-test-kernel = {{ path = \"{}\" }}", - test_kernel_dir.display() - ) - } - _ => concat!( - "osdk-test-kernel = { version = \"", - env!("CARGO_PKG_VERSION"), - "\" }" - ) - .to_owned(), - }; - let test_runner_dep = toml::Table::from_str(&dep_str).unwrap(); - dependencies.as_table_mut().unwrap().extend(test_runner_dep); + if link_unit_test_kernel { + add_manifest_dependency_to( + dependencies, + "osdk-test-kernel", + Path::new("deps").join("test-kernel"), + ); } + add_manifest_dependency_to( + dependencies, + "osdk-frame-allocator", + Path::new("deps").join("frame-allocator"), + ); + + add_manifest_dependency_to(dependencies, "ostd", Path::new("..").join("ostd")); + let content = toml::to_string(&manifest).unwrap(); fs::write(manifest_path, content).unwrap(); } +fn add_manifest_dependency_to(manifest: &mut toml::Value, dep_name: &str, path: PathBuf) { + let dep_str = match option_env!("OSDK_LOCAL_DEV") { + Some("1") => { + let crate_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let dep_crate_dir = crate_dir.join(path); + format!( + "{} = {{ path = \"{}\" }}", + dep_name, + dep_crate_dir.display() + ) + } + _ => format!( + "{} = {{ version = \"{}\" }}", + dep_name, + env!("CARGO_PKG_VERSION"), + ), + }; + let dep_val = toml::Table::from_str(&dep_str).unwrap(); + manifest.as_table_mut().unwrap().extend(dep_val); +} + fn copy_profile_configurations(workspace_root: impl AsRef) { let target_manifest_path = workspace_root.as_ref().join("Cargo.toml"); let manifest_path = "Cargo.toml"; diff --git a/ostd/libs/ostd-macros/src/lib.rs 
b/ostd/libs/ostd-macros/src/lib.rs index 4456a6e3d..68173d22a 100644 --- a/ostd/libs/ostd-macros/src/lib.rs +++ b/ostd/libs/ostd-macros/src/lib.rs @@ -65,6 +65,43 @@ pub fn test_main(_attr: TokenStream, item: TokenStream) -> TokenStream { .into() } +/// A macro attribute for the global frame allocator. +/// +/// The attributed static variable will be used to provide frame allocation +/// for the kernel. The variable should have type `ostd::mm::GlobalFrameAllocator`. +/// +/// # Example +/// +/// ```ignore +/// use ostd::{mm::{frame::GlobalFrameAllocator, Paddr}, global_frame_allocator}; +/// +/// // Of course it won't work because all allocations will fail. +/// // It's just an example. +/// #[global_frame_allocator] +/// static ALLOCATOR: MyFrameAllocator = MyFrameAllocator; +/// +/// struct MyFrameAllocator; +/// +/// impl GlobalFrameAllocator for MyFrameAllocator { +/// fn alloc(&self, _layout: Layout) -> Option { None } +/// fn dealloc(&self, _paddr: Paddr, _size: usize) {} +/// } +/// ``` +#[proc_macro_attribute] +pub fn global_frame_allocator(_attr: TokenStream, item: TokenStream) -> TokenStream { + // Make a `static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator` + // That points to the annotated static variable. + let item = parse_macro_input!(item as syn::ItemStatic); + let static_name = &item.ident; + + quote!( + #[no_mangle] + static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn ostd::mm::frame::GlobalFrameAllocator = &#static_name; + #item + ) + .into() +} + /// A macro attribute for the panic handler. /// /// The attributed function will be used to override OSTD's default diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs index 79e5588d5..44a1c10c5 100644 --- a/ostd/src/lib.rs +++ b/ostd/src/lib.rs @@ -67,6 +67,13 @@ pub use self::{error::Error, prelude::Result}; #[doc(hidden)] unsafe fn init() { arch::enable_cpu_features(); + + // SAFETY: This function is called only once, before `allocator::init` + // and after memory regions are initialized. + unsafe { + mm::frame::allocator::init_early_allocator(); + } + arch::serial::init(); #[cfg(feature = "cvm_guest")] diff --git a/ostd/src/mm/frame/allocator.rs b/ostd/src/mm/frame/allocator.rs index d706f31f9..049d1b285 100644 --- a/ostd/src/mm/frame/allocator.rs +++ b/ostd/src/mm/frame/allocator.rs @@ -2,18 +2,18 @@ //! The physical memory allocator. +use core::{alloc::Layout, ops::Range}; + use align_ext::AlignExt; -use buddy_system_allocator::FrameAllocator; -use log::info; -use spin::Once; use super::{meta::AnyFrameMeta, segment::Segment, Frame}; use crate::{ boot::memory_region::MemoryRegionType, error::Error, + impl_frame_meta_for, mm::{paddr_to_vaddr, Paddr, PAGE_SIZE}, prelude::*, - sync::SpinLock, + util::range_difference, }; /// Options for allocating physical memory frames. @@ -52,16 +52,9 @@ impl FrameAllocOptions { /// Allocates a single frame with additional metadata. 
pub fn alloc_frame_with(&self, metadata: M) -> Result> { - let frame = FRAME_ALLOCATOR - .get() - .unwrap() - .disable_irq() - .lock() - .alloc(1) - .map(|idx| { - let paddr = idx * PAGE_SIZE; - Frame::from_unused(paddr, metadata).unwrap() - }) + let single_layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap(); + let frame = alloc_upcall(single_layout) + .map(|paddr| Frame::from_unused(paddr, metadata).unwrap()) .ok_or(Error::NoMemory)?; if self.zeroed { @@ -93,18 +86,10 @@ impl FrameAllocOptions { if nframes == 0 { return Err(Error::InvalidArgs); } - let segment = FRAME_ALLOCATOR - .get() - .unwrap() - .disable_irq() - .lock() - .alloc(nframes) + let layout = Layout::from_size_align(nframes * PAGE_SIZE, PAGE_SIZE).unwrap(); + let segment = alloc_upcall(layout) .map(|start| { - Segment::from_unused( - start * PAGE_SIZE..start * PAGE_SIZE + nframes * PAGE_SIZE, - metadata_fn, - ) - .unwrap() + Segment::from_unused(start..start + nframes * PAGE_SIZE, metadata_fn).unwrap() }) .ok_or(Error::NoMemory)?; @@ -140,73 +125,236 @@ fn test_alloc_dealloc() { } } -/// FrameAllocator with a counter for allocated memory -pub(in crate::mm) struct CountingFrameAllocator { - allocator: FrameAllocator, - total: usize, - allocated: usize, +/// The trait for the global frame allocator. +/// +/// OSTD allows a customized frame allocator by the [`global_frame_allocator`] +/// attribute, which marks a static variable of this type. +/// +/// The API mimics the standard Rust allocator API ([`GlobalAlloc`] and +/// [`global_allocator`]). However, this trait is much safer. Double free +/// or freeing in-use memory through this trait only mess up the allocator's +/// state rather than causing undefined behavior. +/// +/// Whenever OSTD or other modules need to allocate or deallocate frames via +/// [`FrameAllocOptions`], they are forwarded to the global frame allocator. +/// It is not encoraged to call the global allocator directly. +/// +/// [`global_frame_allocator`]: crate::global_frame_allocator +/// [`GlobalAlloc`]: core::alloc::GlobalAlloc +pub trait GlobalFrameAllocator: Sync { + /// Allocates a contiguous range of frames. + /// + /// The caller guarantees that `layout.size()` is aligned to [`PAGE_SIZE`]. + /// + /// When the allocated memory is not in use, OSTD return them by calling + /// [`GlobalFrameAllocator::add_free_memory`]. + fn alloc(&self, layout: Layout) -> Option; + + /// Adds a contiguous range of frames to the allocator. + /// + /// The caller guarantees that `addr` and `size` are both aligned to + /// [`PAGE_SIZE`]. The added memory can be uninitialized. + /// + /// The memory being added would never overlap with any memory that is + /// already added, i.e., a frame cannot be added twice without being + /// allocated in between. + /// + /// However, if [`GlobalFrameAllocator::alloc`] returns multiple frames, + /// it is possible that some of them are added back before others. + fn add_free_memory(&self, addr: Paddr, size: usize); } -impl CountingFrameAllocator { - pub fn new(allocator: FrameAllocator, total: usize) -> Self { - CountingFrameAllocator { - allocator, - total, - allocated: 0, - } - } - - pub fn alloc(&mut self, count: usize) -> Option { - match self.allocator.alloc(count) { - Some(value) => { - self.allocated += count * PAGE_SIZE; - Some(value) - } - None => None, - } - } - - // TODO: this method should be marked unsafe as invalid arguments will mess - // up the underlying allocator. 
- pub fn dealloc(&mut self, start_frame: usize, count: usize) { - self.allocator.dealloc(start_frame, count); - self.allocated -= count * PAGE_SIZE; - } - - pub fn mem_total(&self) -> usize { - self.total - } - - pub fn mem_available(&self) -> usize { - self.total - self.allocated - } +extern "Rust" { + /// The global frame allocator's reference exported by + /// [`crate::global_frame_allocator`]. + static __GLOBAL_FRAME_ALLOCATOR_REF: &'static dyn GlobalFrameAllocator; } -pub(in crate::mm) static FRAME_ALLOCATOR: Once> = Once::new(); +/// Directly allocates a contiguous range of frames. +fn alloc_upcall(layout: core::alloc::Layout) -> Option { + // SAFETY: We believe that the global frame allocator is set up correctly + // with the `global_frame_allocator` attribute. If they use safe code only + // then the up-call is safe. + unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.alloc(layout) } +} -pub(crate) fn init() { +/// Up-call to add a range of frames to the global frame allocator. +/// +/// It would return the frame to the allocator for further use. This would like +/// to be done after the release of the metadata to avoid re-allocation before +/// the metadata is reset. +pub(super) fn add_free_memory_upcall(addr: Paddr, size: usize) { + // SAFETY: We believe that the global frame allocator is set up correctly + // with the `global_frame_allocator` attribute. If they use safe code only + // then the up-call is safe. + unsafe { __GLOBAL_FRAME_ALLOCATOR_REF.add_free_memory(addr, size) } +} + +/// Initializes the global frame allocator. +/// +/// It just does adds the frames to the global frame allocator. Calling it +/// multiple times would be not safe. +/// +/// # Safety +/// +/// This function should be called only once. +pub(crate) unsafe fn init() { let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions; - let mut total: usize = 0; - let mut allocator = FrameAllocator::<32>::new(); + + // Retire the early allocator. + let early_allocator = EARLY_ALLOCATOR.lock().take().unwrap(); + let (range_1, range_2) = early_allocator.allocated_regions(); + for region in regions.iter() { if region.typ() == MemoryRegionType::Usable { - // Make the memory region page-aligned, and skip if it is too small. - let start = region.base().align_up(PAGE_SIZE) / PAGE_SIZE; - let region_end = region.base().checked_add(region.len()).unwrap(); - let end = region_end.align_down(PAGE_SIZE) / PAGE_SIZE; - if end <= start { - continue; - } + debug_assert!(region.base() % PAGE_SIZE == 0); + debug_assert!(region.len() % PAGE_SIZE == 0); + // Add global free pages to the frame allocator. - allocator.add_frame(start, end); - total += (end - start) * PAGE_SIZE; - info!( - "Found usable region, start:{:x}, end:{:x}", - region.base(), - region.base() + region.len() - ); + // Truncate the early allocated frames if there is an overlap. + for r1 in range_difference(&(region.base()..region.end()), &range_1) { + for r2 in range_difference(&r1, &range_2) { + log::info!("Adding free frames to the allocator: {:x?}", r2); + add_free_memory_upcall(r2.start, r2.len()); + } + } } } - let counting_allocator = CountingFrameAllocator::new(allocator, total); - FRAME_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator)); +} + +/// An allocator in the early boot phase when frame metadata is not available. +pub(super) struct EarlyFrameAllocator { + // We need to allocate from under 4G first since the linear mapping for + // the higher region is not constructed yet. 
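+    // (The boot page table is only assumed to provide a linear mapping for
+    // the first 4 GiB; see `add_temp_linear_mapping` in `meta.rs`.)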
+ under_4g_range: Range, + under_4g_end: Paddr, + + // And also sometimes 4G is not enough for early phase. This, if not `0..0`, + // is the largest region above 4G. + max_range: Range, + max_end: Paddr, +} + +/// The global frame allocator in the early boot phase. +/// +/// It is used to allocate frames before the frame metadata is initialized. +/// The allocated frames are not tracked by the frame metadata. After the +/// metadata is initialized with [`super::meta::init`], the frames are tracked +/// with metadata and the early allocator is no longer used. +/// +/// This is protected by the [`spin::Mutex`] rather than [`crate::sync::SpinLock`] +/// since the latter uses CPU-local storage, which isn't available in the early +/// boot phase. So we must make sure that no interrupts are enabled when using +/// this allocator. +pub(super) static EARLY_ALLOCATOR: spin::Mutex> = + spin::Mutex::new(None); + +impl EarlyFrameAllocator { + /// Creates a new early frame allocator. + /// + /// It uses at most 2 regions, the first is the maximum usable region below + /// 4 GiB. The other is the maximum usable region above 4 GiB and is only + /// usable when linear mapping is constructed. + pub fn new() -> Self { + let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions; + + let mut under_4g_range = 0..0; + let mut max_range = 0..0; + for region in regions.iter() { + if region.typ() != MemoryRegionType::Usable { + continue; + } + const PADDR4G: Paddr = 0x1_0000_0000; + if region.base() < PADDR4G { + let range = region.base()..region.end().min(PADDR4G); + if range.len() > under_4g_range.len() { + under_4g_range = range; + } + } + if region.end() >= PADDR4G { + let range = region.base().max(PADDR4G)..region.end(); + if range.len() > max_range.len() { + max_range = range; + } + } + } + + log::debug!( + "Early frame allocator (below 4G) at: {:#x?}", + under_4g_range + ); + if !max_range.is_empty() { + log::debug!("Early frame allocator (above 4G) at: {:#x?}", max_range); + } + + Self { + under_4g_range: under_4g_range.clone(), + under_4g_end: under_4g_range.start, + max_range: max_range.clone(), + max_end: max_range.start, + } + } + + /// Allocates a contiguous range of frames. + pub fn alloc(&mut self, layout: Layout) -> Option { + let size = layout.size().align_up(PAGE_SIZE); + let allocated = self.under_4g_end.align_up(layout.align()); + if allocated + size <= self.under_4g_range.end { + // Allocated below 4G. + self.under_4g_end = allocated + size; + Some(allocated) + } else { + // Try above 4G. + let allocated = self.max_end.align_up(layout.align()); + if allocated + size <= self.max_range.end { + self.max_end = allocated + size; + } + Some(allocated) + } + } + + pub(super) fn allocated_regions(&self) -> (Range, Range) { + ( + self.under_4g_range.start..self.under_4g_end, + self.max_range.start..self.max_end, + ) + } +} + +/// Metadata for frames allocated in the early boot phase. +/// +/// Frames allocated with [`early_alloc`] are not immediately tracked with +/// frame metadata. But [`super::meta::init`] will track them later. +#[derive(Debug)] +pub(crate) struct EarlyAllocatedFrameMeta; + +impl_frame_meta_for!(EarlyAllocatedFrameMeta); + +/// Allocates a contiguous range of frames in the early boot phase. +/// +/// The early allocated frames will not be reclaimable, until the metadata is +/// initialized by [`super::meta::init`]. Then we can use [`Frame::from_raw`] +/// to free the frames. 
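+///
+/// For instance, the metadata allocation in [`super::meta::init`] obtains its
+/// backing frames through this path; roughly (mirroring `alloc_meta_frames`
+/// in this patch):
+///
+/// ```ignore
+/// let start_paddr = early_alloc(
+///     Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
+/// )
+/// .unwrap();
+/// ```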
+/// +/// # Panics +/// +/// This function panics if: +/// - it is called before [`init_early_allocator`], +/// - or if is called after [`init`]. +pub(crate) fn early_alloc(layout: Layout) -> Option { + let mut early_allocator = EARLY_ALLOCATOR.lock(); + early_allocator.as_mut().unwrap().alloc(layout) +} + +/// Initializes the early frame allocator. +/// +/// [`early_alloc`] should be used after this initialization. After [`init`], the +/// early allocator. +/// +/// # Safety +/// +/// This function should be called only once after the memory regions are ready. +pub(crate) unsafe fn init_early_allocator() { + let mut early_allocator = EARLY_ALLOCATOR.lock(); + *early_allocator = Some(EarlyFrameAllocator::new()); } diff --git a/ostd/src/mm/frame/meta.rs b/ostd/src/mm/frame/meta.rs index 442dd8cf9..1c90a4fe7 100644 --- a/ostd/src/mm/frame/meta.rs +++ b/ostd/src/mm/frame/meta.rs @@ -39,10 +39,11 @@ pub(crate) mod mapping { } use core::{ + alloc::Layout, any::Any, cell::UnsafeCell, fmt::Debug, - mem::{size_of, MaybeUninit}, + mem::{size_of, ManuallyDrop, MaybeUninit}, result::Result, sync::atomic::{AtomicU64, Ordering}, }; @@ -50,16 +51,19 @@ use core::{ use align_ext::AlignExt; use log::info; -use super::{allocator, Segment}; use crate::{ arch::mm::PagingConsts, const_assert, mm::{ - kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, page_size, page_table::boot_pt, - CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr, - VmReader, PAGE_SIZE, + frame::allocator::{self, EarlyAllocatedFrameMeta}, + kspace::LINEAR_MAPPING_BASE_VADDR, + paddr_to_vaddr, page_size, + page_table::boot_pt, + CachePolicy, Infallible, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Segment, + Vaddr, VmReader, PAGE_SIZE, }, panic::abort, + util::range_difference, }; /// The maximum number of bytes of the metadata of a frame. @@ -383,16 +387,6 @@ impl MetaSlot { // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures // `drop_meta_in_place` won't be reordered after this memory store. self.ref_count.store(REF_COUNT_UNUSED, Ordering::Release); - - // Deallocate the frame. - // It would return the frame to the allocator for further use. This would be done - // after the release of the metadata to avoid re-allocation before the metadata - // is reset. - allocator::FRAME_ALLOCATOR - .get() - .unwrap() - .lock() - .dealloc(self.frame_paddr() / PAGE_SIZE, 1); } /// Drops the metadata of a slot in place. @@ -460,8 +454,6 @@ pub(crate) unsafe fn init() -> Segment { add_temp_linear_mapping(max_paddr); - super::MAX_PADDR.store(max_paddr, Ordering::Relaxed); - let tot_nr_frames = max_paddr / page_size::(1); let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames); @@ -482,10 +474,33 @@ pub(crate) unsafe fn init() -> Segment { .unwrap(); // Now the metadata frames are mapped, we can initialize the metadata. 
- Segment::from_unused(meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE, |_| { - MetaPageMeta {} - }) - .unwrap() + super::MAX_PADDR.store(max_paddr, Ordering::Relaxed); + + let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE; + + let (range_1, range_2) = allocator::EARLY_ALLOCATOR + .lock() + .as_ref() + .unwrap() + .allocated_regions(); + for r in range_difference(&range_1, &meta_page_range) { + let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap(); + let _ = ManuallyDrop::new(early_seg); + } + for r in range_difference(&range_2, &meta_page_range) { + let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap(); + let _ = ManuallyDrop::new(early_seg); + } + + Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap() +} + +/// Returns whether the global frame allocator is initialized. +pub(in crate::mm) fn is_initialized() -> bool { + // `init` sets it with relaxed ordering somewhere in the middle. But due + // to the safety requirement of the `init` function, we can assume that + // there is no race conditions. + super::MAX_PADDR.load(Ordering::Relaxed) != 0 } fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) { @@ -493,13 +508,10 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) { .checked_mul(size_of::()) .unwrap() .div_ceil(PAGE_SIZE); - let start_paddr = allocator::FRAME_ALLOCATOR - .get() - .unwrap() - .lock() - .alloc(nr_meta_pages) - .unwrap() - * PAGE_SIZE; + let start_paddr = allocator::early_alloc( + Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(), + ) + .unwrap(); let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot; @@ -523,14 +535,6 @@ fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) { (nr_meta_pages, start_paddr) } -/// Returns whether the global frame allocator is initialized. -pub(in crate::mm) fn is_initialized() -> bool { - // `init` sets it somewhere in the middle. But due to the safety - // requirement of the `init` function, we can assume that there - // is no race condition. - super::MAX_PADDR.load(Ordering::Relaxed) != 0 -} - /// Adds a temporary linear mapping for the metadata frames. /// /// We only assume boot page table to contain 4G linear mapping. Thus if the diff --git a/ostd/src/mm/frame/mod.rs b/ostd/src/mm/frame/mod.rs index d9f2f3900..316af583d 100644 --- a/ostd/src/mm/frame/mod.rs +++ b/ostd/src/mm/frame/mod.rs @@ -44,6 +44,7 @@ use core::{ sync::atomic::{AtomicUsize, Ordering}, }; +pub use allocator::GlobalFrameAllocator; use meta::{mapping, AnyFrameMeta, GetFrameError, MetaSlot, REF_COUNT_UNUSED}; pub use segment::Segment; use untyped::{AnyUFrameMeta, UFrame}; @@ -220,6 +221,8 @@ impl Drop for Frame { // SAFETY: this is the last reference and is about to be dropped. unsafe { self.slot().drop_last_in_place() }; + + allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE); } } } diff --git a/ostd/src/mm/frame/unique.rs b/ostd/src/mm/frame/unique.rs index e2e1a6041..a9e8a9d31 100644 --- a/ostd/src/mm/frame/unique.rs +++ b/ostd/src/mm/frame/unique.rs @@ -99,6 +99,26 @@ impl UniqueFrame { unsafe { &mut *self.slot().dyn_meta_ptr() } } + /// Resets the frame to unused without up-calling the allocator. + /// + /// This is solely useful for the allocator implementation/testing and + /// is highly experimental. Usage of this function is discouraged. + /// + /// Usage of this function other than the allocator would actually leak + /// the frame since the allocator would not be aware of the frame. 
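+    ///
+    /// For reference, the buddy allocator shipped with OSDK uses it when it
+    /// hands a free chunk back out, roughly (mirroring `BuddySet::alloc_chunk`
+    /// in this patch):
+    ///
+    /// ```ignore
+    /// let head_frame = chunk.into_unique_head();
+    /// let paddr = head_frame.start_paddr();
+    /// head_frame.reset_as_unused(); // drops the metadata without an allocator up-call
+    /// ```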
+ // + // FIXME: We may have a better `Segment` and `UniqueSegment` design to + // allow the allocator hold the ownership of all the frames in a chunk + // instead of the head. Then this weird public API can be `#[cfg(ktest)]`. + pub fn reset_as_unused(self) { + let this = ManuallyDrop::new(self); + + this.slot().ref_count.store(0, Ordering::Release); + // SAFETY: We are the sole owner and the reference count is 0. + // The slot is initialized. + unsafe { this.slot().drop_last_in_place() }; + } + /// Converts this frame into a raw physical address. pub(crate) fn into_raw(self) -> Paddr { let this = ManuallyDrop::new(self); @@ -134,6 +154,8 @@ impl Drop for UniqueFrame { // SAFETY: We are the sole owner and the reference count is 0. // The slot is initialized. unsafe { self.slot().drop_last_in_place() }; + + super::allocator::add_free_memory_upcall(self.start_paddr(), PAGE_SIZE); } } diff --git a/ostd/src/mm/heap_allocator/mod.rs b/ostd/src/mm/heap_allocator/mod.rs index d49632213..69839d2fa 100644 --- a/ostd/src/mm/heap_allocator/mod.rs +++ b/ostd/src/mm/heap_allocator/mod.rs @@ -2,7 +2,10 @@ mod slab_allocator; -use core::alloc::{GlobalAlloc, Layout}; +use core::{ + alloc::{GlobalAlloc, Layout}, + mem::ManuallyDrop, +}; use align_ext::AlignExt; use log::debug; @@ -11,11 +14,11 @@ use spin::Once; use super::paddr_to_vaddr; use crate::{ - mm::{frame::allocator::FRAME_ALLOCATOR, PAGE_SIZE}, + impl_frame_meta_for, + mm::{FrameAllocOptions, PAGE_SIZE}, prelude::*, sync::SpinLock, trap::disable_local, - Error, }; #[global_allocator] @@ -49,6 +52,12 @@ struct LockedHeapWithRescue { heap: Once>, } +/// The metadata for the kernel heap frames. +#[derive(Debug)] +pub struct KernelHeapMeta; + +impl_frame_meta_for!(KernelHeapMeta); + impl LockedHeapWithRescue { /// Creates an new heap pub const fn new() -> Self { @@ -94,22 +103,26 @@ impl LockedHeapWithRescue { }; let allocation_start = { - let mut page_allocator = FRAME_ALLOCATOR.get().unwrap().lock(); - if num_frames >= MIN_NUM_FRAMES { - page_allocator.alloc(num_frames).ok_or(Error::NoMemory)? + let mut options = FrameAllocOptions::new(); + options.zeroed(false); + let segment = if num_frames >= MIN_NUM_FRAMES { + options + .alloc_segment_with(num_frames, |_| KernelHeapMeta) + .unwrap() } else { - match page_allocator.alloc(MIN_NUM_FRAMES) { - None => page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?, - Some(start) => { + match options.alloc_segment_with(MIN_NUM_FRAMES, |_| KernelHeapMeta) { + Ok(seg) => { num_frames = MIN_NUM_FRAMES; - start + seg } + Err(_) => options.alloc_segment_with(num_frames, |_| KernelHeapMeta)?, } - } + }; + let paddr = segment.start_paddr(); + let _ = ManuallyDrop::new(segment); + paddr }; - // FIXME: the alloc function internally allocates heap memory(inside FrameAllocator). - // So if the heap is nearly run out, allocating frame will fail too. - let vaddr = paddr_to_vaddr(allocation_start * PAGE_SIZE); + let vaddr = paddr_to_vaddr(allocation_start); // SAFETY: the frame is allocated from FrameAllocator and never be deallocated, // so the addr is always valid. 
diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs
index 31d832f9c..34bcc1c2d 100644
--- a/ostd/src/mm/mod.rs
+++ b/ostd/src/mm/mod.rs
@@ -16,7 +16,6 @@ pub(crate) mod kspace;
 mod offset;
 pub(crate) mod page_prop;
 pub(crate) mod page_table;
-pub mod stat;
 pub mod tlb;
 pub mod vm_space;
 
diff --git a/ostd/src/mm/page_table/boot_pt.rs b/ostd/src/mm/page_table/boot_pt.rs
index e62691ac5..1087d23b5 100644
--- a/ostd/src/mm/page_table/boot_pt.rs
+++ b/ostd/src/mm/page_table/boot_pt.rs
@@ -5,6 +5,7 @@
 //! in order to initialize the running phase page tables.
 
 use core::{
+    alloc::Layout,
     result::Result,
     sync::atomic::{AtomicU32, Ordering},
 };
@@ -15,7 +16,11 @@ use crate::{
     cpu::num_cpus,
     cpu_local_cell,
     mm::{
-        frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageFlags,
+        frame::{
+            self,
+            allocator::{self, EarlyAllocatedFrameMeta},
+        },
+        nr_subpage_per_huge, paddr_to_vaddr, Frame, FrameAllocOptions, Paddr, PageFlags,
         PageProperty, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,
     },
     sync::SpinLock,
@@ -62,13 +67,27 @@ where
 /// The caller should ensure that:
 /// - another legitimate page table is activated on this CPU;
 /// - this function should be called only once per CPU;
-/// - no [`with`] calls are performed on this CPU after this dismissal;
-/// - no [`with`] calls are performed on this CPU after the activation of
-///   another page table and before this dismissal.
+/// - no [`with_borrow`] calls are performed on this CPU after this dismissal;
+/// - no [`with_borrow`] calls are performed on this CPU after the activation
+///   of another page table and before this dismissal.
 pub(crate) unsafe fn dismiss() {
     IS_DISMISSED.store(true);
     if DISMISS_COUNT.fetch_add(1, Ordering::SeqCst) as usize == num_cpus() - 1 {
-        BOOT_PAGE_TABLE.lock().take();
+        let boot_pt = BOOT_PAGE_TABLE.lock().take().unwrap();
+
+        dfs_walk_on_leave::<PageTableEntry, PagingConsts>(
+            boot_pt.root_pt,
+            PagingConsts::NR_LEVELS,
+            &mut |pte| {
+                if !pte.prop().flags.contains(PTE_POINTS_TO_FIRMWARE_PT) {
+                    // SAFETY: The pointed-to frame was allocated and forgotten with `into_raw`.
+                    drop(unsafe { Frame::<EarlyAllocatedFrameMeta>::from_raw(pte.paddr()) })
+                }
+                // Firmware-provided page tables may be a DAG instead of a tree.
+                // Clear the PTE to avoid a double-free when we meet it the second time.
+                *pte = PageTableEntry::new_absent();
+            },
+        );
     }
 }
 
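// NOTE: Illustration only, not part of the patch above. The protocol around
// the `PTE_POINTS_TO_FIRMWARE_PT` marker used in `dismiss` above (and defined
// in the next hunk), written out as two hypothetical helpers. The trait bound
// name `PageTableEntryTrait` is assumed from the surrounding module.

// Set on every PTE found in the firmware/loader-provided tables, so teardown
// knows the pointed-to frames were never allocated by OSTD.
fn mark_as_firmware_pt<E: PageTableEntryTrait>(pte: &mut E) {
    let prop = pte.prop();
    pte.set_prop(PageProperty::new(
        prop.flags | PTE_POINTS_TO_FIRMWARE_PT,
        prop.cache,
    ));
}

// Frames allocated by `alloc_child` leave the bit clear; only those may be
// reconstructed with `Frame::from_raw` and dropped during dismissal.
fn owns_child_frame<E: PageTableEntryTrait>(pte: &E) -> bool {
    !pte.prop().flags.contains(PTE_POINTS_TO_FIRMWARE_PT)
}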
@@ -97,6 +116,13 @@ pub(crate) struct BootPageTable<
     _pretend_to_use: core::marker::PhantomData<(E, C)>,
 }
 
+// We use two extra available bits in the boot PT for memory management.
+//
+// The first available bit is used to differentiate firmware page tables from
+// the page tables allocated here. The second is for identifying double visits
+// when walking the page tables, since the PT can be a DAG.
+const PTE_POINTS_TO_FIRMWARE_PT: PageFlags = PageFlags::AVAIL1;
+
 impl BootPageTable {
     /// Creates a new boot page table from the current page table root
     /// physical address.
@@ -108,15 +134,13 @@ impl BootPageTable {
     /// by the firmware, loader or the setup code.
     unsafe fn from_current_pt() -> Self {
         let root_pt = crate::arch::mm::current_page_table_paddr() / C::BASE_PAGE_SIZE;
-        // Make sure the first available bit is not set for firmware page tables.
+        // Mark all firmware-provided page tables with `PTE_POINTS_TO_FIRMWARE_PT`.
         dfs_walk_on_leave::<E, C>(root_pt, C::NR_LEVELS, &mut |pte: &mut E| {
             let prop = pte.prop();
-            if prop.flags.contains(PageFlags::AVAIL1) {
-                pte.set_prop(PageProperty::new(
-                    prop.flags - PageFlags::AVAIL1,
-                    prop.cache,
-                ));
-            }
+            pte.set_prop(PageProperty::new(
+                prop.flags | PTE_POINTS_TO_FIRMWARE_PT,
+                prop.cache,
+            ));
         });
         Self {
             root_pt,
@@ -230,17 +254,26 @@ impl BootPageTable {
     }
 
     fn alloc_child(&mut self) -> E {
-        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
+        let frame_paddr = if frame::meta::is_initialized() {
+            let frame = FrameAllocOptions::new()
+                .zeroed(false)
+                .alloc_frame_with(EarlyAllocatedFrameMeta)
+                .unwrap();
+            frame.into_raw()
+        } else {
+            allocator::early_alloc(
+                Layout::from_size_align(C::BASE_PAGE_SIZE, C::BASE_PAGE_SIZE).unwrap(),
+            )
+            .unwrap()
+        };
+
         // Zero it out.
-        let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
+        let vaddr = paddr_to_vaddr(frame_paddr) as *mut u8;
         unsafe { core::ptr::write_bytes(vaddr, 0, PAGE_SIZE) };
 
-        let mut pte = E::new_pt(frame * C::BASE_PAGE_SIZE);
+        let mut pte = E::new_pt(frame_paddr);
         let prop = pte.prop();
-        pte.set_prop(PageProperty::new(
-            prop.flags | PageFlags::AVAIL1,
-            prop.cache,
-        ));
+        pte.set_prop(PageProperty::new(prop.flags, prop.cache));
         pte
     }
 
@@ -267,20 +300,6 @@ fn dfs_walk_on_leave(
     }
 }
 
-impl Drop for BootPageTable {
-    fn drop(&mut self) {
-        dfs_walk_on_leave::<E, C>(self.root_pt, C::NR_LEVELS, &mut |pte| {
-            if pte.prop().flags.contains(PageFlags::AVAIL1) {
-                let pt = pte.paddr() / C::BASE_PAGE_SIZE;
-                FRAME_ALLOCATOR.get().unwrap().lock().dealloc(pt, 1);
-            }
-            // Firmware provided page tables may be a DAG instead of a tree.
-            // Clear it to avoid double-free when we meet it the second time.
-            *pte = E::new_absent();
-        });
-    }
-}
-
 #[cfg(ktest)]
 use crate::prelude::*;
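// NOTE: Illustration only, not part of the patch above. The two-phase
// allocation that `alloc_child` performs, distilled into a standalone sketch.
// It assumes the imports added in the boot_pt.rs hunks (`frame`, `allocator`,
// `EarlyAllocatedFrameMeta`, `FrameAllocOptions`, `Layout`, `Paddr`) are in
// scope; error handling is reduced to `unwrap` for brevity.

fn alloc_page_table_frame(page_size: usize) -> Paddr {
    if frame::meta::is_initialized() {
        // Normal path: frame metadata is up, so allocate a tracked frame and
        // forget the handle; `dismiss` later rebuilds it with `from_raw` and
        // drops it, returning the memory to the global allocator.
        FrameAllocOptions::new()
            .zeroed(false)
            .alloc_frame_with(EarlyAllocatedFrameMeta)
            .unwrap()
            .into_raw()
    } else {
        // Early-boot path: no metadata yet, so fall back to the raw early
        // allocator, which only hands out physical addresses. These ranges
        // are later adopted as `EarlyAllocatedFrameMeta` segments during
        // metadata initialization.
        allocator::early_alloc(Layout::from_size_align(page_size, page_size).unwrap()).unwrap()
    }
}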
diff --git a/ostd/src/mm/stat/mod.rs b/ostd/src/mm/stat/mod.rs
deleted file mode 100644
index 594486f3e..000000000
--- a/ostd/src/mm/stat/mod.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: MPL-2.0
-
-//! APIs for memory statistics.
-
-use crate::mm::frame::allocator::FRAME_ALLOCATOR;
-
-/// Total memory available for any usages in the system (in bytes).
-///
-/// It would be only a slightly less than total physical memory of the system
-/// in most occasions. For example, bad memory, kernel statically-allocated
-/// memory or firmware reserved memories do not count.
-pub fn mem_total() -> usize {
-    FRAME_ALLOCATOR.get().unwrap().lock().mem_total()
-}
-
-/// Current readily available memory (in bytes).
-///
-/// Such memory can be directly used for allocation without reclaiming.
-pub fn mem_available() -> usize {
-    FRAME_ALLOCATOR.get().unwrap().lock().mem_available()
-}
diff --git a/tools/bump_version.sh b/tools/bump_version.sh
index 0f334dfc6..24177e4f2 100755
--- a/tools/bump_version.sh
+++ b/tools/bump_version.sh
@@ -111,6 +111,7 @@ LINUX_BZIMAGE_BUILDER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/b
 LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH=${ASTER_SRC_DIR}/ostd/libs/linux-bzimage/setup/Cargo.toml
 OSDK_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/Cargo.toml
 OSDK_TEST_RUNNER_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/test-kernel/Cargo.toml
+OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH=${ASTER_SRC_DIR}/osdk/deps/frame-allocator/Cargo.toml
 VERSION_PATH=${ASTER_SRC_DIR}/VERSION
 
 current_version=$(cat ${VERSION_PATH})
@@ -131,13 +132,16 @@ update_package_version ${OSTD_CARGO_TOML_PATH}
 update_package_version ${LINUX_BOOT_PARAMS_CARGO_TOML_PATH}
 update_package_version ${LINUX_BZIMAGE_BUILDER_CARGO_TOML_PATH}
 update_package_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH}
+update_package_version ${OSDK_CARGO_TOML_PATH}
+update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
+update_package_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH}
+
+update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
+update_dep_version ${OSDK_FRAME_ALLOCATOR_CARGO_TOML_PATH} ostd
 update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-test
 update_dep_version ${OSTD_CARGO_TOML_PATH} linux-boot-params
 update_dep_version ${OSTD_CARGO_TOML_PATH} ostd-macros
 update_dep_version ${LINUX_BZIMAGE_SETUP_CARGO_TOML_PATH} linux-boot-params
-update_package_version ${OSDK_CARGO_TOML_PATH}
-update_package_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH}
-update_dep_version ${OSDK_TEST_RUNNER_CARGO_TOML_PATH} ostd
 update_dep_version ${OSDK_CARGO_TOML_PATH} linux-bzimage-builder
 
 # Automatically bump Cargo.lock files
diff --git a/tools/github_workflows/publish_osdk_and_ostd.sh b/tools/github_workflows/publish_osdk_and_ostd.sh
index 8e69226f0..247a18ecd 100755
--- a/tools/github_workflows/publish_osdk_and_ostd.sh
+++ b/tools/github_workflows/publish_osdk_and_ostd.sh
@@ -83,6 +83,7 @@ for TARGET in $TARGETS; do
     do_publish_for ostd/libs/ostd-test $TARGET
     do_publish_for ostd/libs/linux-bzimage/setup $TARGET
     do_publish_for ostd $TARGET
+    do_publish_for osdk/deps/frame-allocator $TARGET
     do_publish_for osdk/deps/test-kernel $TARGET
 
     # For actual publishing, we should only publish once. Using any target that