Avoid repetitive locking for clearing cache

Zhang Junyang 2025-03-20 20:22:48 +08:00 committed by Tate, Hongliang Tian
parent e5be154ca8
commit 83b9ebf87d
2 changed files with 27 additions and 20 deletions

View File

@@ -71,13 +71,17 @@ impl<const NR_CONT_FRAMES: usize, const COUNT: usize> CacheArray<NR_CONT_FRAMES, COUNT>
     /// deallocate to the global pool.
     fn dealloc(&mut self, guard: &DisabledLocalIrqGuard, addr: Paddr) {
         if self.push_front(addr).is_none() {
-            super::pools::dealloc(guard, addr, Self::segment_size());
-            let nr_to_dealloc = COUNT * 2 / 3;
-            for _ in 0..nr_to_dealloc {
-                let frame = self.pop_front().unwrap();
-                super::pools::dealloc(guard, frame, Self::segment_size());
-            }
+            let nr_to_dealloc = COUNT * 2 / 3 + 1;
+            let segments = (0..nr_to_dealloc).map(|i| {
+                if i == 0 {
+                    (addr, Self::segment_size())
+                } else {
+                    (self.pop_front().unwrap(), Self::segment_size())
+                }
+            });
+            super::pools::dealloc(guard, segments);
         };
     }
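
The change above is the motivating case for this commit: the old flush loop called `super::pools::dealloc` once per segment, and every call could take the global pool lock; the new code builds one lazy iterator over the same `COUNT * 2 / 3 + 1` segments (the incoming `addr` first, then entries popped from the cache) and hands them down in a single call. A toy contrast of the two locking shapes, with a plain `Mutex<Vec<_>>` standing in for the global pool (names and types here are hypothetical, not the OSTD API):

use std::sync::Mutex;

// Hypothetical stand-in for the global frame pool.
static GLOBAL_POOL: Mutex<Vec<(usize, usize)>> = Mutex::new(Vec::new());

/// Old shape: one lock round-trip per segment.
fn dealloc_each(segments: &[(usize, usize)]) {
    for &seg in segments {
        GLOBAL_POOL.lock().unwrap().push(seg); // lock/unlock every iteration
    }
}

/// New shape: a single acquisition covers the whole batch.
fn dealloc_batched(segments: impl Iterator<Item = (usize, usize)>) {
    let mut pool = GLOBAL_POOL.lock().unwrap(); // lock once
    for seg in segments {
        pool.push(seg);
    }
} // guard dropped: unlock once

fn main() {
    dealloc_each(&[(0x1000, 0x1000), (0x2000, 0x1000)]);
    dealloc_batched([(0x3000, 0x1000), (0x4000, 0x1000)].into_iter());
}

Note that `segments` in the diff borrows the cache mutably and is lazy: the `pop_front` calls only run as `pools::dealloc` drains the batch.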
@@ -134,7 +138,7 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Paddr>
 pub(super) fn dealloc(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
     let nr_frames = size / PAGE_SIZE;
     if nr_frames > 4 {
-        super::pools::dealloc(guard, addr, size);
+        super::pools::dealloc(guard, [(addr, size)].into_iter());
         return;
     }
@@ -146,6 +150,6 @@ pub(super) fn dealloc(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
         2 => cache.cache2.dealloc(guard, addr),
         3 => cache.cache3.dealloc(guard, addr),
         4 => cache.cache4.dealloc(guard, addr),
-        _ => super::pools::dealloc(guard, addr, size),
+        _ => super::pools::dealloc(guard, [(addr, size)].into_iter()),
     }
 }
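
Callers that bypass the cache now wrap their single segment as `[(addr, size)].into_iter()` to match the batched signature. A quick, self-contained illustration of what that expression yields (addresses hypothetical):

fn main() {
    let (addr, size) = (0x8000_0000usize, 4096usize);
    // A one-element array is the cheapest way to present a single
    // segment to an `impl Iterator<Item = (Paddr, usize)>` parameter:
    let mut segments = [(addr, size)].into_iter();
    assert_eq!(segments.next(), Some((0x8000_0000, 4096)));
    assert_eq!(segments.next(), None);
}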

View File

@ -82,8 +82,7 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Pad
do_dealloc( do_dealloc(
&mut local_pool, &mut local_pool,
&mut global_pool, &mut global_pool,
chunk_addr + layout.size(), [(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
allocated_size - layout.size(),
); );
} }
} }
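
This is the tail-return path: when the buddy pool serves a chunk larger than the request, the unused remainder starting at `chunk_addr + layout.size()` is freed right away as a one-segment batch. A worked example with hypothetical numbers, assuming power-of-two chunk sizing:

fn main() {
    const PAGE_SIZE: usize = 4096;
    // Hypothetical request: 3 pages (12 KiB). A buddy allocator serves
    // the next power-of-two chunk: 4 pages (16 KiB).
    let requested = 3 * PAGE_SIZE;
    let allocated_size = requested.next_power_of_two();
    let chunk_addr = 0x8000_0000usize;
    // The unused tail goes back to the pools as a one-segment batch.
    let tail = (chunk_addr + requested, allocated_size - requested);
    assert_eq!(tail, (0x8000_3000, PAGE_SIZE));
}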
@@ -95,12 +94,15 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Paddr>
     chunk_addr
 }
 
-pub(super) fn dealloc(guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize) {
+pub(super) fn dealloc(
+    guard: &DisabledLocalIrqGuard,
+    segments: impl Iterator<Item = (Paddr, usize)>,
+) {
     let local_pool_cell = LOCAL_POOL.get_with(guard);
     let mut local_pool = local_pool_cell.borrow_mut();
     let mut global_pool = OnDemandGlobalLock::new();
 
-    do_dealloc(&mut local_pool, &mut global_pool, addr, size);
+    do_dealloc(&mut local_pool, &mut global_pool, segments);
 
     balancing::balance(local_pool.deref_mut(), &mut global_pool);
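
With the new signature, `pools::dealloc` accepts any `impl Iterator<Item = (Paddr, usize)>`, so the single-segment and batched callers share one entry point while the lock handling stays in one place. A minimal sketch of that call shape (the body is a hypothetical stand-in):

// `Paddr` is a physical address; aliased here for a self-contained sketch.
type Paddr = usize;

// Hypothetical body: the real function feeds the segments into the
// local/global buddy pools, as `do_dealloc` below does.
fn dealloc(segments: impl Iterator<Item = (Paddr, usize)>) {
    for (addr, size) in segments {
        println!("free {size:#x} bytes at {addr:#x}");
    }
}

fn main() {
    // Cache-bypass path: a single segment.
    dealloc([(0x1000usize, 0x1000usize)].into_iter());
    // Cache-flush path: a lazily built batch.
    dealloc((0..3usize).map(|i| (0x10_0000 + i * 0x1000, 0x1000)));
}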
@@ -120,15 +122,16 @@ pub(super) fn add_free_memory(_guard: &DisabledLocalIrqGuard, addr: Paddr, size: usize)
 fn do_dealloc(
     local_pool: &mut BuddySet<MAX_LOCAL_BUDDY_ORDER>,
     global_pool: &mut OnDemandGlobalLock,
-    addr: Paddr,
-    size: usize,
+    segments: impl Iterator<Item = (Paddr, usize)>,
 ) {
-    split_to_chunks(addr, size).for_each(|(addr, order)| {
-        if order >= MAX_LOCAL_BUDDY_ORDER {
-            global_pool.get().insert_chunk(addr, order);
-        } else {
-            local_pool.insert_chunk(addr, order);
-        }
+    segments.for_each(|(addr, size)| {
+        split_to_chunks(addr, size).for_each(|(addr, order)| {
+            if order >= MAX_LOCAL_BUDDY_ORDER {
+                global_pool.get().insert_chunk(addr, order);
+            } else {
+                local_pool.insert_chunk(addr, order);
+            }
+        });
     });
 }
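
`OnDemandGlobalLock` is what makes the batching pay off: the global lock is acquired only when the first chunk actually spills past the local pool, and the guard is then reused for the rest of the batch. A minimal sketch of that pattern, assuming a `Mutex`-protected free list; only the `new()`/`get()` call shape is taken from the diff, everything else is hypothetical:

use std::sync::{Mutex, MutexGuard};

// Hypothetical global free list standing in for the global buddy pool.
static GLOBAL_POOL: Mutex<Vec<(usize, usize)>> = Mutex::new(Vec::new());

/// The lock is taken on the first `get()` and the guard is then cached,
/// so a whole batch pays for at most one acquisition -- and none at all
/// if every chunk fits the local pool and `get()` is never called.
struct OnDemandGlobalLock {
    guard: Option<MutexGuard<'static, Vec<(usize, usize)>>>,
}

impl OnDemandGlobalLock {
    fn new() -> Self {
        Self { guard: None }
    }

    fn get(&mut self) -> &mut Vec<(usize, usize)> {
        self.guard
            .get_or_insert_with(|| GLOBAL_POOL.lock().unwrap())
    }
}

fn main() {
    let mut global_pool = OnDemandGlobalLock::new();
    for i in 0..3usize {
        global_pool.get().push((i * 0x1000, 0x1000)); // locks only on i == 0
    }
} // guard dropped: one unlock for the whole batch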