Mirror of https://github.com/asterinas/asterinas.git
Improve the performance of the page_cache by using a fine-grained lock and allowing page discarding

commit efa75a2f31 (parent 822f2b97c4)
@@ -1241,8 +1241,7 @@ impl Inode for ExfatInode {
 
         inner
             .page_cache
-            .pages()
-            .decommit(read_off..read_off + read_len)?;
+            .discard_range(read_off..read_off + read_len);
 
         let mut buf_offset = 0;
         let frame = VmAllocOptions::new(1).uninit(true).alloc_single().unwrap();
@@ -1334,7 +1333,7 @@ impl Inode for ExfatInode {
 
         let start = offset.min(file_size);
         let end = end_offset.min(file_size);
-        inner.page_cache.pages().decommit(start..end)?;
+        inner.page_cache.discard_range(start..end);
 
         let new_size = {
             let mut inner = inner.upgrade();
@@ -649,9 +649,7 @@ impl Inner {
             let end = file_size.min(offset + buf.len()).align_down(BLOCK_SIZE);
             (start, end - start)
         };
-        self.page_cache
-            .pages()
-            .decommit(offset..offset + read_len)?;
+        self.page_cache.discard_range(offset..offset + read_len);
 
         let mut buf_offset = 0;
         for bid in Bid::from_offset(offset)..Bid::from_offset(offset + read_len) {
@@ -683,7 +681,7 @@ impl Inner {
 
         let start = offset.min(file_size);
         let end = end_offset.min(file_size);
-        self.page_cache.pages().decommit(start..end)?;
+        self.page_cache.discard_range(start..end);
 
         if end_offset > file_size {
             self.page_cache.pages().resize(end_offset)?;
@@ -54,6 +54,12 @@ impl PageCache {
         self.manager.evict_range(range)
     }
 
+    /// Evict the data within a specified range from the page cache without persisting
+    /// them to the backend.
+    pub fn discard_range(&self, range: Range<usize>) {
+        self.manager.discard_range(range)
+    }
+
     /// Returns the backend.
     pub fn backend(&self) -> Arc<dyn PageCacheBackend> {
         self.manager.backend()
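
The new discard_range complements evict_range: as the surrounding hunks show, evict_range writes dirty pages back to the backend, while discard_range simply drops the cached pages, intentionally losing any dirty contents, presumably for paths where the cached copy need not be persisted first (as in the direct-read call sites above). A minimal sketch of the two behaviours using only standard-library types; ToyPageCache, PAGE_SIZE, and the boolean dirty flag are illustrative stand-ins, not the kernel's Page/VmFrame/LruCache types:

use std::collections::BTreeMap;
use std::ops::Range;
use std::sync::Mutex;

const PAGE_SIZE: usize = 4096;

// Toy page map: index -> (data, dirty). Plain std types, not the kernel's.
struct ToyPageCache {
    pages: Mutex<BTreeMap<usize, (Vec<u8>, bool)>>,
    backend: Mutex<BTreeMap<usize, Vec<u8>>>,
}

impl ToyPageCache {
    // evict_range-like: flush dirty pages in the byte range to the backend
    // and mark them clean; the pages stay cached.
    fn evict_range(&self, range: Range<usize>) {
        for idx in range.start / PAGE_SIZE..(range.end + PAGE_SIZE - 1) / PAGE_SIZE {
            if let Some((data, dirty)) = self.pages.lock().unwrap().get_mut(&idx) {
                if *dirty {
                    self.backend.lock().unwrap().insert(idx, data.clone());
                    *dirty = false;
                }
            }
        }
    }

    // discard_range-like: drop cached pages in the byte range without any
    // write-back; whatever dirty data they held is intentionally lost.
    fn discard_range(&self, range: Range<usize>) {
        for idx in range.start / PAGE_SIZE..(range.end + PAGE_SIZE - 1) / PAGE_SIZE {
            self.pages.lock().unwrap().remove(&idx);
        }
    }
}

fn main() {
    let cache = ToyPageCache {
        pages: Mutex::new(BTreeMap::from([
            (0, (vec![1u8; PAGE_SIZE], true)),
            (1, (vec![2u8; PAGE_SIZE], true)),
        ])),
        backend: Mutex::new(BTreeMap::new()),
    };
    // Page 0 is flushed to the backend and stays cached (now clean)...
    cache.evict_range(0..PAGE_SIZE);
    assert!(cache.backend.lock().unwrap().contains_key(&0));
    // ...while page 1 is dropped without ever reaching the backend.
    cache.discard_range(PAGE_SIZE..2 * PAGE_SIZE);
    assert!(!cache.backend.lock().unwrap().contains_key(&1));
    assert!(!cache.pages.lock().unwrap().contains_key(&1));
}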
@@ -96,15 +102,22 @@ impl PageCacheManager {
         self.backend.upgrade().unwrap()
     }
 
+    // Discard pages without writing them back to disk.
+    pub fn discard_range(&self, range: Range<usize>) {
+        let page_idx_range = get_page_idx_range(&range);
+        for idx in page_idx_range {
+            self.pages.lock().pop(&idx);
+        }
+    }
+
     pub fn evict_range(&self, range: Range<usize>) -> Result<()> {
         let page_idx_range = get_page_idx_range(&range);
-        let mut pages = self.pages.lock();
 
         //TODO: When there are many pages, we should submit them in batches of folios rather than all at once.
         let mut indices_and_waiters: Vec<(usize, BioWaiter)> = Vec::new();
 
         for idx in page_idx_range {
-            if let Some(page) = pages.get_mut(&idx) {
+            if let Some(page) = self.pages.lock().get_mut(&idx) {
                 if let PageState::Dirty = page.state() {
                     let backend = self.backend();
                     if idx < backend.npages() {
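
The helper get_page_idx_range is not part of this diff. Assuming 4 KiB pages, a plausible equivalent maps a byte range to the covering range of page indices (start rounded down, end rounded up), roughly:

use std::ops::Range;

const PAGE_SIZE: usize = 4096;

// Hypothetical reconstruction of the helper used above: map a byte range to
// the range of page indices that covers it.
fn get_page_idx_range(byte_range: &Range<usize>) -> Range<usize> {
    let start = byte_range.start / PAGE_SIZE;
    let end = (byte_range.end + PAGE_SIZE - 1) / PAGE_SIZE;
    start..end
}

fn main() {
    // Bytes 0..8192 span pages 0 and 1; bytes 1..4097 also span pages 0 and 1.
    assert_eq!(get_page_idx_range(&(0..8192)), 0..2);
    assert_eq!(get_page_idx_range(&(1..4097)), 0..2);
}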
@@ -116,7 +129,9 @@ impl PageCacheManager {
 
         for (idx, waiter) in indices_and_waiters.iter() {
             if matches!(waiter.wait(), Some(BioStatus::Complete)) {
-                pages.get_mut(idx).unwrap().set_state(PageState::UpToDate)
+                if let Some(page) = self.pages.lock().get_mut(idx) {
+                    page.set_state(PageState::UpToDate)
+                }
             } else {
                 // TODO: We may need an error handler here.
                 return_errno!(Errno::EIO)
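
Because evict_range now re-acquires the page-map lock per access instead of holding it from submission through completion, a page whose write-back is in flight can be removed from the cache (for example by discard_range or decommit_page) before the waiter loop runs, so the completion path can no longer unwrap() the lookup. A small illustration of the guarded update, using standard-library types in place of the kernel's:

use std::collections::HashMap;
use std::sync::Mutex;

enum State { Dirty, UpToDate }

fn mark_complete(pages: &Mutex<HashMap<usize, State>>, idx: usize) {
    // The page may have been discarded while the write was in flight, so
    // get_mut can legitimately return None; an unwrap() here could panic.
    if let Some(state) = pages.lock().unwrap().get_mut(&idx) {
        *state = State::UpToDate;
    }
}

fn main() {
    let pages = Mutex::new(HashMap::from([(0, State::Dirty)]));
    pages.lock().unwrap().remove(&0); // simulate a concurrent discard
    mark_complete(&pages, 0); // no panic: the missing entry is simply skipped
}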
@@ -137,24 +152,23 @@ impl Debug for PageCacheManager {
 
 impl Pager for PageCacheManager {
     fn commit_page(&self, idx: usize) -> Result<VmFrame> {
-        let mut pages = self.pages.lock();
-        let frame = if let Some(page) = pages.get(&idx) {
-            page.frame().clone()
-        } else {
-            let backend = self.backend();
-            let page = if idx < backend.npages() {
-                let mut page = Page::alloc()?;
-                backend.read_page_sync(idx, page.frame())?;
-                page.set_state(PageState::UpToDate);
-
-                page
-            } else {
-                Page::alloc_zero()?
-            };
-            let frame = page.frame().clone();
-            pages.put(idx, page);
-            frame
-        };
+        if let Some(page) = self.pages.lock().get(&idx) {
+            return Ok(page.frame.clone());
+        }
+
+        //Multiple threads may commit the same page, but the result is ok.
+        let backend = self.backend();
+        let page = if idx < backend.npages() {
+            let mut page = Page::alloc()?;
+            backend.read_page_sync(idx, page.frame())?;
+            page.set_state(PageState::UpToDate);
+
+            page
+        } else {
+            Page::alloc_zero()?
+        };
+        let frame = page.frame().clone();
+        self.pages.lock().put(idx, page);
 
         Ok(frame)
     }
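
The comment "Multiple threads may commit the same page, but the result is ok" captures the trade-off of dropping the lock around the backend read: two threads can both miss, both read the same page from the backend, and both insert it, so work may be duplicated but the cached contents stay correct. A self-contained sketch of that behaviour with standard-library types (ToyCache and the 16-byte "pages" are illustrative only, not the kernel's API):

use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

struct ToyCache {
    pages: Mutex<HashMap<usize, Vec<u8>>>,
    backend_reads: AtomicUsize,
}

impl ToyCache {
    fn commit_page(&self, idx: usize) -> Vec<u8> {
        // Fast path: page already cached. The lock is held only for the lookup.
        if let Some(p) = self.pages.lock().unwrap().get(&idx) {
            return p.clone();
        }
        // Slow path runs without the lock, so two threads may both get here
        // for the same idx; both read identical data, and the second insert
        // merely overwrites an identical entry.
        self.backend_reads.fetch_add(1, Ordering::Relaxed);
        thread::sleep(Duration::from_millis(5)); // simulated disk read
        let page = vec![idx as u8; 16];
        self.pages.lock().unwrap().insert(idx, page.clone());
        page
    }
}

fn main() {
    let cache = Arc::new(ToyCache {
        pages: Mutex::new(HashMap::new()),
        backend_reads: AtomicUsize::new(0),
    });
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let c = cache.clone();
            thread::spawn(move || c.commit_page(0))
        })
        .collect();
    for h in handles {
        assert_eq!(h.join().unwrap(), vec![0u8; 16]);
    }
    // May be anywhere from 1 to 4: duplicated work is possible but harmless.
    println!("backend reads: {}", cache.backend_reads.load(Ordering::Relaxed));
}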
@@ -170,8 +184,8 @@ impl Pager for PageCacheManager {
     }
 
     fn decommit_page(&self, idx: usize) -> Result<()> {
-        let mut pages = self.pages.lock();
-        if let Some(page) = pages.pop(&idx) {
+        let page_result = self.pages.lock().pop(&idx);
+        if let Some(page) = page_result {
             if let PageState::Dirty = page.state() {
                 let Some(backend) = self.backend.upgrade() else {
                     return Ok(());
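
The two-step binding in decommit_page appears deliberate: in Rust, a temporary created in the scrutinee of an if let (here the MutexGuard returned by self.pages.lock()) lives until the end of the whole if let block, so inlining the pop into the scrutinee would keep the page map locked across the dirty-page write-back. Binding the popped value first releases the lock before any backend I/O. A minimal illustration with standard-library types (names are illustrative):

use std::collections::HashMap;
use std::sync::Mutex;

fn decommit(pages: &Mutex<HashMap<usize, Vec<u8>>>, idx: usize) {
    // The guard returned by lock() is a temporary dropped at the end of this
    // statement, so the map is unlocked before any write-back work begins.
    let popped = pages.lock().unwrap().remove(&idx);

    if let Some(data) = popped {
        // Had the pop been written inline in the `if let` scrutinee, the
        // MutexGuard temporary would live until the end of this block,
        // holding the lock across this (potentially slow) write-back.
        slow_write_back(idx, &data);
    }
}

fn slow_write_back(_idx: usize, _data: &[u8]) {
    std::thread::sleep(std::time::Duration::from_millis(5));
}

fn main() {
    let pages = Mutex::new(HashMap::from([(7, vec![0u8; 4096])]));
    decommit(&pages, 7);
    assert!(pages.lock().unwrap().is_empty());
}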