fix(page_cache): fix the bug that files could not be mmap'd and then read/written directly through the page cache (#1158)

* fix(page_cache): fix the bug that files could not be mmap'd and then read/written directly through the page cache

With this commit, user programs can mmap a file and then read and write it directly (instead of having to go through the read/write syscalls).
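
A minimal sketch of the now-working usage, in Rust with the `libc` crate (the file path and length are illustrative, not part of this commit):

```rust
// Minimal sketch: map a file and access it through the mapping,
// without read()/write() syscalls. Assumes the `libc` crate; the
// path below is illustrative only.
use std::{ffi::CString, ptr};

fn main() {
    unsafe {
        let path = CString::new("/tmp/testfile").unwrap();
        let fd = libc::open(path.as_ptr(), libc::O_RDWR);
        assert!(fd >= 0, "open failed");

        let len = 4096;
        let addr = libc::mmap(
            ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED,
            fd,
            0,
        );
        assert_ne!(addr, libc::MAP_FAILED, "mmap failed");

        // Reading faults the page in from the page cache ...
        let first = *(addr as *const u8);
        // ... and writing dirties it, to be written back later.
        *(addr as *mut u8) = first.wrapping_add(1);

        libc::munmap(addr, len);
        libc::close(fd);
    }
}
```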

Signed-off-by: longjin <longjin@DragonOS.org>

* fix(page_cache): fix page cache file-mapping bugs

- Fix a kernel panic when the same file is mmap'd twice and the file is then written through the second mapping
- Fix a panic during page writeback after the address space has already been dropped
- Add unique-ID support to PageCache and InnerPageCache
- Improve the page fault handlers and add the #[inline(never)] attribute
- Fix an incorrect page-mapping range calculation
- Improve the page reclaimer's handling of address spaces

Signed-off-by: longjin <longjin@DragonOS.org>

---------

Signed-off-by: longjin <longjin@DragonOS.org>
LoGin 2025-05-10 12:12:03 +08:00, committed by GitHub
parent 7486ad438c
commit d3ae9c7c4a
10 changed files with 124 additions and 99 deletions


@@ -272,6 +272,8 @@ impl X86_64MMArch {
             address.data(),
             flags
         );
+        log::error!("fault rip: {:#x}", regs.rip);
+        let pid = ProcessManager::current_pid();
         let mut info =
             SigInfo::new(Signal::SIGSEGV, 0, SigCode::User, SigType::Kill(pid));


@@ -171,6 +171,7 @@ impl dyn Driver {
     /// ## Note
     ///
     /// The default implementation here is inefficient; implement an efficient lookup for your specific driver.
+    #[inline(never)]
     pub fn find_device_by_name(&self, name: &str) -> Option<Arc<dyn Device>> {
         if let Some(r) = self.__find_device_by_name_fast(name) {
             return Some(r);


@@ -72,7 +72,7 @@ pub fn virtio_console(
     log::debug!(
         "virtio_console: dev_id: {:?}, parent: {:?}",
         dev_id,
-        dev_parent
+        dev_parent.as_ref().map(|x| x.name())
     );
     let device = VirtIOConsoleDevice::new(transport, dev_id.clone());
     if device.is_none() {
@@ -576,9 +576,7 @@ impl Driver for VirtIOConsoleDriver {
                 virtio_con_dev.dev_id(),
             );
         }
-        log::debug!("virtio console: add_device: to lock inner");
         let mut inner = self.inner();
-        log::debug!("virtio console: add_device: inner.locked");
         let dev_name = inner.alloc_id();
         if dev_name.is_none() {
             panic!("Failed to allocate ID for VirtIO console device: '{:?}', virtio console device limit exceeded.", virtio_con_dev.dev_id())


@@ -1063,7 +1063,12 @@ pub fn pci_init() {
         let common_header = box_pci_device.common_header();
         match box_pci_device.header_type() {
             HeaderType::Standard if common_header.status & 0x10 != 0 => {
-                info!("Found pci standard device with class code ={} subclass={} status={:#x} cap_pointer={:#x} vendor={:#x}, device id={:#x},bdf={}", common_header.class_code, common_header.subclass, common_header.status, box_pci_device.as_standard_device().unwrap().capabilities_pointer,common_header.vendor_id, common_header.device_id,common_header.bus_device_function);
+                info!(
+                    "Found pci standard device with class code ={} subclass={}, bdf={}",
+                    common_header.class_code,
+                    common_header.subclass,
+                    common_header.bus_device_function
+                );
             }
             HeaderType::Standard => {
                 info!(


@@ -99,6 +99,7 @@ impl TtyLdiscManager {
     /// ### Parameters
     /// - tty: the tty to set up
     /// - o_tty: the other tty, used for a pty pair
+    #[inline(never)]
     pub fn ldisc_setup(tty: Arc<TtyCore>, o_tty: Option<Arc<TtyCore>>) -> Result<(), SystemError> {
         let ld = tty.ldisc();


@@ -196,7 +196,7 @@ impl VirtIODeviceManager {
         dev.set_virtio_device_index(virtio_index);
         dev.set_device_name(format!("virtio{}", virtio_index.data()));
-        log::debug!("virtio_device_add: dev: {:?}", dev);
+        log::debug!("virtio_device_add: dev: {:?}", dev.name());
         // Add the device to the device manager
         device_manager().add_device(dev.clone() as Arc<dyn Device>)?;
         let r = device_manager()


@@ -1,4 +1,7 @@
-use core::cmp::min;
+use core::{
+    cmp::min,
+    sync::atomic::{AtomicUsize, Ordering},
+};
 use alloc::{
     sync::{Arc, Weak},
@@ -21,22 +24,27 @@ use crate::{
 };
 use crate::{libs::align::page_align_up, mm::page::PageType};
+static PAGE_CACHE_ID: AtomicUsize = AtomicUsize::new(0);
 /// Page cache
 #[derive(Debug)]
 pub struct PageCache {
+    id: usize,
     inner: SpinLock<InnerPageCache>,
     inode: Lazy<Weak<dyn IndexNode>>,
 }
 #[derive(Debug)]
 pub struct InnerPageCache {
+    #[allow(unused)]
+    id: usize,
     pages: HashMap<usize, Arc<Page>>,
     page_cache_ref: Weak<PageCache>,
 }
 impl InnerPageCache {
-    pub fn new(page_cache_ref: Weak<PageCache>) -> InnerPageCache {
+    pub fn new(page_cache_ref: Weak<PageCache>, id: usize) -> InnerPageCache {
         Self {
+            id,
             pages: HashMap::new(),
             page_cache_ref,
         }
@@ -316,8 +324,10 @@ impl Drop for InnerPageCache {
 impl PageCache {
     pub fn new(inode: Option<Weak<dyn IndexNode>>) -> Arc<PageCache> {
+        let id = PAGE_CACHE_ID.fetch_add(1, Ordering::SeqCst);
         Arc::new_cyclic(|weak| Self {
-            inner: SpinLock::new(InnerPageCache::new(weak.clone())),
+            id,
+            inner: SpinLock::new(InnerPageCache::new(weak.clone(), id)),
             inode: {
                 let v: Lazy<Weak<dyn IndexNode>> = Lazy::new();
                 if let Some(inode) = inode {
@@ -328,6 +338,13 @@ impl PageCache {
         })
     }
+    /// # Get the page cache ID
+    #[inline]
+    #[allow(unused)]
+    pub fn id(&self) -> usize {
+        self.id
+    }
     pub fn inode(&self) -> Option<Weak<dyn IndexNode>> {
         self.inode.try_get().cloned()
     }
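
The ID scheme above is the familiar global-atomic-counter pattern: `PageCache::new` draws a fresh value and hands the same value to its `InnerPageCache`, so the lock-protected inner state can be correlated with its owning cache in logs and debugging. The pattern in isolation (illustrative types, not the kernel's):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Monotonically increasing source of IDs; values are never reused.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

struct Cache {
    id: usize,
}

impl Cache {
    fn new() -> Self {
        // fetch_add returns the previous value, so concurrent callers
        // are guaranteed distinct IDs.
        Self {
            id: NEXT_ID.fetch_add(1, Ordering::SeqCst),
        }
    }
}

fn main() {
    let (a, b) = (Cache::new(), Cache::new());
    assert_ne!(a.id, b.id); // two caches never share an ID
}
```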


@@ -1,3 +1,4 @@
+use alloc::boxed::Box;
 use core::{
     alloc::Layout,
     cmp::{max, min},
@@ -20,10 +21,7 @@ use crate::{
 use crate::mm::MemoryManagementArch;
-use super::{
-    allocator::page_frame::FrameAllocator,
-    page::{FileMapInfo, Page, PageFlags, PageType},
-};
+use super::page::{Page, PageFlags};
 bitflags! {
     pub struct FaultFlags: u64{
@@ -270,6 +268,7 @@ impl PageFaultHandler {
     ///
     /// ## Return value
     /// - VmFaultReason: page fault handling flags
+    #[inline(never)]
     pub unsafe fn do_fault(pfm: &mut PageFaultMessage) -> VmFaultReason {
         if !pfm.flags().contains(FaultFlags::FAULT_FLAG_WRITE) {
             Self::do_read_fault(pfm)
@@ -293,6 +292,7 @@ impl PageFaultHandler {
     ///
     /// ## Return value
     /// - VmFaultReason: page fault handling flags
+    #[inline(never)]
     pub unsafe fn do_cow_fault(pfm: &mut PageFaultMessage) -> VmFaultReason {
         let mut ret = Self::filemap_fault(pfm);
@@ -353,6 +353,7 @@ impl PageFaultHandler {
     ///
     /// ## Return value
     /// - VmFaultReason: page fault handling flags
+    #[inline(never)]
     pub unsafe fn do_shared_fault(pfm: &mut PageFaultMessage) -> VmFaultReason {
         let mut ret = Self::filemap_fault(pfm);
@@ -413,6 +414,7 @@ impl PageFaultHandler {
     ///
     /// ## Return value
     /// - VmFaultReason: page fault handling flags
+    #[inline(never)]
     pub unsafe fn do_wp_page(pfm: &mut PageFaultMessage) -> VmFaultReason {
         let address = pfm.address_aligned_down();
         let vma = pfm.vma.clone();
@@ -540,7 +542,7 @@ impl PageFaultHandler {
         let to_pte = min(
             from_pte + fault_around_page_number,
             min(
-                1 << MMArch::PAGE_SHIFT,
+                MMArch::PAGE_ENTRY_NUM,
                 pte_pgoff + (vma_pages_count - vm_pgoff),
             ),
         );
@@ -589,7 +591,7 @@ impl PageFaultHandler {
             .expect("file_page_offset is none"))
             << MMArch::PAGE_SHIFT);
-        for pgoff in start_pgoff..=end_pgoff {
+        for pgoff in start_pgoff..end_pgoff {
             if let Some(page) = page_cache.lock_irqsave().get_page(pgoff) {
                 let page_guard = page.read_irqsave();
                 if page_guard.flags().contains(PageFlags::PG_UPTODATE) {
@@ -621,10 +623,10 @@ impl PageFaultHandler {
         let file = vma_guard.vm_file().expect("no vm_file in vma");
         let page_cache = file.inode().page_cache().unwrap();
         let file_pgoff = pfm.file_pgoff.expect("no file_pgoff");
-        let mapper = &mut pfm.mapper;
         let mut ret = VmFaultReason::empty();
-        if let Some(page) = page_cache.lock_irqsave().get_page(file_pgoff) {
+        let page = page_cache.lock_irqsave().get_page(file_pgoff);
+        if let Some(page) = page {
             // TODO: asynchronously read pages from disk into the PageCache
             // Use the page already in the PageCache as the page to map
@@ -633,37 +635,17 @@ impl PageFaultHandler {
             // TODO: synchronous readahead
             // Disk IO is involved, so the returned flag is VM_FAULT_MAJOR
             ret = VmFaultReason::VM_FAULT_MAJOR;
-            // let mut buf: Vec<u8> = vec![0; MMArch::PAGE_SIZE];
-            let allocator = mapper.allocator_mut();
-            // Allocate a physical page to insert into the PageCache as the new page
-            let new_cache_page = allocator.allocate_one().unwrap();
-            // (MMArch::phys_2_virt(new_cache_page).unwrap().data() as *mut u8)
-            //     .copy_from_nonoverlapping(buf.as_mut_ptr(), MMArch::PAGE_SIZE);
+            let mut buffer = Box::new([0u8; MMArch::PAGE_SIZE]);
             file.pread(
                 file_pgoff * MMArch::PAGE_SIZE,
                 MMArch::PAGE_SIZE,
-                core::slice::from_raw_parts_mut(
-                    MMArch::phys_2_virt(new_cache_page).unwrap().data() as *mut u8,
-                    MMArch::PAGE_SIZE,
-                ),
+                buffer.as_mut_slice(),
             )
             .expect("failed to read file to create pagecache page");
+            drop(buffer);
-            let page = page_manager_lock_irqsave()
-                .create_one_page(
-                    PageType::File(FileMapInfo {
-                        page_cache: page_cache.clone(),
-                        index: file_pgoff,
-                    }),
-                    PageFlags::PG_LRU,
-                    allocator,
-                )
-                .expect("failed to create page");
-            pfm.page = Some(page.clone());
-            page_cache.lock_irqsave().add_page(file_pgoff, &page);
+            let page = page_cache.lock_irqsave().get_page(file_pgoff);
+            pfm.page = page;
         }
         ret
     }
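
The reworked slow path above no longer allocates a physical page and inserts it into the cache by hand. Reading the diff, `file.pread` into a throwaway heap buffer appears to populate the page cache as a side effect, after which the handler simply looks the page up again; this avoids creating a second cache page for an offset that another mapping of the same file has already cached. A condensed sketch of that control flow, with hypothetical stand-in types (`File` and `PageCache` here are not the kernel's exact APIs):

```rust
use std::sync::Arc;

// Hypothetical stand-ins so the control flow compiles on its own.
struct Page;
struct PageCache;
struct File;

const PAGE_SIZE: usize = 4096;

impl PageCache {
    fn get_page(&self, _pgoff: usize) -> Option<Arc<Page>> {
        Some(Arc::new(Page)) // assume pread already cached this offset
    }
}
impl File {
    // Assumed to read through the page cache, filling it as a side effect.
    fn pread(&self, _offset: usize, _len: usize, _buf: &mut [u8]) -> Result<usize, ()> {
        Ok(PAGE_SIZE)
    }
}

// Reworked slow path: drive one buffered read, then take the page
// that the read inserted into the cache.
fn filemap_fault_slow(file: &File, cache: &PageCache, pgoff: usize) -> Option<Arc<Page>> {
    let mut buffer = Box::new([0u8; PAGE_SIZE]);
    file.pread(pgoff * PAGE_SIZE, PAGE_SIZE, buffer.as_mut_slice()).ok()?;
    drop(buffer); // the buffer itself is only scratch space
    cache.get_page(pgoff)
}

fn main() {
    assert!(filemap_fault_slow(&File, &PageCache, 0).is_some());
}
```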


@@ -82,12 +82,16 @@ impl PageManager {
     }
     pub fn get(&mut self, paddr: &PhysAddr) -> Option<Arc<Page>> {
-        page_reclaimer_lock_irqsave().get(paddr);
+        if let Some(p) = page_reclaimer_lock_irqsave().get(paddr) {
+            return Some(p);
+        }
         self.phys2page.get(paddr).cloned()
     }
     pub fn get_unwrap(&mut self, paddr: &PhysAddr) -> Arc<Page> {
-        page_reclaimer_lock_irqsave().get(paddr);
+        if let Some(p) = page_reclaimer_lock_irqsave().get(paddr) {
+            return p;
+        }
         self.phys2page
             .get(paddr)
             .unwrap_or_else(|| panic!("Phys Page not found, {:?}", paddr))
@@ -348,8 +352,11 @@ impl PageReclaimer {
         let inode = page_cache.inode().clone().unwrap().upgrade().unwrap();
         for vma in guard.vma_set() {
-            let address_space = vma.lock_irqsave().address_space().unwrap();
-            let address_space = address_space.upgrade().unwrap();
+            let address_space = vma.lock_irqsave().address_space().and_then(|x| x.upgrade());
+            if address_space.is_none() {
+                continue;
+            }
+            let address_space = address_space.unwrap();
             let mut guard = address_space.write();
             let mapper = &mut guard.user_mapper.utable;
             let virt = vma.lock_irqsave().page_address(page_index).unwrap();
@@ -380,6 +387,7 @@ impl PageReclaimer {
             MMArch::PAGE_SIZE
         };
+        if len > 0 {
             inode
                 .write_direct(
                     page_index * MMArch::PAGE_SIZE,
@@ -393,6 +401,7 @@ impl PageReclaimer {
                     SpinLock::new(FilePrivateData::Unused).lock(),
                 )
                 .unwrap();
+        }
         // Clear the flag
         guard.remove_flags(PageFlags::PG_DIRTY);
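
Two defensive changes are visible above. First, `PageManager::get` previously called the reclaimer's `get` and discarded the result; now a page parked in the reclaimer is actually returned. Second, the writeback loop replaces two unwraps with a guarded upgrade: a VMA can outlive its address space, so the `Weak` pointer may no longer be upgradable by the time writeback runs. The upgrade pattern in isolation (illustrative types):

```rust
use std::sync::{Arc, Weak};

struct AddressSpace;

fn write_back_mappings(vmas: &[Weak<AddressSpace>]) -> usize {
    let mut touched = 0;
    for weak in vmas {
        // upgrade() yields None once the address space has been dropped;
        // skipping it replaces the old unwrap() panic.
        let Some(address_space) = weak.upgrade() else {
            continue;
        };
        let _ = &address_space; // ... unmap the page from this space ...
        touched += 1;
    }
    touched
}

fn main() {
    let live = Arc::new(AddressSpace);
    let dead = Arc::new(AddressSpace);
    let dangling = Arc::downgrade(&dead);
    drop(dead); // simulate an address space dropped before writeback
    assert_eq!(write_back_mappings(&[Arc::downgrade(&live), dangling]), 1);
}
```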


@@ -451,8 +451,7 @@ impl InnerAddressSpace {
         // Find an unused region
         let region = match addr {
             Some(vaddr) => {
-                self.mappings
-                    .find_free_at(self.mmap_min, vaddr, page_count.bytes(), map_flags)?
+                self.find_free_at(self.mmap_min, vaddr, page_count.bytes(), map_flags)?
             }
             None => self
                 .mappings
@@ -799,6 +798,56 @@ impl InnerAddressSpace {
         return self.set_brk(new_brk);
     }
+    pub fn find_free_at(
+        &mut self,
+        min_vaddr: VirtAddr,
+        vaddr: VirtAddr,
+        size: usize,
+        flags: MapFlags,
+    ) -> Result<VirtRegion, SystemError> {
+        // If no address is specified, look for a free virtual memory range in the current process's address space.
+        if vaddr == VirtAddr::new(0) {
+            return self
+                .mappings
+                .find_free(min_vaddr, size)
+                .ok_or(SystemError::ENOMEM);
+        }
+        // If an address is specified, check whether it is usable.
+        let requested = VirtRegion::new(vaddr, size);
+        if requested.end() >= MMArch::USER_END_VADDR || !vaddr.check_aligned(MMArch::PAGE_SIZE) {
+            return Err(SystemError::EINVAL);
+        }
+        let intersect_vma = self.mappings.conflicts(requested).next();
+        if let Some(vma) = intersect_vma {
+            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
+                // With MAP_FIXED_NOREPLACE, the mapping cannot be established at the requested address, so give up instead of adjusting the address
+                return Err(SystemError::EEXIST);
+            }
+            if flags.contains(MapFlags::MAP_FIXED) {
+                // Overwrite the existing VMA
+                let intersect_region = vma.lock().region.intersect(&requested).unwrap();
+                self.munmap(
+                    VirtPageFrame::new(intersect_region.start),
+                    PageFrameCount::from_bytes(intersect_region.size).unwrap(),
+                )?;
+                return Ok(requested);
+            }
+            // Without MAP_FIXED, adjust the address instead
+            let requested = self
+                .mappings
+                .find_free(min_vaddr, size)
+                .ok_or(SystemError::ENOMEM)?;
+            return Ok(requested);
+        }
+        return Ok(requested);
+    }
 }
 impl Drop for InnerAddressSpace {
@@ -949,45 +998,6 @@ impl UserMappings {
         return Some(region);
     }
-    pub fn find_free_at(
-        &self,
-        min_vaddr: VirtAddr,
-        vaddr: VirtAddr,
-        size: usize,
-        flags: MapFlags,
-    ) -> Result<VirtRegion, SystemError> {
-        // If no address is specified, look for a free virtual memory range in the current process's address space.
-        if vaddr == VirtAddr::new(0) {
-            return self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM);
-        }
-        // If an address is specified, check whether it is usable.
-        let requested = VirtRegion::new(vaddr, size);
-        if requested.end() >= MMArch::USER_END_VADDR || !vaddr.check_aligned(MMArch::PAGE_SIZE) {
-            return Err(SystemError::EINVAL);
-        }
-        if let Some(_x) = self.conflicts(requested).next() {
-            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
-                // With MAP_FIXED_NOREPLACE, the mapping cannot be established at the requested address, so give up instead of adjusting the address
-                return Err(SystemError::EEXIST);
-            }
-            if flags.contains(MapFlags::MAP_FIXED) {
-                // todo: support MAP_FIXED overwriting an existing VMA
-                return Err(SystemError::ENOSYS);
-            }
-            // Without MAP_FIXED, adjust the address instead
-            let requested = self.find_free(min_vaddr, size).ok_or(SystemError::ENOMEM)?;
-            return Ok(requested);
-        }
-        return Ok(requested);
-    }
     /// Reserve a region of the given size in the current process's address space so that it is not inside a hole.
     /// This function modifies the hole information in vm_holes.
     ///
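
The new `find_free_at` on `InnerAddressSpace` implements the MAP_FIXED case that the old `UserMappings` version rejected with ENOSYS: the intersection with any conflicting VMA is munmap'd first, so the fixed-address mapping replaces it, in line with POSIX semantics. From user space the behavior looks roughly like this (sketch using the `libc` crate; addresses and sizes are illustrative):

```rust
// Sketch: MAP_FIXED replaces whatever is mapped at the target address,
// while MAP_FIXED_NOREPLACE would fail with EEXIST instead.
use std::ptr;

fn main() {
    unsafe {
        let len = 4096;
        // First mapping at a kernel-chosen address.
        let addr = libc::mmap(
            ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(addr, libc::MAP_FAILED);

        // MAP_FIXED at the same address: the overlapping part of the old
        // VMA is torn down and the new mapping takes its place.
        let fixed = libc::mmap(
            addr,
            len,
            libc::PROT_READ,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_FIXED,
            -1,
            0,
        );
        assert_eq!(fixed, addr);

        libc::munmap(addr, len);
    }
}
```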