Make the RCU lifetime of page tables explicit

This commit is contained in:
Zhang Junyang
2025-05-20 11:39:51 +08:00
committed by Tate, Hongliang Tian
parent ef81100958
commit 79b3f68892
16 changed files with 524 additions and 340 deletions

View File

@ -20,7 +20,10 @@ use core::{
use align_ext::AlignExt;
use aster_rights::Full;
use ostd::mm::{vm_space::VmItem, UntypedMem, VmIo, MAX_USERSPACE_VADDR};
use ostd::{
mm::{vm_space::VmItem, UntypedMem, VmIo, MAX_USERSPACE_VADDR},
task::disable_preempt,
};
use self::aux_vec::{AuxKey, AuxVec};
use super::ProcessVmarGuard;
@ -386,7 +389,11 @@ impl InitStackReader<'_> {
let page_base_addr = stack_base.align_down(PAGE_SIZE);
let vm_space = self.vmar.unwrap().vm_space();
let mut cursor = vm_space.cursor(&(page_base_addr..page_base_addr + PAGE_SIZE))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor(
&preempt_guard,
&(page_base_addr..page_base_addr + PAGE_SIZE),
)?;
let VmItem::Mapped { frame, .. } = cursor.query()? else {
return_errno_with_message!(Errno::EACCES, "Page not accessible");
};
@ -410,7 +417,11 @@ impl InitStackReader<'_> {
let page_base_addr = read_offset.align_down(PAGE_SIZE);
let vm_space = self.vmar.unwrap().vm_space();
let mut cursor = vm_space.cursor(&(page_base_addr..page_base_addr + PAGE_SIZE))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor(
&preempt_guard,
&(page_base_addr..page_base_addr + PAGE_SIZE),
)?;
let VmItem::Mapped { frame, .. } = cursor.query()? else {
return_errno_with_message!(Errno::EACCES, "Page not accessible");
};
@ -450,7 +461,11 @@ impl InitStackReader<'_> {
let page_base_addr = read_offset.align_down(PAGE_SIZE);
let vm_space = self.vmar.unwrap().vm_space();
let mut cursor = vm_space.cursor(&(page_base_addr..page_base_addr + PAGE_SIZE))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor(
&preempt_guard,
&(page_base_addr..page_base_addr + PAGE_SIZE),
)?;
let VmItem::Mapped { frame, .. } = cursor.query()? else {
return_errno_with_message!(Errno::EACCES, "Page not accessible");
};

View File

@ -7,7 +7,10 @@
use align_ext::AlignExt;
use aster_rights::Full;
use ostd::mm::{CachePolicy, PageFlags, PageProperty, VmIo};
use ostd::{
mm::{CachePolicy, PageFlags, PageProperty, VmIo},
task::disable_preempt,
};
use xmas_elf::program::{self, ProgramHeader64};
use super::elf_file::Elf;
@ -311,9 +314,10 @@ fn map_segment_vmo(
// Tail padding: If the segment's mem_size is larger than its file size,
// then the bytes that are not backed by file content should be zeros (usually the .data/.bss sections).
let preempt_guard = disable_preempt();
let mut cursor = root_vmar
.vm_space()
.cursor_mut(&(map_addr..map_addr + segment_size))?;
.cursor_mut(&preempt_guard, &(map_addr..map_addr + segment_size))?;
let page_flags = PageFlags::from(perms) | PageFlags::ACCESSED;
// Head padding.

View File

@ -11,7 +11,10 @@ use core::{num::NonZeroUsize, ops::Range};
use align_ext::AlignExt;
use aster_rights::Rights;
use ostd::mm::{tlb::TlbFlushOp, PageFlags, PageProperty, VmSpace, MAX_USERSPACE_VADDR};
use ostd::{
mm::{tlb::TlbFlushOp, PageFlags, PageProperty, VmSpace, MAX_USERSPACE_VADDR},
task::disable_preempt,
};
use self::{
interval_set::{Interval, IntervalSet},
@ -355,16 +358,19 @@ impl Vmar_ {
/// Clears all content of the root VMAR.
fn clear_root_vmar(&self) -> Result<()> {
{
let full_range = 0..MAX_USERSPACE_VADDR;
let mut cursor = self.vm_space.cursor_mut(&full_range).unwrap();
cursor.unmap(full_range.len());
cursor.flusher().sync_tlb_flush();
}
{
let mut inner = self.inner.write();
inner.vm_mappings.clear();
}
let mut inner = self.inner.write();
inner.vm_mappings.clear();
// Keep `inner` locked to avoid race conditions.
let preempt_guard = disable_preempt();
let full_range = 0..MAX_USERSPACE_VADDR;
let mut cursor = self
.vm_space
.cursor_mut(&preempt_guard, &full_range)
.unwrap();
cursor.unmap(full_range.len());
cursor.flusher().sync_tlb_flush();
Ok(())
}
@ -428,11 +434,12 @@ impl Vmar_ {
let mut new_inner = new_vmar_.inner.write();
// Clone mappings.
let preempt_guard = disable_preempt();
let new_vmspace = new_vmar_.vm_space();
let range = self.base..(self.base + self.size);
let mut new_cursor = new_vmspace.cursor_mut(&range).unwrap();
let mut new_cursor = new_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
let cur_vmspace = self.vm_space();
let mut cur_cursor = cur_vmspace.cursor_mut(&range).unwrap();
let mut cur_cursor = cur_vmspace.cursor_mut(&preempt_guard, &range).unwrap();
for vm_mapping in inner.vm_mappings.iter() {
let base = vm_mapping.map_to_addr();

View File

@ -7,9 +7,12 @@ use core::{
};
use align_ext::AlignExt;
use ostd::mm::{
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
UFrame, VmSpace,
use ostd::{
mm::{
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
UFrame, VmSpace,
},
task::disable_preempt,
};
use super::interval_set::Interval;
@ -152,8 +155,11 @@ impl VmMapping {
// Errors caused by the "around" pages should be ignored, so here we
// only return the error if the faulting page is still not mapped.
if res.is_err() {
let mut cursor =
vm_space.cursor(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor(
&preempt_guard,
&(page_aligned_addr..page_aligned_addr + PAGE_SIZE),
)?;
if let VmItem::Mapped { .. } = cursor.query().unwrap() {
return Ok(());
}
@ -163,8 +169,11 @@ impl VmMapping {
}
'retry: loop {
let mut cursor =
vm_space.cursor_mut(&(page_aligned_addr..page_aligned_addr + PAGE_SIZE))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor_mut(
&preempt_guard,
&(page_aligned_addr..page_aligned_addr + PAGE_SIZE),
)?;
match cursor.query().unwrap() {
VmItem::Mapped {
@ -213,6 +222,7 @@ impl VmMapping {
Err(VmoCommitError::Err(e)) => return Err(e),
Err(VmoCommitError::NeedIo(index)) => {
drop(cursor);
drop(preempt_guard);
self.vmo
.as_ref()
.unwrap()
@ -291,7 +301,8 @@ impl VmMapping {
let vm_perms = self.perms - VmPerms::WRITE;
'retry: loop {
let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
let preempt_guard = disable_preempt();
let mut cursor = vm_space.cursor_mut(&preempt_guard, &(start_addr..end_addr))?;
let operate =
move |commit_fn: &mut dyn FnMut()
-> core::result::Result<UFrame, VmoCommitError>| {
@ -317,6 +328,7 @@ impl VmMapping {
match vmo.try_operate_on_range(&(start_offset..end_offset), operate) {
Ok(_) => return Ok(()),
Err(VmoCommitError::NeedIo(index)) => {
drop(preempt_guard);
vmo.commit_on(index, CommitFlags::empty())?;
start_addr = index * PAGE_SIZE + self.map_to_addr;
continue 'retry;
@ -419,8 +431,10 @@ impl VmMapping {
impl VmMapping {
/// Unmaps the mapping from the VM space.
pub(super) fn unmap(self, vm_space: &VmSpace) -> Result<()> {
let preempt_guard = disable_preempt();
let range = self.range();
let mut cursor = vm_space.cursor_mut(&range)?;
let mut cursor = vm_space.cursor_mut(&preempt_guard, &range)?;
cursor.unmap(range.len());
cursor.flusher().dispatch_tlb_flush();
cursor.flusher().sync_tlb_flush();
@ -430,9 +444,9 @@ impl VmMapping {
/// Change the perms of the mapping.
pub(super) fn protect(self, vm_space: &VmSpace, perms: VmPerms) -> Self {
let preempt_guard = disable_preempt();
let range = self.range();
let mut cursor = vm_space.cursor_mut(&range).unwrap();
let mut cursor = vm_space.cursor_mut(&preempt_guard, &range).unwrap();
let op = |p: &mut PageProperty| p.flags = perms.into();
while cursor.virt_addr() < range.end {