Fix the nested bottom half

commit 28edc57dd0 (parent 0cb2ea562e)
Author:    Ruihan Li
Date:      2024-12-01 23:11:49 +08:00
Committer: Tate, Hongliang Tian

4 changed files with 102 additions and 86 deletions

@@ -119,37 +119,15 @@ static ENABLED_MASK: AtomicU8 = AtomicU8::new(0);
 cpu_local_cell! {
     static PENDING_MASK: u8 = 0;
-    static IS_ENABLED: bool = true;
 }
 
-/// Enables softirq in current processor.
-fn enable_softirq_local() {
-    IS_ENABLED.store(true);
-}
-
-/// Disables softirq in current processor.
-fn disable_softirq_local() {
-    IS_ENABLED.store(false);
-}
-
-/// Checks whether the softirq is enabled in current processor.
-fn is_softirq_enabled() -> bool {
-    IS_ENABLED.load()
-}
-
 /// Processes pending softirqs.
 ///
 /// The processing instructions will iterate for `SOFTIRQ_RUN_TIMES` times. If any softirq
 /// is raised during the iteration, it will be processed.
-pub(crate) fn process_pending() {
+fn process_pending() {
     const SOFTIRQ_RUN_TIMES: u8 = 5;
 
-    if !is_softirq_enabled() {
-        return;
-    }
-
-    disable_softirq_local();
-
     for _i in 0..SOFTIRQ_RUN_TIMES {
         let mut action_mask = {
             let pending_mask = PENDING_MASK.load();
@@ -166,6 +144,4 @@ pub(crate) fn process_pending() {
             action_mask &= action_mask - 1;
         }
     }
-
-    enable_softirq_local();
 }
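
The loop above drains `action_mask` with the classic `x & (x - 1)` trick, which clears exactly the least significant set bit on each pass. A minimal, standalone sketch of that idiom (the handler output and the demo mask are made up for illustration; only the masking line is from the diff):

    // Each set bit in `action_mask` marks a raised softirq line.
    fn run_raised(mut action_mask: u8) {
        while action_mask != 0 {
            // The index of the lowest set bit is the next line to serve.
            let line = action_mask.trailing_zeros();
            println!("running softirq handler for line {line}");
            // `x & (x - 1)` clears exactly the least significant set bit,
            // so the loop runs once per raised line.
            action_mask &= action_mask - 1;
        }
    }

    fn main() {
        run_raised(0b0010_1010); // lines 1, 3, and 5 are raised
    }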

@@ -122,39 +122,48 @@ impl UserContextApiInternal for UserContext {
         // set ID flag which means cpu support CPUID instruction
         self.user_context.general.rflags |= (RFlags::INTERRUPT_FLAG | RFlags::ID).bits() as usize;
 
-        const SYSCALL_TRAPNUM: u16 = 0x100;
+        const SYSCALL_TRAPNUM: usize = 0x100;
 
         // return when it is syscall or cpu exception type is Fault or Trap.
         let return_reason = loop {
             scheduler::might_preempt();
             self.user_context.run();
             match CpuException::to_cpu_exception(self.user_context.trap_num as u16) {
+                #[cfg(feature = "cvm_guest")]
+                Some(CpuException::VIRTUALIZATION_EXCEPTION) => {
+                    crate::arch::irq::enable_local();
+                    handle_virtualization_exception(self);
+                }
+                Some(exception) if exception.typ().is_fault_or_trap() => {
+                    crate::arch::irq::enable_local();
+                    break ReturnReason::UserException;
+                }
                 Some(exception) => {
-                    #[cfg(feature = "cvm_guest")]
-                    if exception == CpuException::VIRTUALIZATION_EXCEPTION {
-                        handle_virtualization_exception(self);
-                        continue;
-                    }
-                    match exception.typ() {
-                        CpuExceptionType::FaultOrTrap
-                        | CpuExceptionType::Trap
-                        | CpuExceptionType::Fault => break ReturnReason::UserException,
-                        _ => (),
-                    }
                     panic!(
                         "cannot handle user CPU exception: {:?}, trapframe: {:?}",
                         exception,
                         self.as_trap_frame()
                     );
                 }
+                None if self.user_context.trap_num == SYSCALL_TRAPNUM => {
+                    crate::arch::irq::enable_local();
+                    break ReturnReason::UserSyscall;
+                }
                 None => {
-                    if self.user_context.trap_num as u16 == SYSCALL_TRAPNUM {
-                        break ReturnReason::UserSyscall;
-                    }
+                    call_irq_callback_functions(
+                        &self.as_trap_frame(),
+                        self.as_trap_frame().trap_num,
+                    );
+                    crate::arch::irq::enable_local();
                 }
-            };
-            call_irq_callback_functions(&self.as_trap_frame(), self.as_trap_frame().trap_num);
+            }
             if has_kernel_event() {
                 break ReturnReason::KernelEvent;
             }
         };
-        crate::arch::irq::enable_local();
 
         if return_reason == ReturnReason::UserException {
             self.cpu_exception_info = CpuExceptionInfo {
                 page_fault_addr: unsafe { x86::controlregs::cr2() },
@@ -221,6 +230,20 @@ pub enum CpuExceptionType {
     Reserved,
 }
 
+impl CpuExceptionType {
+    /// Returns whether this exception type is a fault or a trap.
+    pub fn is_fault_or_trap(self) -> bool {
+        match self {
+            CpuExceptionType::Trap | CpuExceptionType::Fault | CpuExceptionType::FaultOrTrap => {
+                true
+            }
+            CpuExceptionType::Abort | CpuExceptionType::Interrupt | CpuExceptionType::Reserved => {
+                false
+            }
+        }
+    }
+}
+
 macro_rules! define_cpu_exception {
     ( $([ $name: ident = $exception_id:tt, $exception_type:tt]),* ) => {
         /// CPU exception.
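
With the new predicate, the user-return loop can classify a trap in a single match guard instead of the nested `match exception.typ()` the old code needed. A minimal sketch of that pattern, using stand-in types rather than OSTD's real `CpuException` API:

    // Stand-in enum for illustration; not OSTD's real types.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum ExceptionType {
        Fault,
        Trap,
        FaultOrTrap,
        Abort,
        Interrupt,
        Reserved,
    }

    impl ExceptionType {
        // Mirrors the predicate added above: faults and traps can be
        // reported back to user space instead of panicking the kernel.
        fn is_fault_or_trap(self) -> bool {
            matches!(
                self,
                ExceptionType::Fault | ExceptionType::Trap | ExceptionType::FaultOrTrap
            )
        }
    }

    // One guard now replaces the old nested `match exception.typ()` block.
    fn dispatch(exception: Option<ExceptionType>) -> &'static str {
        match exception {
            Some(e) if e.is_fault_or_trap() => "break with ReturnReason::UserException",
            Some(_) => "panic: unhandled user CPU exception",
            None => "hardware interrupt or syscall path",
        }
    }

    fn main() {
        assert_eq!(
            dispatch(Some(ExceptionType::Fault)),
            "break with ReturnReason::UserException"
        );
        assert_eq!(
            dispatch(Some(ExceptionType::Abort)),
            "panic: unhandled user CPU exception"
        );
        assert_eq!(dispatch(None), "hardware interrupt or syscall path");
    }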

@@ -45,7 +45,7 @@ cfg_if! {
 }
 
 cpu_local_cell! {
-    static IS_KERNEL_INTERRUPTED: bool = false;
+    static KERNEL_INTERRUPT_NESTED_LEVEL: u8 = 0;
 }
 
 /// Trap frame of kernel interrupt
@@ -214,42 +214,41 @@ impl UserContext {
 /// and the IRQ occurs while the CPU is executing in the kernel mode.
 /// Otherwise, it returns false.
 pub fn is_kernel_interrupted() -> bool {
-    IS_KERNEL_INTERRUPTED.load()
+    KERNEL_INTERRUPT_NESTED_LEVEL.load() != 0
 }
 
 /// Handle traps (only from kernel).
 #[no_mangle]
 extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
-    if CpuException::is_cpu_exception(f.trap_num as u16) {
-        match CpuException::to_cpu_exception(f.trap_num as u16).unwrap() {
-            #[cfg(feature = "cvm_guest")]
-            CpuException::VIRTUALIZATION_EXCEPTION => {
-                let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
-                let mut trapframe_wrapper = TrapFrameWrapper(&mut *f);
-                handle_virtual_exception(&mut trapframe_wrapper, &ve_info);
-                *f = *trapframe_wrapper.0;
-            }
-            CpuException::PAGE_FAULT => {
-                let page_fault_addr = x86_64::registers::control::Cr2::read_raw();
-                // The actual user space implementation should be responsible
-                // for providing mechanism to treat the 0 virtual address.
-                if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
-                    handle_user_page_fault(f, page_fault_addr);
-                } else {
-                    handle_kernel_page_fault(f, page_fault_addr);
-                }
-            }
-            exception => {
-                panic!(
-                    "Cannot handle kernel cpu exception:{:?}. Error code:{:x?}; Trapframe:{:#x?}.",
-                    exception, f.error_code, f
-                );
-            }
-        }
-    } else {
-        IS_KERNEL_INTERRUPTED.store(true);
-        call_irq_callback_functions(f, f.trap_num);
-        IS_KERNEL_INTERRUPTED.store(false);
+    match CpuException::to_cpu_exception(f.trap_num as u16) {
+        #[cfg(feature = "cvm_guest")]
+        Some(CpuException::VIRTUALIZATION_EXCEPTION) => {
+            let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
+            let mut trapframe_wrapper = TrapFrameWrapper(&mut *f);
+            handle_virtual_exception(&mut trapframe_wrapper, &ve_info);
+            *f = *trapframe_wrapper.0;
+        }
+        Some(CpuException::PAGE_FAULT) => {
+            let page_fault_addr = x86_64::registers::control::Cr2::read_raw();
+            // The actual user space implementation should be responsible
+            // for providing mechanism to treat the 0 virtual address.
+            if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
+                handle_user_page_fault(f, page_fault_addr);
+            } else {
+                handle_kernel_page_fault(f, page_fault_addr);
+            }
+        }
+        Some(exception) => {
+            panic!(
+                "cannot handle kernel CPU exception: {:?}, trapframe: {:?}",
+                exception, f
+            );
+        }
+        None => {
+            KERNEL_INTERRUPT_NESTED_LEVEL.add_assign(1);
+            call_irq_callback_functions(f, f.trap_num);
+            KERNEL_INTERRUPT_NESTED_LEVEL.sub_assign(1);
+        }
     }
 }
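
Replacing the `IS_KERNEL_INTERRUPTED` boolean with a nesting counter matters once `trap_handler` can run nested (interrupts are re-enabled while the bottom half runs): with a boolean, the inner handler's `store(false)` would clear the flag while the outer handler is still on the stack. A single-threaded sketch of the failure mode and the counter fix, using a `thread_local!` `Cell` in place of OSTD's `cpu_local_cell!` and a recursive call in place of a real nested IRQ:

    use std::cell::Cell;

    thread_local! {
        static NESTED_LEVEL: Cell<u8> = Cell::new(0);
    }

    fn is_kernel_interrupted() -> bool {
        NESTED_LEVEL.with(|l| l.get() != 0)
    }

    fn trap_handler(deliver_nested_irq: bool) {
        NESTED_LEVEL.with(|l| l.set(l.get() + 1));
        if deliver_nested_irq {
            trap_handler(false); // an IRQ arrives while we are still handling one
            // A boolean flag would already have been reset to `false` here,
            // even though we are clearly still inside an interrupt handler.
            assert!(is_kernel_interrupted());
        }
        NESTED_LEVEL.with(|l| l.set(l.get() - 1));
    }

    fn main() {
        trap_handler(true);
        assert!(!is_kernel_interrupted());
    }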

@@ -34,35 +34,53 @@ fn process_top_half(trap_frame: &TrapFrame, irq_number: usize) {
 }
 
 fn process_bottom_half() {
+    let Some(handler) = BOTTOM_HALF_HANDLER.get() else {
+        return;
+    };
+
     // We need to disable preemption when processing bottom half since
     // the interrupt is enabled in this context.
-    let _preempt_guard = disable_preempt();
+    // This needs to be done before enabling the local interrupts to
+    // avoid race conditions.
+    let preempt_guard = disable_preempt();
 
-    if let Some(handler) = BOTTOM_HALF_HANDLER.get() {
-        handler()
-    }
+    crate::arch::irq::enable_local();
+    handler();
+    crate::arch::irq::disable_local();
+
+    drop(preempt_guard);
 }
 
 pub(crate) fn call_irq_callback_functions(trap_frame: &TrapFrame, irq_number: usize) {
-    // For x86 CPUs, interrupts are not re-entrant. Local interrupts will be disabled when
-    // an interrupt handler is called (Unless interrupts are re-enabled in an interrupt handler).
+    // We do not provide support for reentrant interrupt handlers. Otherwise, it's very hard to
+    // guarantee the absence of stack overflows.
     //
-    // FIXME: For arch that supports re-entrant interrupts, we may need to record nested level here.
-    IN_INTERRUPT_CONTEXT.store(true);
+    // As a concrete example, Linux once supported them in its early days, but has dropped support
+    // for this very reason. See
+    // <https://github.com/torvalds/linux/commit/d8bf368d0631d4bc2612d8bf2e4e8e74e620d0cc>.
+    //
+    // Nevertheless, we still need a counter to track the nested level because interrupts are
+    // enabled while the bottom half is being processed. The counter cannot exceed two because the
+    // bottom half cannot be reentrant for the same reason.
+    INTERRUPT_NESTED_LEVEL.add_assign(1);
+
     process_top_half(trap_frame, irq_number);
     crate::arch::interrupts_ack(irq_number);
-    crate::arch::irq::enable_local();
-    process_bottom_half();
-    IN_INTERRUPT_CONTEXT.store(false);
+
+    if INTERRUPT_NESTED_LEVEL.load() == 1 {
+        process_bottom_half();
+    }
+
+    INTERRUPT_NESTED_LEVEL.sub_assign(1);
 }
 
 cpu_local_cell! {
-    static IN_INTERRUPT_CONTEXT: bool = false;
+    static INTERRUPT_NESTED_LEVEL: u8 = 0;
 }
 
 /// Returns whether we are in the interrupt context.
 ///
 /// Note that both the top half and the bottom half are processed in the interrupt context.
 pub fn in_interrupt_context() -> bool {
-    IN_INTERRUPT_CONTEXT.load()
+    INTERRUPT_NESTED_LEVEL.load() != 0
 }
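
The `INTERRUPT_NESTED_LEVEL.load() == 1` check is the actual fix for the nested bottom half: `process_bottom_half` re-enables interrupts, so a second interrupt can arrive while the bottom half runs, and only the outermost invocation should process it. A single-threaded model of that control flow (thread-locals stand in for the per-CPU cells, and the nested interrupt is simulated by a recursive call):

    use std::cell::Cell;

    thread_local! {
        static NESTED_LEVEL: Cell<u8> = Cell::new(0);
        static BOTTOM_HALF_RUNS: Cell<u32> = Cell::new(0);
    }

    fn call_irq_callbacks(nested_irq_pending: bool) {
        NESTED_LEVEL.with(|l| l.set(l.get() + 1));
        // The top half would run here, with local interrupts disabled.
        if NESTED_LEVEL.with(|l| l.get()) == 1 {
            // Bottom half: interrupts get re-enabled, so another IRQ
            // may arrive right now.
            if nested_irq_pending {
                call_irq_callbacks(false); // nested handler sees level == 2
            }
            BOTTOM_HALF_RUNS.with(|c| c.set(c.get() + 1));
        }
        NESTED_LEVEL.with(|l| l.set(l.get() - 1));
    }

    fn main() {
        call_irq_callbacks(true);
        // The nested invocation skipped the bottom half; only the
        // outermost one processed it.
        assert_eq!(BOTTOM_HALF_RUNS.with(|c| c.get()), 1);
    }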