Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-21 16:33:24 +00:00)
Fix the nested bottom half
Committed by Tate, Hongliang Tian
Parent: 0cb2ea562e
Commit: 28edc57dd0

@@ -119,37 +119,15 @@ static ENABLED_MASK: AtomicU8 = AtomicU8::new(0);
 
 cpu_local_cell! {
     static PENDING_MASK: u8 = 0;
-    static IS_ENABLED: bool = true;
-}
-
-/// Enables softirq in current processor.
-fn enable_softirq_local() {
-    IS_ENABLED.store(true);
-}
-
-/// Disables softirq in current processor.
-fn disable_softirq_local() {
-    IS_ENABLED.store(false);
-}
-
-/// Checks whether the softirq is enabled in current processor.
-fn is_softirq_enabled() -> bool {
-    IS_ENABLED.load()
 }
 
 /// Processes pending softirqs.
 ///
 /// The processing instructions will iterate for `SOFTIRQ_RUN_TIMES` times. If any softirq
 /// is raised during the iteration, it will be processed.
-pub(crate) fn process_pending() {
+fn process_pending() {
     const SOFTIRQ_RUN_TIMES: u8 = 5;
 
-    if !is_softirq_enabled() {
-        return;
-    }
-
-    disable_softirq_local();
-
     for _i in 0..SOFTIRQ_RUN_TIMES {
         let mut action_mask = {
             let pending_mask = PENDING_MASK.load();
@@ -166,6 +144,4 @@ pub(crate) fn process_pending() {
             action_mask &= action_mask - 1;
         }
     }
-
-    enable_softirq_local();
 }

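Both hunks above leave the draining loop itself untouched: `process_pending` snapshots `PENDING_MASK` into `action_mask` and handles one raised softirq per iteration, using `action_mask &= action_mask - 1` to clear the lowest set bit. Below is a minimal, self-contained sketch of that idiom (not part of this commit; the handler table is a hypothetical stand-in for the real softirq registry):

```rust
// Drain a pending-softirq bitmask the same way `process_pending` does:
// handle the lowest raised softirq, then clear exactly that bit.
fn drain(mut action_mask: u8, handlers: &[fn(); 8]) {
    while action_mask > 0 {
        let id = action_mask.trailing_zeros() as usize; // index of the lowest set bit
        handlers[id]();
        action_mask &= action_mask - 1; // clear the lowest set bit
    }
}

fn main() {
    fn softirq0() {
        println!("softirq 0");
    }
    fn softirq3() {
        println!("softirq 3");
    }
    fn noop() {}

    let mut handlers: [fn(); 8] = [noop; 8];
    handlers[0] = softirq0;
    handlers[3] = softirq3;

    drain(0b0000_1001, &handlers); // runs softirq 0, then softirq 3
}
```
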
@@ -122,39 +122,48 @@ impl UserContextApiInternal for UserContext {
         // set ID flag which means cpu support CPUID instruction
         self.user_context.general.rflags |= (RFlags::INTERRUPT_FLAG | RFlags::ID).bits() as usize;
 
-        const SYSCALL_TRAPNUM: u16 = 0x100;
+        const SYSCALL_TRAPNUM: usize = 0x100;
 
         // return when it is syscall or cpu exception type is Fault or Trap.
         let return_reason = loop {
            scheduler::might_preempt();
            self.user_context.run();
+
            match CpuException::to_cpu_exception(self.user_context.trap_num as u16) {
+                #[cfg(feature = "cvm_guest")]
+                Some(CpuException::VIRTUALIZATION_EXCEPTION) => {
+                    crate::arch::irq::enable_local();
+                    handle_virtualization_exception(self);
+                }
+                Some(exception) if exception.typ().is_fatal_or_trap() => {
+                    crate::arch::irq::enable_local();
+                    break ReturnReason::UserException;
+                }
                Some(exception) => {
-                    #[cfg(feature = "cvm_guest")]
-                    if exception == CpuException::VIRTUALIZATION_EXCEPTION {
-                        handle_virtualization_exception(self);
-                        continue;
-                    }
-                    match exception.typ() {
-                        CpuExceptionType::FaultOrTrap
-                        | CpuExceptionType::Trap
-                        | CpuExceptionType::Fault => break ReturnReason::UserException,
-                        _ => (),
-                    }
+                    panic!(
+                        "cannot handle user CPU exception: {:?}, trapframe: {:?}",
+                        exception,
+                        self.as_trap_frame()
+                    );
+                }
+                None if self.user_context.trap_num == SYSCALL_TRAPNUM => {
+                    crate::arch::irq::enable_local();
+                    break ReturnReason::UserSyscall;
                }
                None => {
-                    if self.user_context.trap_num as u16 == SYSCALL_TRAPNUM {
-                        break ReturnReason::UserSyscall;
-                    }
+                    call_irq_callback_functions(
+                        &self.as_trap_frame(),
+                        self.as_trap_frame().trap_num,
+                    );
+                    crate::arch::irq::enable_local();
                }
-            };
-            call_irq_callback_functions(&self.as_trap_frame(), self.as_trap_frame().trap_num);
+            }
+
            if has_kernel_event() {
                break ReturnReason::KernelEvent;
            }
        };
 
-        crate::arch::irq::enable_local();
        if return_reason == ReturnReason::UserException {
            self.cpu_exception_info = CpuExceptionInfo {
                page_fault_addr: unsafe { x86::controlregs::cr2() },

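The rewritten loop above folds the virtualization, fault/trap, syscall, and hardware-interrupt cases into a single `match`, re-enables local interrupts inside each arm, and only then decides whether to leave the loop. A simplified, self-contained model of that control flow is sketched below (illustrative only; the enum and closures are made up and are not OSTD types):

```rust
// A toy model of the user-return loop: keep re-entering user mode until the
// trap is a syscall, a fault/trap-class exception, or a kernel event appears.
#[derive(Debug, PartialEq)]
enum ReturnReason {
    UserSyscall,
    UserException,
    KernelEvent,
}

enum Event {
    Syscall,
    FaultOrTrap,
    Interrupt,
}

fn run_user_until_return(
    mut run_user: impl FnMut() -> Event,
    mut handle_irq: impl FnMut(),
    mut has_kernel_event: impl FnMut() -> bool,
) -> ReturnReason {
    loop {
        let reason = match run_user() {
            Event::Syscall => Some(ReturnReason::UserSyscall),
            Event::FaultOrTrap => Some(ReturnReason::UserException),
            Event::Interrupt => {
                // Interrupts are handled in place; the loop only exits if a
                // kernel event became pending in the meantime.
                handle_irq();
                None
            }
        };
        if let Some(reason) = reason {
            return reason;
        }
        if has_kernel_event() {
            return ReturnReason::KernelEvent;
        }
    }
}

fn main() {
    // First trap is an interrupt (handled, loop continues), second is a syscall.
    let mut traps = vec![Event::Interrupt, Event::Syscall].into_iter();
    let reason = run_user_until_return(
        || traps.next().unwrap(),
        || println!("IRQ handled"),
        || false,
    );
    assert_eq!(reason, ReturnReason::UserSyscall);
}
```
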
@@ -221,6 +230,20 @@ pub enum CpuExceptionType {
     Reserved,
 }
 
+impl CpuExceptionType {
+    /// Returns whether this exception type is a fault or a trap.
+    pub fn is_fatal_or_trap(self) -> bool {
+        match self {
+            CpuExceptionType::Trap | CpuExceptionType::Fault | CpuExceptionType::FaultOrTrap => {
+                true
+            }
+            CpuExceptionType::Abort | CpuExceptionType::Interrupt | CpuExceptionType::Reserved => {
+                false
+            }
+        }
+    }
+}
+
 macro_rules! define_cpu_exception {
     ( $([ $name: ident = $exception_id:tt, $exception_type:tt]),* ) => {
         /// CPU exception.

@@ -45,7 +45,7 @@ cfg_if! {
 }
 
 cpu_local_cell! {
-    static IS_KERNEL_INTERRUPTED: bool = false;
+    static KERNEL_INTERRUPT_NESTED_LEVEL: u8 = 0;
 }
 
 /// Trap frame of kernel interrupt

@@ -214,42 +214,41 @@ impl UserContext {
 /// and the IRQ occurs while the CPU is executing in the kernel mode.
 /// Otherwise, it returns false.
 pub fn is_kernel_interrupted() -> bool {
-    IS_KERNEL_INTERRUPTED.load()
+    KERNEL_INTERRUPT_NESTED_LEVEL.load() != 0
 }
 
 /// Handle traps (only from kernel).
 #[no_mangle]
 extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
-    if CpuException::is_cpu_exception(f.trap_num as u16) {
-        match CpuException::to_cpu_exception(f.trap_num as u16).unwrap() {
-            #[cfg(feature = "cvm_guest")]
-            CpuException::VIRTUALIZATION_EXCEPTION => {
-                let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
-                let mut trapframe_wrapper = TrapFrameWrapper(&mut *f);
-                handle_virtual_exception(&mut trapframe_wrapper, &ve_info);
-                *f = *trapframe_wrapper.0;
-            }
-            CpuException::PAGE_FAULT => {
-                let page_fault_addr = x86_64::registers::control::Cr2::read_raw();
-                // The actual user space implementation should be responsible
-                // for providing mechanism to treat the 0 virtual address.
-                if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
-                    handle_user_page_fault(f, page_fault_addr);
-                } else {
-                    handle_kernel_page_fault(f, page_fault_addr);
-                }
-            }
-            exception => {
-                panic!(
-                    "Cannot handle kernel cpu exception:{:?}. Error code:{:x?}; Trapframe:{:#x?}.",
-                    exception, f.error_code, f
-                );
-            }
-        }
-    } else {
-        IS_KERNEL_INTERRUPTED.store(true);
-        call_irq_callback_functions(f, f.trap_num);
-        IS_KERNEL_INTERRUPTED.store(false);
+    match CpuException::to_cpu_exception(f.trap_num as u16) {
+        #[cfg(feature = "cvm_guest")]
+        Some(CpuException::VIRTUALIZATION_EXCEPTION) => {
+            let ve_info = tdcall::get_veinfo().expect("#VE handler: fail to get VE info\n");
+            let mut trapframe_wrapper = TrapFrameWrapper(&mut *f);
+            handle_virtual_exception(&mut trapframe_wrapper, &ve_info);
+            *f = *trapframe_wrapper.0;
+        }
+        Some(CpuException::PAGE_FAULT) => {
+            let page_fault_addr = x86_64::registers::control::Cr2::read_raw();
+            // The actual user space implementation should be responsible
+            // for providing mechanism to treat the 0 virtual address.
+            if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
+                handle_user_page_fault(f, page_fault_addr);
+            } else {
+                handle_kernel_page_fault(f, page_fault_addr);
+            }
+        }
+        Some(exception) => {
+            panic!(
+                "cannot handle kernel CPU exception: {:?}, trapframe: {:?}",
+                exception, f
+            );
+        }
+        None => {
+            KERNEL_INTERRUPT_NESTED_LEVEL.add_assign(1);
+            call_irq_callback_functions(f, f.trap_num);
+            KERNEL_INTERRUPT_NESTED_LEVEL.sub_assign(1);
+        }
     }
 }
 
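The switch from the boolean `IS_KERNEL_INTERRUPTED` to the counter `KERNEL_INTERRUPT_NESTED_LEVEL` matters because kernel-mode interrupts can now nest: the bottom half runs with local interrupts enabled, so another interrupt may arrive and enter `trap_handler` again. With a boolean, the inner handler's final `store(false)` would clear the flag while the outer handler is still running; a counter survives the nesting. The sketch below illustrates the difference, using a thread-local `Cell` as a stand-in for the OSTD-specific `cpu_local_cell!` (illustrative only, not OSTD code):

```rust
use std::cell::Cell;

thread_local! {
    // Stand-ins for the per-CPU cells; a thread-local Cell models them here.
    static BOOL_FLAG: Cell<bool> = Cell::new(false);
    static NESTED_LEVEL: Cell<u8> = Cell::new(0);
}

// With a boolean, the *inner* handler's exit already clears the flag, so the
// still-running outer handler no longer reports "interrupted".
fn outer_with_bool(nested_handler: impl FnOnce()) -> bool {
    BOOL_FLAG.with(|f| f.set(true));
    nested_handler();
    let still_marked = BOOL_FLAG.with(|f| f.get());
    BOOL_FLAG.with(|f| f.set(false));
    still_marked
}

// With a counter, the level stays non-zero until the outermost handler exits.
fn outer_with_counter(nested_handler: impl FnOnce()) -> bool {
    NESTED_LEVEL.with(|l| l.set(l.get() + 1));
    nested_handler();
    let still_marked = NESTED_LEVEL.with(|l| l.get() != 0);
    NESTED_LEVEL.with(|l| l.set(l.get() - 1));
    still_marked
}

fn main() {
    // Simulate a nested interrupt that enters and leaves its own handler.
    assert!(!outer_with_bool(|| {
        BOOL_FLAG.with(|f| f.set(true));
        BOOL_FLAG.with(|f| f.set(false)); // inner exit wipes the outer state
    }));
    assert!(outer_with_counter(|| {
        NESTED_LEVEL.with(|l| l.set(l.get() + 1));
        NESTED_LEVEL.with(|l| l.set(l.get() - 1)); // inner exit preserves the outer level
    }));
    println!("the counter survives nesting; the boolean does not");
}
```
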
@@ -34,35 +34,53 @@ fn process_top_half(trap_frame: &TrapFrame, irq_number: usize) {
 }
 
 fn process_bottom_half() {
+    let Some(handler) = BOTTOM_HALF_HANDLER.get() else {
+        return;
+    };
+
     // We need to disable preemption when processing bottom half since
     // the interrupt is enabled in this context.
-    let _preempt_guard = disable_preempt();
+    // This needs to be done before enabling the local interrupts to
+    // avoid race conditions.
+    let preempt_guard = disable_preempt();
+    crate::arch::irq::enable_local();
 
-    if let Some(handler) = BOTTOM_HALF_HANDLER.get() {
-        handler()
-    }
+    handler();
+
+    crate::arch::irq::disable_local();
+    drop(preempt_guard);
 }
 
 pub(crate) fn call_irq_callback_functions(trap_frame: &TrapFrame, irq_number: usize) {
-    // For x86 CPUs, interrupts are not re-entrant. Local interrupts will be disabled when
-    // an interrupt handler is called (Unless interrupts are re-enabled in an interrupt handler).
+    // We do not provide support for reentrant interrupt handlers. Otherwise, it's very hard to
+    // guarantee the absence of stack overflows.
     //
-    // FIXME: For arch that supports re-entrant interrupts, we may need to record nested level here.
-    IN_INTERRUPT_CONTEXT.store(true);
+    // As a concrete example, Linux once supported them in its early days, but has dropped support
+    // for this very reason. See
+    // <https://github.com/torvalds/linux/commit/d8bf368d0631d4bc2612d8bf2e4e8e74e620d0cc>.
+    //
+    // Nevertheless, we still need a counter to track the nested level because interrupts are
+    // enabled while the bottom half is being processed. The counter cannot exceed two because the
+    // bottom half cannot be reentrant for the same reason.
+    INTERRUPT_NESTED_LEVEL.add_assign(1);
 
     process_top_half(trap_frame, irq_number);
     crate::arch::interrupts_ack(irq_number);
-    crate::arch::irq::enable_local();
-    process_bottom_half();
 
-    IN_INTERRUPT_CONTEXT.store(false);
+    if INTERRUPT_NESTED_LEVEL.load() == 1 {
+        process_bottom_half();
+    }
+
+    INTERRUPT_NESTED_LEVEL.sub_assign(1);
 }
 
 cpu_local_cell! {
-    static IN_INTERRUPT_CONTEXT: bool = false;
+    static INTERRUPT_NESTED_LEVEL: u8 = 0;
 }
 
 /// Returns whether we are in the interrupt context.
+///
+/// Note that both the top half and the bottom half are processed in the interrupt context.
 pub fn in_interrupt_context() -> bool {
-    IN_INTERRUPT_CONTEXT.load()
+    INTERRUPT_NESTED_LEVEL.load() != 0
 }

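The last hunk is the heart of the fix: every interrupt entry bumps the per-CPU nesting level, but only the outermost entry (level 1) runs the bottom half, so the bottom half can never re-enter itself even though it executes with local interrupts enabled. A runnable sketch of that shape follows (illustrative only; a thread-local `Cell` again stands in for `cpu_local_cell!`):

```rust
use std::cell::Cell;

thread_local! {
    // Models the per-CPU `INTERRUPT_NESTED_LEVEL`.
    static NESTED_LEVEL: Cell<u8> = Cell::new(0);
}

fn in_interrupt_context() -> bool {
    NESTED_LEVEL.with(|l| l.get() != 0)
}

// Mirrors the dispatch shape above: the bottom half runs only at the outermost
// nesting level, so an interrupt arriving while the bottom half is running
// (level == 2) skips it instead of re-entering it.
fn handle_interrupt(top_half: impl FnOnce(), bottom_half: impl FnOnce()) {
    NESTED_LEVEL.with(|l| l.set(l.get() + 1));

    top_half();

    if NESTED_LEVEL.with(|l| l.get()) == 1 {
        bottom_half();
    }

    NESTED_LEVEL.with(|l| l.set(l.get() - 1));
}

fn main() {
    handle_interrupt(
        || println!("outer top half, in_interrupt_context = {}", in_interrupt_context()),
        || {
            // A nested interrupt during the bottom half runs its top half only;
            // its own bottom half is skipped because the level is now 2.
            handle_interrupt(|| println!("nested top half"), || unreachable!());
        },
    );
}
```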