Merge remote-tracking branch 'origin/master' into patch-merge-master-1129

longjin
2024-11-29 13:01:15 +00:00
8 changed files with 353 additions and 25 deletions

View File

@ -1,4 +1,7 @@
use core::sync::atomic::compiler_fence;
use crate::{
arch::ipc::signal::{SigCode, Signal},
filesystem::vfs::{
core::generate_inode_id, file::FileMode, syscall::ModeType, FilePrivateData, FileSystem,
FileType, IndexNode, Metadata,
@ -8,7 +11,7 @@ use crate::{
wait_queue::WaitQueue,
},
net::event_poll::{EPollEventType, EPollItem, EventPoll},
process::ProcessState,
process::{ProcessManager, ProcessState},
sched::SchedMode,
time::PosixTimeSpec,
};
@ -20,6 +23,8 @@ use alloc::{
};
use system_error::SystemError;
use super::signal_types::{SigInfo, SigType};
/// We set the total size of pipe_buff to 1024 bytes
const PIPE_BUFF_SIZE: usize = 1024;
@ -59,6 +64,7 @@ pub struct InnerPipeInode {
metadata: Metadata,
reader: u32,
writer: u32,
had_reader: bool,
epitems: SpinLock<LinkedList<Arc<EPollItem>>>,
}
@ -131,6 +137,7 @@ impl LockedPipeInode {
valid_cnt: 0,
read_pos: 0,
write_pos: 0,
had_reader: false,
data: [0; PIPE_BUFF_SIZE],
metadata: Metadata {
@ -278,15 +285,27 @@ impl IndexNode for LockedPipeInode {
mut data: SpinLockGuard<FilePrivateData>,
mode: &crate::filesystem::vfs::file::FileMode,
) -> Result<(), SystemError> {
let accmode = mode.accmode();
let mut guard = self.inner.lock();
// A pipe cannot be opened in read-write mode
if mode.contains(FileMode::O_RDWR) {
if accmode == FileMode::O_RDWR.bits() {
return Err(SystemError::EACCES);
}
if mode.contains(FileMode::O_RDONLY) {
} else if accmode == FileMode::O_RDONLY.bits() {
guard.reader += 1;
}
if mode.contains(FileMode::O_WRONLY) {
guard.had_reader = true;
// println!(
// "FIFO: pipe try open in read mode with reader pid:{:?}",
// ProcessManager::current_pid()
// );
} else if accmode == FileMode::O_WRONLY.bits() {
// println!(
// "FIFO: pipe try open in write mode with {} reader, writer pid:{:?}",
// guard.reader,
// ProcessManager::current_pid()
// );
if guard.reader == 0 && mode.contains(FileMode::O_NONBLOCK) {
return Err(SystemError::ENXIO);
}
guard.writer += 1;
}
@ -311,10 +330,11 @@ impl IndexNode for LockedPipeInode {
} else {
return Err(SystemError::EBADF);
}
let accmode = mode.accmode();
let mut guard = self.inner.lock();
// The write end is being closed
if mode.contains(FileMode::O_WRONLY) {
if accmode == FileMode::O_WRONLY.bits() {
assert!(guard.writer > 0);
guard.writer -= 1;
// If there are no writers left, wake up the readers
@ -325,7 +345,7 @@ impl IndexNode for LockedPipeInode {
}
// The read end is being closed
if mode.contains(FileMode::O_RDONLY) {
if accmode == FileMode::O_RDONLY.bits() {
assert!(guard.reader > 0);
guard.reader -= 1;
// If there are no readers left, wake up the writers
@ -361,7 +381,35 @@ impl IndexNode for LockedPipeInode {
let mut inode = self.inner.lock();
if inode.reader == 0 {
// TODO: if no readers exist anymore, send the SIGPIPE signal to the writer process
if !inode.had_reader {
// If the FIFO has never had a reader, return ENXIO directly, regardless of blocking mode
return Err(SystemError::ENXIO);
} else {
// Readers existed before, but they have all been closed by now
match mode.contains(FileMode::O_NONBLOCK) {
true => {
// Non-blocking mode: return EPIPE directly
return Err(SystemError::EPIPE);
}
false => {
let sig = Signal::SIGPIPE;
let mut info = SigInfo::new(
sig,
0,
SigCode::Kernel,
SigType::Kill(ProcessManager::current_pid()),
);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
let _retval = sig
.send_signal_info(Some(&mut info), ProcessManager::current_pid())
.map(|x| x as usize);
compiler_fence(core::sync::atomic::Ordering::SeqCst);
return Err(SystemError::EPIPE);
}
}
}
}
// If there is not enough space in the pipe
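
The write path above encodes the FIFO rules for a writer that finds no open readers: ENXIO if the FIFO has never had a reader, otherwise EPIPE, with SIGPIPE additionally sent to the writing process in blocking mode. Below is a minimal standalone sketch of that decision logic; WriteOutcome, fifo_write_check, and its parameters are stand-ins for illustration, not the kernel's inode and signal machinery.

// Standalone sketch (not DragonOS code) of the write-side decision implemented above.
// had_reader / reader_count / nonblock stand in for the inode's had_reader and
// reader fields and the file's O_NONBLOCK flag.
#[derive(Debug, PartialEq)]
enum WriteOutcome {
    Proceed,          // at least one reader is still open
    Enxio,            // no reader has ever opened the FIFO
    Epipe,            // readers existed but are gone; non-blocking write fails at once
    EpipeWithSigpipe, // readers existed but are gone; blocking write also raises SIGPIPE
}

fn fifo_write_check(had_reader: bool, reader_count: u32, nonblock: bool) -> WriteOutcome {
    if reader_count > 0 {
        return WriteOutcome::Proceed;
    }
    if !had_reader {
        // Never had a reader: ENXIO regardless of blocking mode.
        WriteOutcome::Enxio
    } else if nonblock {
        WriteOutcome::Epipe
    } else {
        // The kernel sends SIGPIPE to the writing process, then returns EPIPE.
        WriteOutcome::EpipeWithSigpipe
    }
}

fn main() {
    assert_eq!(fifo_write_check(false, 0, false), WriteOutcome::Enxio);
    assert_eq!(fifo_write_check(true, 0, true), WriteOutcome::Epipe);
    assert_eq!(fifo_write_check(true, 0, false), WriteOutcome::EpipeWithSigpipe);
    assert_eq!(fifo_write_check(true, 1, true), WriteOutcome::Proceed);
}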

View File

@ -275,7 +275,12 @@ impl ProcessManager {
// avoid deadlock
drop(writer);
let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);
let rq = cpu_rq(
pcb.sched_info()
.on_cpu()
.unwrap_or(smp_get_processor_id())
.data() as usize,
);
let (rq, _guard) = rq.self_lock();
rq.update_rq_clock();
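
The hunk above replaces a bare unwrap() of sched_info().on_cpu() with unwrap_or(smp_get_processor_id()), so a task whose on_cpu has not been set yet falls back to the current processor's run queue instead of panicking. A tiny sketch of the same Option fallback, with plain u32 CPU numbers standing in for the kernel types:

// Plain-Option sketch of the fallback above: if no CPU has been recorded for the
// task, use the CPU we are currently running on rather than panicking via unwrap().
fn pick_rq_cpu(on_cpu: Option<u32>, current_cpu: u32) -> u32 {
    on_cpu.unwrap_or(current_cpu)
}

fn main() {
    assert_eq!(pick_rq_cpu(Some(2), 0), 2); // task already placed on CPU 2
    assert_eq!(pick_rq_cpu(None, 0), 0);    // not yet placed: fall back to the current CPU
}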

View File

@ -1,12 +1,12 @@
//! This file implements the clocks involved in the scheduling process
//!
use crate::{arch::CurrentTimeArch, time::TimeArch};
use crate::{arch::CurrentTimeArch, smp::cpu::ProcessorId, time::TimeArch};
pub struct SchedClock;
impl SchedClock {
#[inline]
pub fn sched_clock_cpu(_cpu: usize) -> u64 {
pub fn sched_clock_cpu(_cpu: ProcessorId) -> u64 {
#[cfg(target_arch = "x86_64")]
{
if crate::arch::driver::tsc::TSCManager::cpu_khz() == 0 {
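
This hunk and the ones below migrate the scheduler's CPU parameters from bare usize to the ProcessorId type. Judging from the ProcessorId::new() and .data() calls elsewhere in this commit, it behaves like a small newtype over u32; the following is a hypothetical sketch of such a wrapper (the real definition lives in smp::cpu), not the actual DragonOS source.

// Hypothetical ProcessorId-like newtype; the new()/data() interface is inferred
// from the call sites in this commit.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ProcessorId(u32);

impl ProcessorId {
    pub const fn new(id: u32) -> Self {
        Self(id)
    }
    /// Raw CPU number, e.g. for indexing per-CPU arrays.
    pub const fn data(&self) -> u32 {
        self.0
    }
}

fn main() {
    let cpu = ProcessorId::new(3);
    // Callers must go through data() to get a plain index, so a bare usize can
    // no longer be confused with a CPU id at compile time.
    assert_eq!(cpu.data() as usize, 3);
}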

View File

@ -1,14 +1,17 @@
use core::sync::atomic::{compiler_fence, AtomicUsize, Ordering};
use crate::{
arch::CurrentIrqArch, exception::InterruptArch, process::ProcessControlBlock,
smp::core::smp_get_processor_id, time::jiffies::TICK_NESC,
arch::CurrentIrqArch,
exception::InterruptArch,
process::ProcessControlBlock,
smp::{core::smp_get_processor_id, cpu::ProcessorId},
time::jiffies::TICK_NESC,
};
use alloc::sync::Arc;
use super::{clock::SchedClock, cpu_irq_time};
pub fn irq_time_read(cpu: usize) -> u64 {
pub fn irq_time_read(cpu: ProcessorId) -> u64 {
compiler_fence(Ordering::SeqCst);
let irqtime = cpu_irq_time(cpu);
@ -49,7 +52,7 @@ impl IrqTime {
}
pub fn irqtime_start() {
let cpu = smp_get_processor_id().data() as usize;
let cpu = smp_get_processor_id();
let irq_time = cpu_irq_time(cpu);
compiler_fence(Ordering::SeqCst);
irq_time.irq_start_time = SchedClock::sched_clock_cpu(cpu) as u64;
@ -58,7 +61,7 @@ impl IrqTime {
pub fn irqtime_account_irq(_pcb: Arc<ProcessControlBlock>) {
compiler_fence(Ordering::SeqCst);
let cpu = smp_get_processor_id().data() as usize;
let cpu = smp_get_processor_id();
let irq_time = cpu_irq_time(cpu);
compiler_fence(Ordering::SeqCst);
let delta = SchedClock::sched_clock_cpu(cpu) as u64 - irq_time.irq_start_time;
@ -93,7 +96,7 @@ impl CpuTimeFunc {
let mut accounted = Self::steal_account_process_time(max);
if accounted < max {
let irqtime = cpu_irq_time(smp_get_processor_id().data() as usize);
let irqtime = cpu_irq_time(smp_get_processor_id());
accounted += irqtime.irqtime_tick_accounted(max - accounted);
}
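
IrqTime records a per-CPU timestamp when an interrupt starts (irqtime_start) and later adds the elapsed delta to the accumulated total (irqtime_account_irq). A minimal sketch of that start/account pattern, with assumed field names and independent of SchedClock::sched_clock_cpu():

// Minimal start/account sketch (assumed names): record when the IRQ begins, then
// add the elapsed delta to the running total when the time is accounted.
struct IrqTimeSketch {
    irq_start_time: u64,
    total_irq_time: u64,
}

impl IrqTimeSketch {
    fn irqtime_start(&mut self, now: u64) {
        self.irq_start_time = now;
    }
    fn irqtime_account(&mut self, now: u64) {
        let delta = now - self.irq_start_time;
        self.total_irq_time += delta;
    }
}

fn main() {
    let mut t = IrqTimeSketch { irq_start_time: 0, total_irq_time: 0 };
    t.irqtime_start(100);
    t.irqtime_account(130); // 30 time units spent in the IRQ handler
    assert_eq!(t.total_irq_time, 30);
}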

View File

@ -63,8 +63,8 @@ pub const SCHED_CAPACITY_SHIFT: u64 = SCHED_FIXEDPOINT_SHIFT;
pub const SCHED_CAPACITY_SCALE: u64 = 1 << SCHED_CAPACITY_SHIFT;
#[inline]
pub fn cpu_irq_time(cpu: usize) -> &'static mut IrqTime {
unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu] }
pub fn cpu_irq_time(cpu: ProcessorId) -> &'static mut IrqTime {
unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu.data() as usize] }
}
#[inline]
@ -289,7 +289,7 @@ pub struct CpuRunQueue {
lock: SpinLock<()>,
lock_on_who: AtomicUsize,
cpu: usize,
cpu: ProcessorId,
clock_task: u64,
clock: u64,
prev_irq_time: u64,
@ -329,7 +329,7 @@ pub struct CpuRunQueue {
}
impl CpuRunQueue {
pub fn new(cpu: usize) -> Self {
pub fn new(cpu: ProcessorId) -> Self {
Self {
lock: SpinLock::new(()),
lock_on_who: AtomicUsize::new(usize::MAX),
@ -460,6 +460,7 @@ impl CpuRunQueue {
self.enqueue_task(pcb.clone(), flags);
*pcb.sched_info().on_rq.lock_irqsave() = OnRq::Queued;
pcb.sched_info().set_on_cpu(Some(self.cpu));
}
/// Check whether the given task can preempt the currently running task
@ -638,7 +639,7 @@ impl CpuRunQueue {
let cpu = self.cpu;
if cpu == smp_get_processor_id().data() as usize {
if cpu == smp_get_processor_id() {
// assert!(
// Arc::ptr_eq(&current, &ProcessManager::current_pcb()),
// "rq current name {} process current {}",
@ -653,7 +654,7 @@ impl CpuRunQueue {
}
// Send a reschedule IPI to the target CPU
send_resched_ipi(ProcessorId::new(cpu as u32));
send_resched_ipi(cpu);
}
/// Select the next task
@ -986,7 +987,7 @@ pub fn sched_init() {
let mut cpu_runqueue = Vec::with_capacity(PerCpu::MAX_CPU_NUM as usize);
for cpu in 0..PerCpu::MAX_CPU_NUM as usize {
let rq = Arc::new(CpuRunQueue::new(cpu));
let rq = Arc::new(CpuRunQueue::new(ProcessorId::new(cpu as u32)));
rq.cfs.force_mut().set_rq(Arc::downgrade(&rq));
cpu_runqueue.push(rq);
}