mirror of
https://github.com/DragonOS-Community/DragonOS.git
synced 2025-06-08 14:16:47 +00:00
* 几个结构体 * 通过vmx_init以及create_vm,create_vcpu部分TODO * kvm_run完成一半 * 能够成功vmlaunch,但是在vmexit时候还有些问题未排查出来 * 解决了vmlaunch导致的cpu_reset的问题 * 整理代码 * 暂时性push到hyc仓库 * 修改内存虚拟化部分参数传入,解决死锁问题 * 初步完成ept映射.但不停EPT_VIOLATION * 初步完成了EPT映射,但是读写内存还是有点问题 * fixme * 更新了一些truncate到from_bits_unchecked的实现 * 完成内存虚拟化EPT_VIOLATION的映射 * fmt * Remove /fixme from .gitignore * Remove /fixme file * Update kernel/src/init/init.rs Co-authored-by: Samuel Dai <samuka007@dragonos.org> * Update kernel/src/init/init.rs Co-authored-by: Samuel Dai <samuka007@dragonos.org> * 修改了注释格式,删除了附带的一些文件操作 * feat(syscall): 实现syscall restart (#1075) 能够在系统调用返回ERESTARTSYS时,信号处理结束后,自动重启系统调用. TODO: 实现wait等需要restart_block的系统调用的重启 Signed-off-by: longjin <longjin@DragonOS.org> * chore: update docker image version in script && update doc (#1076) * chore: update docker image version in script * chore: replace lots of spaces with newline in doc * fix: 修复wait4系统调用部分语义与Linux不一致的问题 (#1080) * fix: 修复wait4系统调用部分语义与Linux不一致的问题 解决wait不住/wait之后卡死的bug --------- Signed-off-by: longjin <longjin@DragonOS.org> * feat(fs/syscall): 实现fchdir系统调用 (#1081) Signed-off-by: longjin <longjin@DragonOS.org> * fix(mm): 修复fat文件系统的PageCache同步问题 (#1005) --------- Co-authored-by: longjin <longjin@DragonOS.org> * fix: 修正nographic启动时,控制台日志未能输出到文件的问题 (#1082) Signed-off-by: longjin <longjin@DragonOS.org> * fix(process): 修复copy_process的一些bug & 支持默认init进程传参 (#1083) - 修复`copy_process`函数对标志位处理不正确的bug - init进程搜索列表中,支持为默认init程序传入参数 Signed-off-by: longjin <longjin@DragonOS.org> * feat: 完善sys_reboot (#1084) * fix(process): 修复copy_process的一些bug & 支持默认init进程传参 - 修复`copy_process`函数对标志位处理不正确的bug - init进程搜索列表中,支持为默认init程序传入参数 Signed-off-by: longjin <longjin@DragonOS.org> * feat: 完善sys_reboot - 校验magic number - 支持多个cmd (具体内容未实现) Signed-off-by: longjin <longjin@DragonOS.org> --------- Signed-off-by: longjin <longjin@DragonOS.org> * fix: 修复do_wait函数在wait所有子进程时,忘了释放锁就sleep的bug (#1089) Signed-off-by: longjin <longjin@DragonOS.org> * pull主线并且fmt --------- Signed-off-by: longjin 
<longjin@DragonOS.org> Co-authored-by: GnoCiYeH <heyicong@dragonos.org> Co-authored-by: Samuel Dai <samuka007@dragonos.org> Co-authored-by: LoGin <longjin@DragonOS.org> Co-authored-by: LIU Yuwei <22045841+Marsman1996@users.noreply.github.com> Co-authored-by: MemoryShore <1353318529@qq.com>
269 lines
7.6 KiB
Rust
269 lines
7.6 KiB
Rust
use core::{
|
||
fmt::Debug,
|
||
sync::atomic::{AtomicUsize, Ordering},
|
||
};
|
||
|
||
use alloc::{
|
||
boxed::Box,
|
||
sync::{Arc, Weak},
|
||
vec::Vec,
|
||
};
|
||
use hashbrown::HashMap;
|
||
use log::debug;
|
||
use mem::LockedKvmMemSlot;
|
||
use system_error::SystemError;
|
||
|
||
use crate::{
|
||
arch::{
|
||
vm::{kvm_host::vcpu::VirtCpuRequest, vmx::KvmVmx, x86_kvm_manager},
|
||
CurrentKvmManager, KvmArch, VirtCpuArch,
|
||
},
|
||
filesystem::vfs::file::{File, FileMode},
|
||
libs::spinlock::{SpinLock, SpinLockGuard},
|
||
mm::ucontext::AddressSpace,
|
||
process::ProcessManager,
|
||
smp::cpu::ProcessorId,
|
||
virt::vm::{
|
||
kvm_dev::KvmVcpuDev,
|
||
kvm_host::vcpu::{LockedVirtCpu, VirtCpu},
|
||
},
|
||
};
|
||
|
||
use self::{
|
||
mem::{GfnToHvaCache, KvmMemSlotSet, LockedVmMemSlotSet, PfnCacheUsage},
|
||
vcpu::{GuestDebug, VcpuMode},
|
||
};
|
||
|
||
pub mod mem;
pub mod vcpu;

/// Number of distinct guest physical address spaces per VM.
/// Used to size the `memslots`/`memslots_set` vectors in [`Vm`]; currently 1.
const KVM_ADDRESS_SPACE_NUM: usize = 1;
// NOTE(review): "USERSAPCE" is a typo for "USERSPACE"; kept because the
// constant is public and renaming would break external callers.
pub const KVM_USERSAPCE_IRQ_SOURCE_ID: usize = 0;
pub const KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID: usize = 1;
|
||
|
||
/// Spinlock-protected wrapper around a [`Vm`] instance.
///
/// All access to the VM state goes through [`LockedVm::lock`].
#[derive(Debug)]
pub struct LockedVm {
    inner: SpinLock<Vm>,
}

/// Global count of live VMs that have requested virtualization hardware.
/// The creator that takes this 0 -> 1 performs hardware initialization
/// (see `LockedVm::hardware_enable_all`).
static KVM_USAGE_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||
|
||
impl LockedVm {
|
||
pub fn lock(&self) -> SpinLockGuard<Vm> {
|
||
self.inner.lock()
|
||
}
|
||
|
||
pub fn create(vm_type: usize) -> Result<Arc<Self>, SystemError> {
|
||
let mut memslots_set = vec![];
|
||
let mut memslots = vec![];
|
||
for i in 0..KVM_ADDRESS_SPACE_NUM {
|
||
let mut tmp = vec![];
|
||
for j in 0..2 {
|
||
let mut slots = KvmMemSlotSet::default();
|
||
slots.last_use = None;
|
||
slots.node_idx = j;
|
||
slots.generation = i as u64;
|
||
tmp.push(LockedVmMemSlotSet::new(slots));
|
||
}
|
||
memslots_set.push(tmp);
|
||
memslots.push(memslots_set[i][0].clone());
|
||
}
|
||
|
||
let kvm = Vm {
|
||
mm: ProcessManager::current_pcb()
|
||
.basic()
|
||
.user_vm()
|
||
.unwrap()
|
||
.write()
|
||
.try_clone()?,
|
||
max_vcpus: CurrentKvmManager::KVM_MAX_VCPUS,
|
||
memslots_set,
|
||
memslots,
|
||
arch: KvmArch::init(vm_type)?,
|
||
created_vcpus: 0,
|
||
lock_vm_ref: Weak::new(),
|
||
nr_memslot_pages: 0,
|
||
online_vcpus: 0,
|
||
dirty_ring_size: 0,
|
||
dirty_ring_with_bitmap: false,
|
||
vcpus: HashMap::new(),
|
||
#[cfg(target_arch = "x86_64")]
|
||
kvm_vmx: KvmVmx::default(),
|
||
nr_memslots_dirty_logging: 0,
|
||
mmu_invalidate_seq: 0,
|
||
};
|
||
|
||
let ret = Arc::new(Self {
|
||
inner: SpinLock::new(kvm),
|
||
});
|
||
|
||
Self::hardware_enable_all()?;
|
||
|
||
ret.lock().lock_vm_ref = Arc::downgrade(&ret);
|
||
return Ok(ret);
|
||
}
|
||
|
||
fn hardware_enable_all() -> Result<(), SystemError> {
|
||
KVM_USAGE_COUNT.fetch_add(1, Ordering::SeqCst);
|
||
|
||
// 如果是第一个启动的,则需要对所有cpu都初始化硬件
|
||
if KVM_USAGE_COUNT.load(Ordering::SeqCst) == 1 {
|
||
// FIXME!!!!
|
||
// 这里是要对每个cpu都进行初始化,目前这里只对当前cpu调用了初始化流程
|
||
x86_kvm_manager().arch_hardware_enable()?;
|
||
}
|
||
|
||
Ok(())
|
||
}
|
||
}
|
||
|
||
/// A KVM virtual machine instance.
///
/// Protected externally by [`LockedVm`]; owns the guest's address space,
/// the vCPU table and the memory-slot bookkeeping.
#[derive(Debug)]
#[allow(dead_code)]
pub struct Vm {
    // Weak back-reference to the lock wrapper that owns this Vm; set right
    // after construction in `LockedVm::create`.
    lock_vm_ref: Weak<LockedVm>,
    // Address space cloned from the creating process' user VM.
    mm: Arc<AddressSpace>,
    // Upper bound on vCPUs; `create_vcpu` returns EINVAL past this.
    max_vcpus: usize,
    // Count of vCPUs ever created (reserved slots).
    created_vcpus: usize,
    // Count of vCPUs fully brought online; also the next vcpu index.
    online_vcpus: usize,
    /// Collection of vCPUs, keyed by vcpu index.
    vcpus: HashMap<usize, Arc<LockedVirtCpu>>,
    // name: String,
    /// Active and inactive memory slot sets. Conceptually
    /// `[[Arc<LockedVmMemSlots>; 2]; KVM_ADDRESS_SPACE_NUM]`;
    /// written as a `Vec` for now.
    memslots_set: Vec<Vec<Arc<LockedVmMemSlotSet>>>,
    /// Currently active memory slots. Conceptually
    /// `[Arc<LockedVmMemSlots>; KVM_ADDRESS_SPACE_NUM]`;
    /// written as a `Vec` for now.
    pub memslots: Vec<Arc<LockedVmMemSlotSet>>,
    /// Number of pages covered by the memory slots.
    nr_memslot_pages: usize,

    pub arch: KvmArch,

    pub dirty_ring_size: u32,
    pub nr_memslots_dirty_logging: u32,
    dirty_ring_with_bitmap: bool,

    #[cfg(target_arch = "x86_64")]
    pub kvm_vmx: KvmVmx,

    /// Memory-management-unit (MMU) invalidation sequence number.
    pub mmu_invalidate_seq: u64,
}
|
||
|
||
impl Vm {
|
||
#[inline(never)]
|
||
pub fn create_vcpu(&mut self, id: usize) -> Result<usize, SystemError> {
|
||
if id >= self.max_vcpus {
|
||
return Err(SystemError::EINVAL);
|
||
}
|
||
|
||
if self.created_vcpus >= self.max_vcpus {
|
||
return Err(SystemError::EINVAL);
|
||
}
|
||
|
||
self.created_vcpus += 1;
|
||
|
||
let vcpu = self._create_vcpu(id)?;
|
||
if self.dirty_ring_size != 0 {
|
||
todo!()
|
||
}
|
||
|
||
vcpu.lock().vcpu_id = self.online_vcpus;
|
||
|
||
self.vcpus.insert(self.online_vcpus, vcpu.clone());
|
||
|
||
self.online_vcpus += 1;
|
||
|
||
let vcpu_inode = KvmVcpuDev::new(vcpu);
|
||
|
||
let file = File::new(vcpu_inode, FileMode::from_bits_truncate(0x777))?;
|
||
|
||
let fd = ProcessManager::current_pcb()
|
||
.fd_table()
|
||
.write()
|
||
.alloc_fd(file, None)?;
|
||
|
||
Ok(fd as usize)
|
||
}
|
||
|
||
/// ### 创建一个vcpu,并且初始化部分数据
|
||
#[inline(never)]
|
||
pub fn _create_vcpu(&mut self, id: usize) -> Result<Arc<LockedVirtCpu>, SystemError> {
|
||
let mut vcpu = self.new_vcpu(id);
|
||
|
||
vcpu.init_arch(self, id)?;
|
||
|
||
Ok(Arc::new(LockedVirtCpu::new(vcpu)))
|
||
}
|
||
|
||
#[inline(never)]
|
||
pub fn new_vcpu(&self, id: usize) -> VirtCpu {
|
||
return VirtCpu {
|
||
cpu: ProcessorId::INVALID,
|
||
kvm: Some(self.lock_vm_ref.clone()),
|
||
vcpu_id: id,
|
||
pid: None,
|
||
preempted: false,
|
||
ready: false,
|
||
last_used_slot: None,
|
||
stats_id: format!("kvm-{}/vcpu-{}", ProcessManager::current_pid().data(), id),
|
||
pv_time: GfnToHvaCache::init(self.lock_vm_ref.clone(), PfnCacheUsage::HOST_USES_PFN),
|
||
arch: VirtCpuArch::new(),
|
||
private: None,
|
||
request: VirtCpuRequest::empty(),
|
||
guest_debug: GuestDebug::empty(),
|
||
run: unsafe { Some(Box::new_zeroed().assume_init()) },
|
||
vcpu_idx: 0,
|
||
mode: VcpuMode::OutsideGuestMode,
|
||
stat: Default::default(),
|
||
};
|
||
}
|
||
|
||
#[cfg(target_arch = "x86_64")]
|
||
pub fn kvm_vmx_mut(&mut self) -> &mut KvmVmx {
|
||
&mut self.kvm_vmx
|
||
}
|
||
|
||
#[cfg(target_arch = "x86_64")]
|
||
pub fn kvm_vmx(&self) -> &KvmVmx {
|
||
&self.kvm_vmx
|
||
}
|
||
}
|
||
|
||
/// ## Multiprocessor (MP) state of a vCPU.
///
/// Some of these states are not valid on every architecture.
// NOTE(review): "Mutil" is a typo for "Multi"; kept because the enum is
// public and renaming would break external callers.
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)]
pub enum MutilProcessorState {
    Runnable,
    Uninitialized,
    InitReceived,
    Halted,
    SipiReceived,
    Stopped,
    CheckStop,
    Operating,
    Load,
    ApResetHold,
    Suspended,
}
|
||
///返回包含 gfn 的 memslot 的指针。如果没有找到,则返回 NULL。
|
||
///当 "approx" 设置为 true 时,即使地址落在空洞中,也会返回 memslot。
|
||
///在这种情况下,将返回空洞边界的其中一个 memslot。
|
||
/// 先简陋完成,原本是二分,现在先遍历
|
||
pub fn search_memslots(
|
||
slot_set: Arc<LockedVmMemSlotSet>,
|
||
gfn: u64, /*_approx:bool*/
|
||
) -> Option<Arc<LockedKvmMemSlot>> {
|
||
let slots = slot_set.lock();
|
||
let node = &slots.gfn_tree;
|
||
//let(start,end)=(0,node.len()-1);
|
||
for (_gfn_num, slot) in node.iter() {
|
||
let slot_guard = slot.read();
|
||
debug!(
|
||
"gfn:{gfn},slot base_gfn: {},slot npages: {}",
|
||
slot_guard.base_gfn, slot_guard.npages
|
||
);
|
||
if gfn >= slot_guard.base_gfn && gfn < slot_guard.base_gfn + slot_guard.npages as u64 {
|
||
return Some(slot.clone());
|
||
}
|
||
}
|
||
return None;
|
||
}
|