mirror of https://github.com/asterinas/asterinas.git
synced 2025-06-08 21:06:48 +00:00

implement map vmo to vmar

This commit is contained in:
parent 31d644a0f3
commit 429341caa6
@@ -15,6 +15,7 @@ use crate::mm::PhysFrame;
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than a single frame.
#[derive(Debug, Clone)]
pub struct VmFrameVec(Vec<VmFrame>);

impl VmFrameVec {
@@ -44,6 +45,11 @@ impl VmFrameVec {
        Ok(Self(frame_list))
    }

    /// Returns an empty VmFrameVec.
    pub fn empty() -> Self {
        Self(Vec::new())
    }

    /// Pushes a new frame to the collection.
    pub fn push(&mut self, new_frame: VmFrame) {
        self.0.push(new_frame);
@@ -74,6 +80,11 @@ impl VmFrameVec {
        Ok(())
    }

    /// Zeroes all internal vm frames.
    pub fn zero(&self) {
        self.0.iter().for_each(|vm_frame| vm_frame.zero())
    }

    /// Truncate some frames.
    ///
    /// If `new_len >= self.len()`, then this method has no effect.
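
Reviewer note: the three helpers added above compose when building a frame batch incrementally. A minimal usage sketch, assuming the jinux-frame crate context (the function and variable names here are hypothetical):

    use jinux_frame::vm::{VmFrame, VmFrameVec};

    fn collect_and_scrub(frames: Vec<VmFrame>) -> VmFrameVec {
        let mut vec = VmFrameVec::empty(); // the new empty() constructor
        for frame in frames {
            vec.push(frame); // push() appends one frame at a time
        }
        vec.zero(); // zero() scrubs every frame in the batch
        vec
    }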
@@ -129,6 +129,7 @@ impl VmIo for VmSpace {

/// Options for mapping physical memory pages into a VM address space.
/// See `VmSpace::map`.
#[derive(Clone)]
pub struct VmMapOptions {
    /// The start virtual address.
    addr: Option<Vaddr>,
@@ -10,6 +10,10 @@
#![feature(btree_drain_filter)]
#![feature(const_option)]
#![feature(extend_one)]
// FIXME: This feature is used to support vm capability now as a workaround.
// Since this is an incomplete feature, using it is unsafe.
// We should find a proper method to replace this feature with min_specialization, which is a sound feature.
#![feature(specialization)]

use crate::{
    prelude::*,
@@ -22,7 +22,7 @@ fn handle_page_fault(trap_info: &TrapInformation) {
    const PAGE_NOT_PRESENT_ERROR_MASK: u64 = 0x1 << 0;
    if trap_info.err & PAGE_NOT_PRESENT_ERROR_MASK == 0 {
        // If the page is not present, we should ask the vmar to try to commit this page
        todo!()
        generate_fault_signal(trap_info)
    } else {
        // Otherwise, the page fault is caused by a page protection error.
        generate_fault_signal(trap_info)
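
Reviewer note: the branch keys off bit 0 of the x86-64 page-fault error code, which is clear when the faulting page was simply not present and set when the fault was a protection violation on a present page. A minimal sketch of the check (the function name is hypothetical):

    const PAGE_NOT_PRESENT_ERROR_MASK: u64 = 0x1 << 0;

    fn fault_on_missing_page(err: u64) -> bool {
        // Bit 0 clear => page not present: the VMAR should commit it on demand.
        // Bit 0 set   => protection violation: deliver a fault signal.
        err & PAGE_NOT_PRESENT_ERROR_MASK == 0
    }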
@@ -14,5 +14,6 @@
//! In Jinux, VMARs and VMOs, as well as other capabilities, are implemented
//! as zero-cost capabilities.

mod perms;
mod vmar;
mod vmo;
79  src/services/libs/jinux-std/src/vm/perms.rs  Normal file
@@ -0,0 +1,79 @@
use crate::rights::Rights;
use bitflags::bitflags;
use jinux_frame::vm::VmPerm;

bitflags! {
    /// The memory access permissions of memory mappings.
    pub struct VmPerms: u32 {
        /// Readable.
        const READ = 1 << 0;
        /// Writable.
        const WRITE = 1 << 1;
        /// Executable.
        const EXEC = 1 << 2;
    }
}

impl From<Rights> for VmPerms {
    fn from(rights: Rights) -> VmPerms {
        let mut vm_perm = VmPerms::empty();
        if rights.contains(Rights::READ) {
            vm_perm |= VmPerms::READ;
        }
        if rights.contains(Rights::WRITE) {
            vm_perm |= VmPerms::WRITE;
        }
        if rights.contains(Rights::EXEC) {
            vm_perm |= VmPerms::EXEC;
        }
        vm_perm
    }
}

impl From<VmPerms> for Rights {
    fn from(vm_perms: VmPerms) -> Rights {
        let mut rights = Rights::empty();
        if vm_perms.contains(VmPerms::READ) {
            rights |= Rights::READ;
        }
        if vm_perms.contains(VmPerms::WRITE) {
            rights |= Rights::WRITE;
        }
        if vm_perms.contains(VmPerms::EXEC) {
            rights |= Rights::EXEC;
        }
        rights
    }
}

impl From<VmPerm> for VmPerms {
    fn from(perm: VmPerm) -> Self {
        let mut perms = VmPerms::empty();
        if perm.contains(VmPerm::R) {
            perms |= VmPerms::READ;
        }
        if perm.contains(VmPerm::W) {
            perms |= VmPerms::WRITE;
        }
        if perm.contains(VmPerm::X) {
            perms |= VmPerms::EXEC;
        }
        perms
    }
}

impl From<VmPerms> for VmPerm {
    fn from(perms: VmPerms) -> Self {
        let mut perm = VmPerm::empty();
        if perms.contains(VmPerms::READ) {
            perm |= VmPerm::R;
        }
        if perms.contains(VmPerms::WRITE) {
            perm |= VmPerm::W;
        }
        if perms.contains(VmPerms::EXEC) {
            perm |= VmPerm::X;
        }
        perm
    }
}
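
Reviewer note: these four From impls let a capability's Rights flow down to the frame-level VmPerm used by VmSpace::map, and back. A quick round-trip sketch (illustrative; assumes the crate context above):

    use crate::rights::Rights;
    use crate::vm::perms::VmPerms;
    use jinux_frame::vm::VmPerm;

    fn demo() {
        // Rights -> VmPerms: READ | WRITE rights become READ | WRITE perms.
        let perms = VmPerms::from(Rights::READ | Rights::WRITE);
        assert_eq!(perms, VmPerms::READ | VmPerms::WRITE);
        // VmPerms -> VmPerm: the frame-level permission handed to VmSpace::map.
        let perm = VmPerm::from(perms);
        assert!(perm.contains(VmPerm::R) && perm.contains(VmPerm::W));
    }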
@@ -4,10 +4,7 @@ use jinux_frame::{vm::VmIo, Error, Result};

use crate::{rights::Rights, vm::vmo::Vmo};

use super::{
    options::{VmarChildOptions, VmarMapOptions},
    VmPerms, Vmar, Vmar_,
};
use super::{options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, Vmar_};

impl Vmar<Rights> {
    /// Creates a root VMAR.
@@ -3,25 +3,23 @@
mod dyn_cap;
mod options;
mod static_cap;
pub mod vm_mapping;

use crate::rights::Rights;
use crate::vm::perms::VmPerms;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::sync::Weak;
use alloc::vec::Vec;
use bitflags::bitflags;
use jinux_frame::config::PAGE_SIZE;
// use jinux_frame::vm::VmPerm;
use core::ops::Range;
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::vm::Vaddr;
use jinux_frame::vm::VmIo;
// use jinux_frame::vm::VmPerm;
use jinux_frame::vm::VmSpace;
use jinux_frame::AlignExt;
use jinux_frame::{Error, Result};
use spin::Mutex;

use super::vmo::Vmo;
use self::vm_mapping::VmMapping;

/// Virtual Memory Address Regions (VMARs) are a type of capability that manages
/// user address spaces.
@@ -54,13 +52,7 @@ pub struct Vmar<R = Rights>(Arc<Vmar_>, R);

// TODO: how page faults can be delivered to and handled by the current VMAR.

impl Vmar {
    pub(super) fn vm_space(&self) -> Arc<VmSpace> {
        self.0.vm_space.clone()
    }
}

struct Vmar_ {
pub(super) struct Vmar_ {
    /// vmar inner
    inner: Mutex<VmarInner>,
    /// The offset relative to the root VMAR
@@ -80,8 +72,8 @@ struct VmarInner {
    /// The child vmars. The key is the offset relative to the root VMAR
    child_vmar_s: BTreeMap<Vaddr, Arc<Vmar_>>,
    /// The mapped vmos. The key is the offset relative to the root VMAR
    mapped_vmos: BTreeMap<Vaddr, Arc<Vmo>>,
    /// Free ranges that can be used for creating child vmar or mapping vmos
    mapped_vmos: BTreeMap<Vaddr, Arc<VmMapping>>,
    /// Free regions that can be used for creating child vmar or mapping vmos
    free_regions: BTreeMap<Vaddr, FreeRegion>,
}
@@ -118,8 +110,8 @@ impl Vmar_ {

    // Does the real protect. The protected range is ensured to be mapped.
    fn do_protect_inner(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
        for (vmo_base, mapped_vmo) in &self.inner.lock().mapped_vmos {
            let vmo_range = *vmo_base..(*vmo_base + mapped_vmo.size());
        for (vmo_base, vm_mapping) in &self.inner.lock().mapped_vmos {
            let vmo_range = *vmo_base..(*vmo_base + vm_mapping.size());
            if is_intersected(&range, &vmo_range) {
                let intersected_range = get_intersected_range(&range, &vmo_range);
                // TODO: How to protect a mapped vmo?
@@ -184,20 +176,13 @@ impl Vmar_ {
            }
        }
        // If the read range is in a mapped vmo
        for (vmo_base, vmo) in &self.inner.lock().mapped_vmos {
            let vmo_end = *vmo_base + vmo.size();
            if *vmo_base <= read_start && read_end <= vmo_end {
                let vmo_offset = read_start - *vmo_base;
                return vmo.read_bytes(vmo_offset, buf);
        for (vm_mapping_base, vm_mapping) in &self.inner.lock().mapped_vmos {
            let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
            if *vm_mapping_base <= read_start && read_end <= vm_mapping_end {
                let vm_mapping_offset = read_start - *vm_mapping_base;
                return vm_mapping.read_bytes(vm_mapping_offset, buf);
            }
        }
        // FIXME: should we read the free range?
        // for (_, free_region) in &self.inner.lock().free_regions {
        //     let (region_start, region_end) = free_region.range();
        //     if region_start <= read_start && read_end <= region_end {
        //         return self.vm_space.read_bytes(read_start, buf);
        //     }
        // }

        // FIXME: If the read range is across different vmos or child vmars, should we directly return an error?
        Err(Error::AccessDenied)
@@ -215,21 +200,13 @@ impl Vmar_ {
            }
        }
        // If the write range is in a mapped vmo
        for (vmo_base, vmo) in &self.inner.lock().mapped_vmos {
            let vmo_end = *vmo_base + vmo.size();
            if *vmo_base <= write_start && write_end <= vmo_end {
                let vmo_offset = write_start - *vmo_base;
                return vmo.write_bytes(vmo_offset, buf);
        for (vm_mapping_base, vm_mapping) in &self.inner.lock().mapped_vmos {
            let vm_mapping_end = *vm_mapping_base + vm_mapping.size();
            if *vm_mapping_base <= write_start && write_end <= vm_mapping_end {
                let vm_mapping_offset = write_start - *vm_mapping_base;
                return vm_mapping.write_bytes(vm_mapping_offset, buf);
            }
        }
        // If the write range is in a free region
        // FIXME: should we write the free region?
        // for (_, free_region) in &self.inner.lock().free_regions {
        //     let (region_start, region_end) = free_region.range();
        //     if region_start <= write_start && write_end <= region_end {
        //         return self.vm_space.write_bytes(write_start, buf);
        //     }
        // }

        // FIXME: If the write range is across different vmos or child vmars, should we directly return an error?
        Err(Error::AccessDenied)
@@ -242,7 +219,7 @@ impl Vmar_ {
        child_vmar_size: usize,
        align: usize,
    ) -> Result<Arc<Vmar_>> {
        match self.find_free_region_for_child_vmar(child_vmar_offset, child_vmar_size, align) {
        match self.find_free_region_for_child(child_vmar_offset, child_vmar_size, align) {
            None => return Err(Error::InvalidArgs),
            Some((region_base, child_vmar_offset)) => {
                // This unwrap should never fail
@@ -280,18 +257,19 @@ impl Vmar_ {
        }
    }

    /// Finds a free region for a child vmar or vmo.
    /// Returns (region base addr, child real offset).
    fn find_free_region_for_child_vmar(
    fn find_free_region_for_child(
        &self,
        child_vmar_offset: Option<Vaddr>,
        child_vmar_size: usize,
        child_offset: Option<Vaddr>,
        child_size: usize,
        align: usize,
    ) -> Option<(Vaddr, Vaddr)> {
        for (region_base, free_region) in &self.inner.lock().free_regions {
            if let Some(child_vmar_offset) = child_vmar_offset {
            if let Some(child_vmar_offset) = child_offset {
                // If the offset is set, we should find a free region that can satisfy both the offset and the size
                if *region_base <= child_vmar_offset
                    && (child_vmar_offset + child_vmar_size) <= (free_region.end())
                    && (child_vmar_offset + child_size) <= (free_region.end())
                {
                    return Some((*region_base, child_vmar_offset));
                }
@@ -302,7 +280,7 @@ impl Vmar_ {
                let region_start = free_region.start();
                let region_end = free_region.end();
                let child_vmar_real_start = region_start.align_up(align);
                let child_vmar_real_end = child_vmar_real_start + child_vmar_size;
                let child_vmar_real_end = child_vmar_real_start + child_size;
                if region_start <= child_vmar_real_start && child_vmar_real_end <= region_end {
                    return Some((*region_base, child_vmar_real_start));
                }
@@ -314,6 +292,89 @@ impl Vmar_ {
    fn range(&self) -> Range<usize> {
        self.base..(self.base + self.size)
    }

    fn check_vmo_overwrite(&self, vmo_range: Range<usize>, can_overwrite: bool) -> Result<()> {
        let inner = self.inner.lock();
        for (_, child_vmar) in &inner.child_vmar_s {
            let child_vmar_range = child_vmar.range();
            if is_intersected(&vmo_range, &child_vmar_range) {
                return Err(Error::InvalidArgs);
            }
        }

        if !can_overwrite {
            for (child_vmo_base, child_vmo) in &inner.mapped_vmos {
                let child_vmo_range = *child_vmo_base..*child_vmo_base + child_vmo.size();
                if is_intersected(&vmo_range, &child_vmo_range) {
                    return Err(Error::InvalidArgs);
                }
            }
        }

        Ok(())
    }

    /// Returns the attached vm_space.
    pub(super) fn vm_space(&self) -> &VmSpace {
        &self.vm_space
    }

    /// Maps a vmo to this vmar.
    pub fn add_mapping(&self, mapping: Arc<VmMapping>) {
        self.inner
            .lock()
            .mapped_vmos
            .insert(mapping.map_to_addr(), mapping);
    }

    fn allocate_free_region_for_vmo(
        &self,
        vmo_size: usize,
        size: usize,
        offset: Option<usize>,
        align: usize,
        can_overwrite: bool,
    ) -> Result<Vaddr> {
        let allocate_size = size.max(vmo_size);
        let mut inner = self.inner.lock();
        if can_overwrite {
            // If can_overwrite, the offset is ensured not to be None
            let offset = offset.unwrap();
            let vmo_range = offset..(offset + allocate_size);
            // If it can overwrite, the vmo can cross multiple free regions. We will split each free region that intersects with the vmo
            let mut split_regions = Vec::new();
            for (free_region_base, free_region) in &inner.free_regions {
                let free_region_range = free_region.range();
                if is_intersected(free_region_range, &vmo_range) {
                    split_regions.push(*free_region_base);
                }
            }
            for region_base in split_regions {
                let free_region = inner.free_regions.remove(&region_base).unwrap();
                let intersected_range = get_intersected_range(free_region.range(), &vmo_range);
                let regions_after_split = free_region.allocate_range(intersected_range);
                regions_after_split.into_iter().for_each(|region| {
                    inner.free_regions.insert(region.start(), region);
                });
            }
            return Ok(offset);
        } else {
            // Otherwise, the vmo resides in a single region
            match self.find_free_region_for_child(offset, allocate_size, align) {
                None => return Err(Error::InvalidArgs),
                Some((free_region_base, offset)) => {
                    let free_region = inner.free_regions.remove(&free_region_base).unwrap();
                    let vmo_range = offset..(offset + allocate_size);
                    let intersected_range = get_intersected_range(free_region.range(), &vmo_range);
                    let regions_after_split = free_region.allocate_range(intersected_range);
                    regions_after_split.into_iter().for_each(|region| {
                        inner.free_regions.insert(region.start(), region);
                    });
                    return Ok(offset);
                }
            }
        }
    }
}
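
Reviewer note: in the can_overwrite path, every free region touched by the requested range is removed, split around the intersection, and the leftovers re-inserted. A standalone sketch of the splitting idea (plain tuples instead of FreeRegion; purely illustrative):

    /// Splits a free region [start, end) around an allocated sub-range,
    /// returning the leftover pieces that stay free.
    fn split_region(region: (usize, usize), alloc: (usize, usize)) -> Vec<(usize, usize)> {
        let mut leftovers = Vec::new();
        if region.0 < alloc.0 {
            leftovers.push((region.0, alloc.0)); // the part before the allocation
        }
        if alloc.1 < region.1 {
            leftovers.push((alloc.1, region.1)); // the part after the allocation
        }
        leftovers
    }

    // split_region((0x1000, 0x5000), (0x2000, 0x3000)) yields
    // [(0x1000, 0x2000), (0x3000, 0x5000)].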

impl<R> Vmar<R> {
@@ -330,50 +391,6 @@ impl<R> Vmar<R> {
    }
}

bitflags! {
    /// The memory access permissions of memory mappings.
    pub struct VmPerms: u32 {
        /// Readable.
        const READ = 1 << 0;
        /// Writable.
        const WRITE = 1 << 1;
        /// Executable.
        const EXEC = 1 << 2;
    }
}

impl From<Rights> for VmPerms {
    fn from(rights: Rights) -> VmPerms {
        let mut vm_perm = VmPerms::empty();
        if rights.contains(Rights::READ) {
            vm_perm |= VmPerms::READ;
        }
        if rights.contains(Rights::WRITE) {
            vm_perm |= VmPerms::WRITE;
        }
        if rights.contains(Rights::EXEC) {
            vm_perm |= VmPerms::EXEC;
        }
        vm_perm
    }
}

impl From<VmPerms> for Rights {
    fn from(vm_perms: VmPerms) -> Rights {
        let mut rights = Rights::empty();
        if vm_perms.contains(VmPerms::READ) {
            rights |= Rights::READ;
        }
        if vm_perms.contains(VmPerms::WRITE) {
            rights |= Rights::WRITE;
        }
        if vm_perms.contains(VmPerms::EXEC) {
            rights |= Rights::EXEC;
        }
        rights
    }
}

pub struct FreeRegion {
    range: Range<Vaddr>,
}

@@ -1,11 +1,9 @@
//! Options for allocating child VMARs and creating mappings.
//! Options for allocating child VMARs.

use jinux_frame::{config::PAGE_SIZE, vm::Vaddr};
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::{Error, Result};

use crate::vm::vmo::Vmo;

use super::{VmPerms, Vmar};
use super::Vmar;

/// Options for allocating a child VMAR, which must not overlap with any
/// existing mappings or child VMARs.
@@ -99,6 +97,8 @@ impl<R> VmarChildOptions<R> {
    pub fn alloc(self) -> Result<Vmar<R>> {
        // check align
        let align = if let Some(align) = self.align {
            debug_assert!(align % PAGE_SIZE == 0);
            debug_assert!(align.is_power_of_two());
            if align % PAGE_SIZE != 0 || !align.is_power_of_two() {
                return Err(Error::InvalidArgs);
            }
@@ -131,114 +131,3 @@ impl<R> VmarChildOptions<R> {
        Ok(child_vmar)
    }
}

/// Options for creating a new mapping. The mapping is not allowed to overlap
/// with any child VMARs. And unless specified otherwise, it is not allowed
/// to overlap with any existing mapping, either.
pub struct VmarMapOptions<R1, R2> {
    parent: Vmar<R1>,
    vmo: Vmo<R2>,
    perms: VmPerms,
    vmo_offset: usize,
    size: usize,
    offset: Option<usize>,
    align: usize,
    can_overwrite: bool,
}

impl<R1, R2> VmarMapOptions<R1, R2> {
    /// Creates a default set of options with the VMO and the memory access
    /// permissions.
    ///
    /// The VMO must have access rights that correspond to the memory
    /// access permissions. For example, if `perms` contains `VmPerm::Write`,
    /// then `vmo.rights()` should contain `Rights::WRITE`.
    pub fn new(parent: Vmar<R1>, vmo: Vmo<R2>, perms: VmPerms) -> Self {
        let size = vmo.size();
        Self {
            parent,
            vmo,
            perms,
            vmo_offset: 0,
            size,
            offset: None,
            align: PAGE_SIZE,
            can_overwrite: false,
        }
    }

    /// Sets the offset of the first memory page in the VMO that is to be
    /// mapped into the VMAR.
    ///
    /// The offset must be page-aligned and within the VMO.
    ///
    /// The default value is zero.
    pub fn vmo_offset(mut self, offset: usize) -> Self {
        self.vmo_offset = offset;
        self
    }

    /// Sets the size of the mapping.
    ///
    /// The size of a mapping may not be equal to that of the VMO.
    /// For example, it is ok to create a mapping whose size is larger than
    /// that of the VMO, although one cannot read from or write to the
    /// part of the mapping that is not backed by the VMO.
    /// So you may wonder: what is the point of supporting such _oversized_
    /// mappings? The reason is two-fold.
    /// 1. VMOs are resizable. So even if a mapping is backed by a VMO whose
    /// size is equal to that of the mapping initially, we cannot prevent
    /// the VMO from shrinking.
    /// 2. Mappings are not allowed to overlap by default. As a result,
    /// oversized mappings can serve as a placeholder to prevent future
    /// mappings from occupying some particular address ranges accidentally.
    ///
    /// The default value is the size of the VMO.
    pub fn size(mut self, size: usize) -> Self {
        self.size = size;
        self
    }

    /// Sets the mapping's alignment.
    ///
    /// The default value is the page size.
    ///
    /// The provided alignment must be a power of two and a multiple of the
    /// page size.
    pub fn align(mut self, align: usize) -> Self {
        self.align = align;
        self
    }

    /// Sets the mapping's offset inside the VMAR.
    ///
    /// The offset must satisfy the alignment requirement.
    /// Also, the mapping's range `[offset, offset + size)` must be within
    /// the VMAR.
    ///
    /// If not set, the system will choose an offset automatically.
    pub fn offset(mut self, offset: usize) -> Self {
        self.offset = Some(offset);
        self
    }

    /// Sets whether the mapping can overwrite existing mappings.
    ///
    /// The default value is false.
    ///
    /// If this option is set to true, then the `offset` option must be
    /// set.
    pub fn can_overwrite(mut self, can_overwrite: bool) -> Self {
        self.can_overwrite = can_overwrite;
        self
    }

    /// Creates the mapping.
    ///
    /// All options will be checked at this point.
    ///
    /// On success, the virtual address of the new mapping is returned.
    pub fn build(mut self) -> Result<Vaddr> {
        todo!()
    }
}

@@ -9,10 +9,7 @@ use crate::{
    vm::vmo::Vmo,
};

use super::{
    options::{VmarChildOptions, VmarMapOptions},
    VmPerms, Vmar, Vmar_,
};
use super::{options::VmarChildOptions, vm_mapping::VmarMapOptions, VmPerms, Vmar, Vmar_};

impl<R: TRights> Vmar<R> {
    /// Creates a root VMAR.

310  src/services/libs/jinux-std/src/vm/vmar/vm_mapping.rs  Normal file
@@ -0,0 +1,310 @@
use alloc::{
    collections::{BTreeMap, BTreeSet},
    sync::{Arc, Weak},
};
use jinux_frame::{
    config::PAGE_SIZE,
    vm::{Vaddr, VmFrameVec, VmPerm},
    Error,
};
use jinux_frame::{vm::VmMapOptions, Result};
use spin::Mutex;

use crate::vm::vmo::{Pager, Vmo, Vmo_};

use super::{Vmar, Vmar_};
use crate::vm::perms::VmPerms;
use crate::vm::vmar::Rights;
use crate::vm::vmo::VmoRights;

/// A VmMapping represents mapping a vmo into a vmar.
/// A vmar can have multiple VmMappings, which means multiple vmos are mapped to a vmar.
/// A vmo can also contain multiple VmMappings, which means a vmo can be mapped to multiple vmars.
/// The relationship between Vmar and Vmo is M:N.
pub struct VmMapping {
    /// The parent vmar. The parent should always point to a valid vmar.
    parent: Weak<Vmar_>,
    /// The mapped vmo.
    vmo: Arc<Vmo_>,
    /// The map offset of the vmo, in bytes.
    vmo_offset: usize,
    /// The size of the mapping, in bytes. The map size can even be larger than the size of the backing vmo.
    /// Those pages outside the vmo range cannot be read or written.
    map_size: usize,
    /// The base address relative to the root vmar where the vmo is mapped.
    map_to_addr: Vaddr,
    /// The pages already mapped. The key is the page index in the vmo.
    mapped_pages: Mutex<BTreeSet<usize>>,
    /// The map option of each **unmapped** page. The key is the page index in the vmo.
    /// This map can be filled when mapping a vmo to a vmar and can be modified when calling mprotect.
    /// We keep the options in case the page is not committed and will further need these options.
    page_map_options: Mutex<BTreeMap<usize, VmMapOptions>>,
}

impl VmMapping {
    pub fn build_mapping<R1, R2>(option: VmarMapOptions<R1, R2>) -> Result<Self> {
        let VmarMapOptions {
            parent,
            vmo,
            perms,
            vmo_offset,
            size,
            offset,
            align,
            can_overwrite,
        } = option;
        let Vmar(parent_vmar, _) = parent;
        let vmo_ = vmo.0;
        let vmo_size = vmo_.size();
        let map_to_addr = parent_vmar.allocate_free_region_for_vmo(
            vmo_size,
            size,
            offset,
            align,
            can_overwrite,
        )?;

        let real_map_size = size.min(vmo_size);
        let start_page_idx = vmo_offset / PAGE_SIZE;
        let end_page_idx = (vmo_offset + real_map_size) / PAGE_SIZE;
        let vm_space = parent_vmar.vm_space();

        let mut page_map_options = BTreeMap::new();
        let mut mapped_pages = BTreeSet::new();
        let perm = VmPerm::from(perms);
        for page_idx in start_page_idx..end_page_idx {
            let mut vm_map_options = VmMapOptions::new();
            let page_map_addr = map_to_addr + (page_idx - start_page_idx) * PAGE_SIZE;
            vm_map_options.addr(Some(page_map_addr));
            vm_map_options.perm(perm);
            vm_map_options.can_overwrite(can_overwrite);
            vm_map_options.align(align);
            if vmo_.page_commited(page_idx) {
                vmo_.map_page(page_idx, &vm_space, vm_map_options)?;
                mapped_pages.insert(page_idx);
            } else {
                // The page is not committed. We simply record the map options for further mapping.
                page_map_options.insert(page_idx, vm_map_options);
            }
        }
        Ok(Self {
            parent: Arc::downgrade(&parent_vmar),
            vmo: vmo_,
            vmo_offset,
            map_size: size,
            map_to_addr,
            mapped_pages: Mutex::new(mapped_pages),
            page_map_options: Mutex::new(page_map_options),
        })
    }

    /// Adds a new committed page and maps it to the vm space.
    pub fn map_one_page(&self, page_idx: usize, frames: VmFrameVec) -> Result<()> {
        let parent = self.parent.upgrade().unwrap();
        let vm_space = parent.vm_space();
        let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
        let page_map_options_lock = self.page_map_options.lock();
        let map_options = page_map_options_lock.get(&page_idx).unwrap();
        vm_space.map(frames, &map_options)?;
        self.mapped_pages.lock().insert(page_idx);
        Ok(())
    }

    pub fn unmap_one_page(&self, page_idx: usize) -> Result<()> {
        let parent = self.parent.upgrade().unwrap();
        let vm_space = parent.vm_space();
        let map_addr = page_idx * PAGE_SIZE + self.map_to_addr;
        let range = map_addr..(map_addr + PAGE_SIZE);
        vm_space.unmap(&range)?;
        self.mapped_pages.lock().remove(&page_idx);
        Ok(())
    }

    pub fn map_to_addr(&self) -> Vaddr {
        self.map_to_addr
    }

    pub fn size(&self) -> usize {
        self.map_size
    }

    pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        let vmo_read_offset = self.vmo_offset + offset;
        self.vmo.read_bytes(vmo_read_offset, buf)
    }

    pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
        let vmo_write_offset = self.vmo_offset + offset;
        self.vmo.write_bytes(vmo_write_offset, buf)
    }
}

/// Options for creating a new mapping. The mapping is not allowed to overlap
/// with any child VMARs. And unless specified otherwise, it is not allowed
/// to overlap with any existing mapping, either.
pub struct VmarMapOptions<R1, R2> {
    parent: Vmar<R1>,
    vmo: Vmo<R2>,
    perms: VmPerms,
    vmo_offset: usize,
    size: usize,
    offset: Option<usize>,
    align: usize,
    can_overwrite: bool,
}

impl<R1, R2> VmarMapOptions<R1, R2> {
    /// Creates a default set of options with the VMO and the memory access
    /// permissions.
    ///
    /// The VMO must have access rights that correspond to the memory
    /// access permissions. For example, if `perms` contains `VmPerm::Write`,
    /// then `vmo.rights()` should contain `Rights::WRITE`.
    pub fn new(parent: Vmar<R1>, vmo: Vmo<R2>, perms: VmPerms) -> Self {
        let size = vmo.size();
        Self {
            parent,
            vmo,
            perms,
            vmo_offset: 0,
            size,
            offset: None,
            align: PAGE_SIZE,
            can_overwrite: false,
        }
    }

    /// Sets the offset of the first memory page in the VMO that is to be
    /// mapped into the VMAR.
    ///
    /// The offset must be page-aligned and within the VMO.
    ///
    /// The default value is zero.
    pub fn vmo_offset(mut self, offset: usize) -> Self {
        self.vmo_offset = offset;
        self
    }

    /// Sets the size of the mapping.
    ///
    /// The size of a mapping may not be equal to that of the VMO.
    /// For example, it is ok to create a mapping whose size is larger than
    /// that of the VMO, although one cannot read from or write to the
    /// part of the mapping that is not backed by the VMO.
    /// So you may wonder: what is the point of supporting such _oversized_
    /// mappings? The reason is two-fold.
    /// 1. VMOs are resizable. So even if a mapping is backed by a VMO whose
    /// size is equal to that of the mapping initially, we cannot prevent
    /// the VMO from shrinking.
    /// 2. Mappings are not allowed to overlap by default. As a result,
    /// oversized mappings can serve as a placeholder to prevent future
    /// mappings from occupying some particular address ranges accidentally.
    ///
    /// The default value is the size of the VMO.
    pub fn size(mut self, size: usize) -> Self {
        self.size = size;
        self
    }

    /// Sets the mapping's alignment.
    ///
    /// The default value is the page size.
    ///
    /// The provided alignment must be a power of two and a multiple of the
    /// page size.
    pub fn align(mut self, align: usize) -> Self {
        self.align = align;
        self
    }

    /// Sets the mapping's offset inside the VMAR.
    ///
    /// The offset must satisfy the alignment requirement.
    /// Also, the mapping's range `[offset, offset + size)` must be within
    /// the VMAR.
    ///
    /// If not set, the system will choose an offset automatically.
    pub fn offset(mut self, offset: usize) -> Self {
        self.offset = Some(offset);
        self
    }

    /// Sets whether the mapping can overwrite existing mappings.
    ///
    /// The default value is false.
    ///
    /// If this option is set to true, then the `offset` option must be
    /// set.
    pub fn can_overwrite(mut self, can_overwrite: bool) -> Self {
        self.can_overwrite = can_overwrite;
        self
    }

    /// Creates the mapping.
    ///
    /// All options will be checked at this point.
    ///
    /// On success, the virtual address of the new mapping is returned.
    pub fn build(self) -> Result<Vaddr> {
        self.check_options()?;
        let parent_vmar = self.parent.0.clone();
        let vmo_ = self.vmo.0.clone();
        let vm_mapping = Arc::new(VmMapping::build_mapping(self)?);
        let map_to_addr = vm_mapping.map_to_addr();
        vmo_.add_mapping(Arc::downgrade(&vm_mapping));
        parent_vmar.add_mapping(vm_mapping);
        Ok(map_to_addr)
    }

    /// Checks whether all options are valid.
    fn check_options(&self) -> Result<()> {
        // check align
        debug_assert!(self.align % PAGE_SIZE == 0);
        debug_assert!(self.align.is_power_of_two());
        if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
            return Err(Error::InvalidArgs);
        }
        debug_assert!(self.vmo_offset % self.align == 0);
        if self.vmo_offset % self.align != 0 {
            return Err(Error::InvalidArgs);
        }
        if let Some(offset) = self.offset {
            debug_assert!(offset % self.align == 0);
            if offset % self.align != 0 {
                return Err(Error::InvalidArgs);
            }
        }
        self.check_perms()?;
        self.check_overwrite()?;
        Ok(())
    }

    /// Checks whether the vmperm is a subset of the vmo rights.
    fn check_perms(&self) -> Result<()> {
        let perm_rights = Rights::from(self.perms);
        self.vmo.check_rights(perm_rights)
    }

    /// Checks whether the vmo will overlap with any existing vmo or vmar.
    fn check_overwrite(&self) -> Result<()> {
        if self.can_overwrite {
            // If can_overwrite is set, the offset cannot be None
            debug_assert!(self.offset != None);
            if self.offset == None {
                return Err(Error::InvalidArgs);
            }
        }
        if self.offset == None {
            // If the offset is not specified, we assume the map can always find a suitable free region.
            // FIXME: is this always true?
            return Ok(());
        }
        let offset = self.offset.unwrap();
        // We should spare enough space at least for the whole vmo
        let size = self.size.max(self.vmo.size());
        let vmo_range = offset..(offset + size);
        self.parent
            .0
            .check_vmo_overwrite(vmo_range, self.can_overwrite)
    }
}
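
Reviewer note: putting it together, the new build() path is: check the options, reserve a region in the parent VMAR, eagerly map already-committed pages, and register the VmMapping with both the VMO and the VMAR. A hypothetical usage sketch (variable names assumed):

    // Map a whole VMO into a VMAR at a system-chosen address.
    let map_addr: Vaddr = VmarMapOptions::new(vmar, vmo, VmPerms::READ | VmPerms::WRITE)
        .vmo_offset(0)    // start from the first page of the VMO
        .align(PAGE_SIZE) // the default; must be a page-aligned power of two
        .build()?;        // checks options, builds the VmMapping, returns the address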
@@ -5,6 +5,7 @@ use jinux_frame::{vm::VmIo, Error};

use crate::rights::{Rights, TRights};

use super::VmoRights;
use super::{
    options::{VmoCowChild, VmoSliceChild},
    Vmo, VmoChildOptions,
@@ -139,19 +140,12 @@ impl Vmo<Rights> {
        self.check_rights(Rights::from_bits(R1::BITS).ok_or(Error::InvalidArgs)?)?;
        Ok(Vmo(self.0, R1::new()))
    }
}

    /// Returns the access rights.
    pub fn rights(&self) -> Rights {
impl VmoRights for Vmo<Rights> {
    fn rights(&self) -> Rights {
        self.1
    }

    pub fn check_rights(&self, rights: Rights) -> Result<()> {
        if self.rights().contains(rights) {
            Ok(())
        } else {
            Err(Error::AccessDenied)
        }
    }
}

impl VmIo for Vmo<Rights> {
@@ -3,16 +3,13 @@
use core::ops::Range;

use crate::rights::Rights;
use alloc::{
    collections::{BTreeMap, BTreeSet},
    sync::Arc,
    sync::Weak,
};
use alloc::vec;
use alloc::{collections::BTreeMap, sync::Arc, sync::Weak, vec::Vec};
use bitflags::bitflags;
use jinux_frame::{
    config::PAGE_SIZE,
    prelude::Result,
    vm::{Paddr, Vaddr, VmAllocOptions, VmFrame, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
    vm::{Paddr, Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmSpace},
    Error,
};

@@ -25,7 +22,7 @@ pub use options::{VmoChildOptions, VmoOptions};
pub use pager::Pager;
use spin::Mutex;

use super::vmar::Vmar;
use super::vmar::vm_mapping::{self, VmMapping};

/// Virtual Memory Objects (VMOs) are a type of capability that represents a
/// range of memory pages.
@@ -82,7 +79,30 @@ use super::vmar::Vmar;
/// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being a capability).
///
pub struct Vmo<R = Rights>(Arc<Vmo_>, R);
pub struct Vmo<R = Rights>(pub(super) Arc<Vmo_>, R);

/// Functions that exist both for static capability and dynamic capability
pub trait VmoRights {
    /// Returns the access rights.
    fn rights(&self) -> Rights;

    /// Checks whether `rights` is included in self
    fn check_rights(&self, rights: Rights) -> Result<()> {
        if self.rights().contains(rights) {
            Ok(())
        } else {
            Err(Error::AccessDenied)
        }
    }
}

// We implement this trait for Vmo, so we can use functions on types like Vmo<R> without trait bounds.
// FIXME: This requires the incomplete feature specialization, which should be fixed further.
impl<R> VmoRights for Vmo<R> {
    default fn rights(&self) -> Rights {
        unimplemented!()
    }
}
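
Reviewer note: the `default fn` above relies on the unstable `specialization` feature enabled in lib.rs earlier in this commit. The blanket impl supplies a placeholder that the concrete impls for Vmo<Rights> and Vmo<R: TRights> override. A self-contained sketch of the pattern on toy types (not the Jinux ones):

    #![feature(specialization)] // nightly-only, as the FIXME notes

    trait Describe {
        fn describe(&self) -> &'static str;
    }

    struct Wrapper<R>(R);

    // Blanket default that more specific impls may override.
    impl<R> Describe for Wrapper<R> {
        default fn describe(&self) -> &'static str {
            unimplemented!("overridden by specialized impls")
        }
    }

    // Specialized impl for one concrete parameter, mirroring Vmo<Rights>.
    impl Describe for Wrapper<u32> {
        fn describe(&self) -> &'static str {
            "dynamic rights"
        }
    }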

bitflags! {
    /// VMO flags.
@@ -110,7 +130,7 @@ pub enum VmoType {
    NotChild,
}

struct Vmo_ {
pub(super) struct Vmo_ {
    /// Flags
    flags: VmoFlags,
    /// VmoInner
@@ -128,38 +148,64 @@ struct VmoInner {
    pager: Option<Arc<dyn Pager>>,
    /// size, in bytes
    size: usize,
    /// The mapped-to vmar if mapped
    mapped_to_vmar: Weak<Vmar>,
    /// The base addr in the vmspace if self is mapped. Otherwise this field is useless
    mapped_to_addr: Vaddr,
    /// The pages already mapped. The key is the page index.
    mapped_pages: BTreeSet<usize>,
    /// The perm of each page. This map is filled the first time the vmo is mapped to a vmar
    page_perms: BTreeMap<usize, VmPerm>,
    /// The pages committed but not mapped to a Vmar. The key is the page index, the value is the backing frame.
    unmapped_pages: BTreeMap<usize, VmFrameVec>,
    /// The pages from the parent that the current vmo can access. The pages can only be inserted when creating a child vmo.
    /// The key is the page index in the current vmo, and the value is the page index in the parent vmo.
    inherited_pages: BTreeMap<usize, usize>,
    /// The pages committed. The key is the page index, the value is the backing frame.
    committed_pages: BTreeMap<usize, VmFrameVec>,
    /// The pages from the parent that the current vmo can access. The pages can only be inherited when creating a child vmo.
    /// We store the page index range
    inherited_pages: InheritedPages,
    /// The current mappings on this vmo. The vmo can be mapped to multiple vmars.
    mappings: Vec<Weak<VmMapping>>,
    // Pages should be filled with zeros when committed. When creating a COW child, the pages that exceed the range of the parent vmo
    // should be in this set. According to the on-demand requirement, when reading or writing these pages for the first time,
    // we should commit these pages and zero them.
    // pages_should_fill_zeros: BTreeSet<usize>,
}

/// Pages inherited from the parent
struct InheritedPages {
    /// The page index range in the child vmo. The pages inside this range are initially inherited from the parent vmo.
    /// The range includes the start page, but not the end page
    page_range: Range<usize>,
    /// The page index offset in the parent vmo. That is to say, the page with index `idx` in the child vmo corresponds to
    /// the page with index `idx + parent_page_idx_offset` in the parent vmo
    parent_page_idx_offset: usize,
}

impl InheritedPages {
    pub fn new_empty() -> Self {
        Self {
            page_range: 0..0,
            parent_page_idx_offset: 0,
        }
    }

    pub fn new(page_range: Range<usize>, parent_page_idx_offset: usize) -> Self {
        Self {
            page_range,
            parent_page_idx_offset,
        }
    }

    fn contains_page(&self, page_idx: usize) -> bool {
        self.page_range.start <= page_idx && page_idx < self.page_range.end
    }

    fn parent_page_idx(&self, child_page_idx: usize) -> Option<usize> {
        if self.contains_page(child_page_idx) {
            Some(child_page_idx + self.parent_page_idx_offset)
        } else {
            None
        }
    }
}
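
Reviewer note: a quick check of the InheritedPages index math, with assumed values. A child sliced from parent offset 0x2000 (page 2, with PAGE_SIZE = 0x1000) that inherits two pages would behave like this:

    let pages = InheritedPages::new(0..2, 2);      // child pages 0..2, offset 2
    assert_eq!(pages.parent_page_idx(0), Some(2)); // child page 0 -> parent page 2
    assert_eq!(pages.parent_page_idx(1), Some(3)); // child page 1 -> parent page 3
    assert_eq!(pages.parent_page_idx(2), None);    // outside the inherited range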
impl Vmo_ {
    pub fn commit_page(&self, offset: usize) -> Result<()> {
        // assert!(offset % PAGE_SIZE == 0);
        let page_idx = offset / PAGE_SIZE;
        let is_mapped = self.is_mapped();
        let mut inner = self.inner.lock();
        if is_mapped {
            if inner.mapped_pages.contains(&page_idx) {
                return Ok(());
            }
        }

        if !inner.unmapped_pages.contains_key(&offset) {
        if !inner.committed_pages.contains_key(&page_idx) {
            let frames = match &inner.pager {
                None => {
                    let vm_alloc_option = VmAllocOptions::new(1);
@@ -172,17 +218,13 @@ impl Vmo_ {
                    VmFrameVec::from_one_frame(frame)
                }
            };
            if is_mapped {
                // We hold the lock inside inner, so we cannot call the vm_space function here
                let vm_space = inner.mapped_to_vmar.upgrade().unwrap().vm_space();
                let mapped_to_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
                let mut vm_map_options = VmMapOptions::new();
                let vm_perm = inner.page_perms.get(&page_idx).unwrap().clone();
                vm_map_options.perm(vm_perm).addr(Some(mapped_to_addr));
                vm_space.map(frames, &vm_map_options)?;
            } else {
                inner.unmapped_pages.insert(page_idx, frames);
            // Update mappings
            for vm_mapping in &inner.mappings {
                if let Some(vm_mapping) = vm_mapping.upgrade() {
                    vm_mapping.map_one_page(page_idx, frames.clone())?;
                }
            }
            inner.committed_pages.insert(page_idx, frames);
        }
        Ok(())
    }
@@ -191,19 +233,16 @@
        // assert!(offset % PAGE_SIZE == 0);
        let page_idx = offset / PAGE_SIZE;
        let mut inner = self.inner.lock();
        if inner.mapped_pages.contains(&page_idx) {
            // We hold the lock inside inner, so we cannot call the vm_space function here
            let vm_space = inner.mapped_to_vmar.upgrade().unwrap().vm_space();
            let mapped_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
            vm_space.unmap(&(mapped_addr..mapped_addr + PAGE_SIZE))?;
            inner.mapped_pages.remove(&page_idx);
        if inner.committed_pages.contains_key(&page_idx) {
            inner.committed_pages.remove(&page_idx);
            if let Some(pager) = &inner.pager {
                pager.decommit_page(offset)?;
            }
        } else if inner.unmapped_pages.contains_key(&page_idx) {
            inner.unmapped_pages.remove(&page_idx);
            if let Some(pager) = &inner.pager {
                pager.decommit_page(offset)?;
            // Update mappings
            for vm_mapping in &inner.mappings {
                if let Some(vm_mapping) = vm_mapping.upgrade() {
                    vm_mapping.unmap_one_page(page_idx)?;
                }
            }
        }
        Ok(())
@@ -213,8 +252,8 @@
        assert!(range.start % PAGE_SIZE == 0);
        assert!(range.end % PAGE_SIZE == 0);
        let start_page_idx = range.start / PAGE_SIZE;
        let end_page_idx = range.end / PAGE_SIZE;
        for page_idx in start_page_idx..end_page_idx {
        let end_page_idx = (range.end - 1) / PAGE_SIZE;
        for page_idx in start_page_idx..=end_page_idx {
            let offset = page_idx * PAGE_SIZE;
            self.commit_page(offset)?;
        }
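
Reviewer note: the change from an exclusive `range.end / PAGE_SIZE` to an inclusive `(range.end - 1) / PAGE_SIZE` makes the loop also cover a partially-covered last page. A small sketch of the new index math (standalone; PAGE_SIZE assumed to be 4096):

    const PAGE_SIZE: usize = 4096;

    fn pages_covered(range: core::ops::Range<usize>) -> core::ops::RangeInclusive<usize> {
        let start = range.start / PAGE_SIZE;
        let end = (range.end - 1) / PAGE_SIZE; // inclusive: keeps a partial last page
        start..=end
    }

    // pages_covered(0..4096) == 0..=0, same as before.
    // pages_covered(0..4097) == 0..=1; the old exclusive form would miss page 1.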
@@ -226,78 +265,127 @@
        // assert!(range.start % PAGE_SIZE == 0);
        // assert!(range.end % PAGE_SIZE == 0);
        let start_page_idx = range.start / PAGE_SIZE;
        let end_page_idx = range.end / PAGE_SIZE;
        for page_idx in start_page_idx..end_page_idx {
        let end_page_idx = (range.end - 1) / PAGE_SIZE;
        for page_idx in start_page_idx..=end_page_idx {
            let offset = page_idx * PAGE_SIZE;
            self.decommit_page(offset)?;
        }
        Ok(())
    }

    /// Determines whether a page is committed
    pub fn page_commited(&self, page_idx: usize) -> bool {
        self.inner.lock().committed_pages.contains_key(&page_idx)
    }

    /// Maps a page to the vm space. The page is ensured to be committed before calling this function.
    pub fn map_page(
        &self,
        page_idx: usize,
        vm_space: &VmSpace,
        options: VmMapOptions,
    ) -> Result<Vaddr> {
        debug_assert!(self.page_commited(page_idx));
        if !self.page_commited(page_idx) {
            return Err(Error::AccessDenied);
        }
        let frames = self
            .inner
            .lock()
            .committed_pages
            .get(&page_idx)
            .unwrap()
            .clone();
        vm_space.map(frames, &options)
    }

    pub fn add_mapping(&self, mapping: Weak<VmMapping>) {
        self.inner.lock().mappings.push(mapping);
    }

    pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
        let read_len = buf.len();
        debug_assert!(offset + read_len <= self.size());
        if offset + read_len > self.size() {
            return Err(Error::InvalidArgs);
        }

        let first_page_idx = offset / PAGE_SIZE;
        let last_page_idx = (offset + read_len - 1) / PAGE_SIZE;
        let mut buf_read_offset = 0;
        // read one page at a time
        for page_idx in first_page_idx..=last_page_idx {
            let page_offset = if page_idx == first_page_idx {
                offset - first_page_idx * PAGE_SIZE
            } else {
                0
            };
            let page_remain_len = PAGE_SIZE - page_offset;
            let buf_remain_len = read_len - buf_read_offset;
            let read_len_in_page = page_remain_len.min(buf_remain_len);
            if read_len_in_page == 0 {
                break;
            }
            let read_buf = &mut buf[buf_read_offset..(buf_read_offset + read_len_in_page)];
            buf_read_offset += read_len_in_page;
            self.read_bytes_in_page(page_idx, page_offset, read_buf)?;
        }
        Ok(())
        let read_range = offset..(offset + read_len);
        let frames = self.ensure_all_pages_exist(read_range, false)?;
        let read_offset = offset % PAGE_SIZE;
        frames.read_bytes(read_offset, buf)
    }

    /// Reads bytes into buf. The read content is ensured to be on the same page. If the page is not committed or mapped,
    /// this func will commit or map this page
    fn read_bytes_in_page(&self, page_idx: usize, offset: usize, buf: &mut [u8]) -> Result<()> {
        // First read from pages in the parent
        if let Some(parent_page_idx) = self.inner.lock().inherited_pages.get(&page_idx) {
            let parent_vmo = self.parent.upgrade().unwrap();
            let parent_read_offset = *parent_page_idx * PAGE_SIZE + offset;
            return parent_vmo.read_bytes(parent_read_offset, buf);
    /// Ensures all pages inside the range are backed by vm frames; returns the frames.
    fn ensure_all_pages_exist(&self, range: Range<usize>, write_page: bool) -> Result<VmFrameVec> {
        let start_page_idx = range.start / PAGE_SIZE;
        let end_page_idx = (range.end - 1) / PAGE_SIZE; // The end addr is not included
        let mut frames = VmFrameVec::empty();
        for page_idx in start_page_idx..=end_page_idx {
            let mut page_frame = self.ensure_page_exists(page_idx, write_page)?;
            frames.append(&mut page_frame)?;
        }
        self.ensure_page_exists(page_idx)?;
        if self.is_mapped() {
            let page_map_addr = page_idx * PAGE_SIZE + self.mapped_to_addr();
            let vm_space = self.vm_space();
            vm_space.read_bytes(page_map_addr, buf)?;
        } else {
            let inner = self.inner.lock();
            let page_frame = inner.unmapped_pages.get(&page_idx).unwrap();
            page_frame.read_bytes(offset, buf)?;
        }

        Ok(())
        Ok(frames)
    }

    /// Commits (and maps) the page if it does not exist
    fn ensure_page_exists(&self, page_idx: usize) -> Result<()> {
        self.commit_page(page_idx * PAGE_SIZE)?;
        let is_mapped = self.is_mapped();
    /// Ensures one page is backed by a vm frame, then returns the frame.
    fn ensure_page_exists(&self, page_idx: usize, write_page: bool) -> Result<VmFrameVec> {
        let inner = self.inner.lock();
        if is_mapped {
            debug_assert!(inner.mapped_pages.contains(&page_idx));
        } else {
            debug_assert!(inner.unmapped_pages.contains_key(&page_idx));
        // If the page is already committed, return the committed page.
        if inner.committed_pages.contains_key(&page_idx) {
            let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
            return Ok(frames);
        }
        match self.vmo_type {
            // If the vmo is not a child, commit a new page
            VmoType::NotChild => {
                self.commit_page(page_idx * PAGE_SIZE)?;
                let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
                return Ok(frames);
            }
            // If the vmo is a slice child, we will request the frame from the parent
            VmoType::SliceChild => {
                debug_assert!(inner.inherited_pages.contains_page(page_idx));
                if !inner.inherited_pages.contains_page(page_idx) {
                    return Err(Error::AccessDenied);
                }
                let parent = self.parent.upgrade().unwrap();
                let parent_page_idx = inner.inherited_pages.parent_page_idx(page_idx).unwrap();
                return parent.ensure_page_exists(parent_page_idx, write_page);
            }
            // If the vmo is copy-on-write
            VmoType::CopyOnWriteChild => {
                if write_page {
                    // write
                    // commit a new page
                    self.commit_page(page_idx * PAGE_SIZE)?;
                    let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
                    if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
                        // Copy the contents of the parent to the frame
                        let mut tmp_buffer = [0u8; PAGE_SIZE];
                        let parent = self.parent.upgrade().unwrap();
                        parent.read_bytes(parent_page_idx * PAGE_SIZE, &mut tmp_buffer)?;
                        frames.write_bytes(0, &tmp_buffer)?;
                    } else {
                        frames.zero();
                    }
                    return Ok(frames);
                } else {
                    // read
                    if let Some(parent_page_idx) = inner.inherited_pages.parent_page_idx(page_idx) {
                        // If it's inherited from the parent, we request the page from the parent
                        let parent = self.parent.upgrade().unwrap();
                        return parent.ensure_page_exists(parent_page_idx, write_page);
                    } else {
                        // Otherwise, we commit a new page
                        self.commit_page(page_idx * PAGE_SIZE)?;
                        let frames = inner.committed_pages.get(&page_idx).unwrap().clone();
                        // FIXME: should we zero the frames here?
                        frames.zero();
                        return Ok(frames);
                    }
                }
            }
        }
        Ok(())
    }

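Reviewer note: ensure_page_exists now encodes the whole commit-on-demand policy per VMO type. A condensed restatement of the branches above (illustrative comments only):

    // already committed              -> return the existing frame
    // NotChild                       -> commit a fresh page
    // SliceChild                     -> delegate to the parent's frame
    // CopyOnWriteChild, write access -> commit a fresh page, copy the parent's
    //                                   bytes (zero-fill if outside the parent)
    // CopyOnWriteChild, read access  -> read through to the parent if inherited,
    //                                   else commit a zeroed page
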
    pub fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
@@ -307,75 +395,16 @@
            return Err(Error::InvalidArgs);
        }

        let first_page_idx = offset / PAGE_SIZE;
        let last_page_idx = (offset + write_len - 1) / PAGE_SIZE;
        let mut buf_write_offset = 0;
        for page_idx in first_page_idx..=last_page_idx {
            let page_offset = if page_idx == first_page_idx {
                offset - first_page_idx * PAGE_SIZE
            } else {
                0
            };
            let page_remain_len = PAGE_SIZE - page_offset;
            let buf_remain_len = write_len - buf_write_offset;
            let write_len_in_page = page_remain_len.min(buf_remain_len);
            if write_len_in_page == 0 {
                break;
            }
            let write_buf = &buf[buf_write_offset..(buf_write_offset + write_len_in_page)];
            buf_write_offset += write_len_in_page;
            self.write_bytes_in_page(page_idx, page_offset, write_buf)?;
        }
        Ok(())
    }

    fn write_bytes_in_page(&self, page_idx: usize, offset: usize, buf: &[u8]) -> Result<()> {
        // First check if the pages are in the parent
        if let Some(parent_page_idx) = self.inner.lock().inherited_pages.get(&page_idx) {
            match self.vmo_type {
                VmoType::NotChild | VmoType::SliceChild => {
                    let parent_vmo = self.parent.upgrade().unwrap();
                    let parent_read_offset = *parent_page_idx * PAGE_SIZE + offset;
                    return parent_vmo.write_bytes(parent_read_offset, buf);
                }
                VmoType::CopyOnWriteChild => {
                    // Commit a new page for the write
                    self.commit_page(page_idx * offset)?;
                    let is_mapped = self.is_mapped();
                    let inner = self.inner.lock();
                    // Copy the content of the parent page
                    let mut buffer = [0u8; PAGE_SIZE];
                    let parent_page_idx = inner.inherited_pages.get(&page_idx).unwrap().clone();
                    self.parent
                        .upgrade()
                        .unwrap()
                        .read_bytes(parent_page_idx * PAGE_SIZE, &mut buffer)?;
                    if is_mapped {
                        let mapped_to_addr = inner.mapped_to_addr + page_idx * PAGE_SIZE;
                        let vm_space = inner.mapped_to_vmar.upgrade().unwrap();
                        vm_space.write_bytes(mapped_to_addr, &buffer)?;
                    } else {
                        let frame = inner.unmapped_pages.get(&page_idx).unwrap();
                        frame.write_bytes(0, &buffer)?;
                    }
                }
            }
        }
        self.ensure_page_exists(page_idx)?;
        if self.is_mapped() {
            let page_map_addr = page_idx * PAGE_SIZE + self.mapped_to_addr();
            let vm_space = self.vm_space();
            vm_space.write_bytes(page_map_addr, buf)?;
        } else {
            let inner = self.inner.lock();
            let page_frame = inner.unmapped_pages.get(&page_idx).unwrap();
            page_frame.write_bytes(offset, buf)?;
        }
        let write_range = offset..(offset + write_len);
        let frames = self.ensure_all_pages_exist(write_range, true)?;
        let write_offset = offset % PAGE_SIZE;
        frames.write_bytes(write_offset, buf)?;
        Ok(())
    }

    pub fn clear(&self, range: Range<usize>) -> Result<()> {
        todo!()
        let buffer = vec![0u8; range.end - range.start];
        self.write_bytes(range.start, &buffer)
    }
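
Reviewer note: after ensure_all_pages_exist, the returned VmFrameVec starts at the first touched page, so only the intra-page remainder `offset % PAGE_SIZE` is needed when indexing into it. A worked example with PAGE_SIZE = 4096 (values assumed):

    // offset = 5000, len = 5000
    // start_page_idx = 5000 / 4096              = 1
    // end_page_idx   = (5000 + 5000 - 1) / 4096 = 2  -> frames cover pages 1..=2 (8192 B)
    // write_offset   = 5000 % 4096              = 904
    // 904 + 5000 = 5904 <= 8192, so the write fits inside the gathered frames.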

    pub fn size(&self) -> usize {
@@ -393,24 +422,6 @@ impl Vmo_ {
    pub fn flags(&self) -> VmoFlags {
        self.flags.clone()
    }

    fn is_mapped(&self) -> bool {
        if self.inner.lock().mapped_to_vmar.strong_count() == 0 {
            true
        } else {
            false
        }
    }

    /// The mapped-to vmspace. This function can only be called after self is mapped.
    fn vm_space(&self) -> Arc<VmSpace> {
        let mapped_to_vmar = self.inner.lock().mapped_to_vmar.upgrade().unwrap();
        mapped_to_vmar.vm_space()
    }

    fn mapped_to_addr(&self) -> Vaddr {
        self.inner.lock().mapped_to_addr
    }
}

impl<R> Vmo<R> {

@@ -4,20 +4,22 @@ use core::marker::PhantomData;
use core::ops::Range;

use alloc::collections::BTreeMap;
use alloc::collections::BTreeSet;
use alloc::sync::Arc;
use alloc::sync::Weak;
use alloc::vec::Vec;
use jinux_frame::config::PAGE_SIZE;
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrame, VmFrameVec};
use jinux_frame::vm::{Paddr, VmAllocOptions, VmFrameVec};
use jinux_frame::{Error, Result};
use jinux_rights_proc::require;
use spin::Mutex;
use typeflags_util::{SetExtend, SetExtendOp};

use crate::rights::{Dup, Rights, TRights, Write};
use crate::vm::vmo::InheritedPages;
use crate::vm::vmo::VmoType;
use crate::vm::vmo::{VmoInner, Vmo_};

use super::VmoRights;
use super::{Pager, Vmo, VmoFlags};

/// Options for allocating a root VMO.
@@ -160,12 +162,9 @@ fn alloc_vmo_(
    let vmo_inner = VmoInner {
        pager,
        size,
        mapped_to_vmar: Weak::new(),
        mapped_to_addr: 0,
        mapped_pages: BTreeSet::new(),
        page_perms: BTreeMap::new(),
        unmapped_pages: committed_pages,
        inherited_pages: BTreeMap::new(),
        committed_pages,
        inherited_pages: InheritedPages::new_empty(),
        mappings: Vec::new(),
        // pages_should_fill_zeros: BTreeSet::new(),
    };
    Ok(Vmo_ {
@@ -491,35 +490,16 @@ fn alloc_child_vmo_(
            }
        }
    }
    // FIXME: Should inherit the parent VMO's pager and vmar?
    let child_pager = parent_vmo_inner.pager.clone();
    let child_mapped_to_vmar = parent_vmo_inner.mapped_to_vmar.clone();
    // Set the pages inherited from the parent vmo and the pages that should be filled with zeros
    let parent_end_page = parent_vmo_inner.size / PAGE_SIZE;
    let mut inherited_pages = BTreeMap::new();
    // let mut pages_should_fill_zeros = BTreeSet::new();
    let child_start_page = child_vmo_start / PAGE_SIZE;
    let child_end_page = child_vmo_end / PAGE_SIZE;
    for page_idx in child_start_page..child_end_page {
        if page_idx <= parent_end_page {
            // If the page is in range of the parent VMO
            inherited_pages.insert(page_idx, page_idx + child_start_page);
        } else {
            // If the page is out of range of the parent
            // pages_should_fill_zeros.insert(page_idx);
            break;
        }
    }
    let parent_page_idx_offset = range.start / PAGE_SIZE;
    let inherited_end = range.end.min(parent_vmo_.size());
    let inherited_end_page_idx = inherited_end / PAGE_SIZE + 1;
    let inherited_pages = InheritedPages::new(0..inherited_end_page_idx, parent_page_idx_offset);
    let vmo_inner = VmoInner {
        pager: child_pager,
        pager: None,
        size: child_vmo_end - child_vmo_start,
        mapped_to_vmar: child_mapped_to_vmar,
        mapped_to_addr: 0,
        mapped_pages: BTreeSet::new(),
        page_perms: BTreeMap::new(),
        unmapped_pages: BTreeMap::new(),
        committed_pages: BTreeMap::new(),
        mappings: Vec::new(),
        inherited_pages,
        // pages_should_fill_zeros,
    };
    let child_paddr = parent_vmo_
        .paddr()
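
Reviewer note: the per-page BTreeMap of inherited indices is replaced by a single InheritedPages range. A worked computation with assumed values (PAGE_SIZE = 0x1000, parent size = 0x4000, child range = 0x2000..0x6000):

    // parent_page_idx_offset = 0x2000 / 0x1000     = 2
    // inherited_end          = min(0x6000, 0x4000) = 0x4000
    // inherited_end_page_idx = 0x4000 / 0x1000 + 1 = 5
    // => InheritedPages::new(0..5, 2)
    // The trailing +1 keeps a partially-covered last page; it may be worth
    // double-checking against page-aligned ends like this one.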
@@ -1,11 +1,12 @@
use core::ops::Range;

use jinux_frame::prelude::Result;
use jinux_frame::{vm::VmIo, Error};
use jinux_frame::vm::VmIo;
use jinux_rights_proc::require;

use crate::rights::*;

use super::VmoRights;
use super::{
    options::{VmoCowChild, VmoSliceChild},
    Vmo, VmoChildOptions,
@@ -139,19 +140,6 @@ impl<R: TRights> Vmo<R> {
        let rights = self.rights();
        Vmo(self.0, rights)
    }

    /// Returns the access rights.
    pub const fn rights(&self) -> Rights {
        Rights::from_bits(R::BITS).unwrap()
    }

    fn check_rights(&self, rights: Rights) -> Result<()> {
        if self.rights().contains(rights) {
            Ok(())
        } else {
            Err(Error::AccessDenied)
        }
    }
}

impl<R: TRights> VmIo for Vmo<R> {
@@ -165,3 +153,9 @@ impl<R: TRights> VmIo for Vmo<R> {
        self.0.write_bytes(offset, buf)
    }
}

impl<R: TRights> VmoRights for Vmo<R> {
    fn rights(&self) -> Rights {
        Rights::from_bits(R::BITS).unwrap()
    }
}