Repository: https://github.com/asterinas/asterinas.git
Add potential integer overflow check among Framework APIs
Committed by: Tate, Hongliang Tian
Parent: dd64f58e55
Commit: b450eef166
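The patch applies two recurring checked-arithmetic idioms across the framework. A minimal, self-contained sketch of both, with illustrative function names that are not from the codebase:

    // Idiom 1: assert an internal invariant up front. A wrapped kernel
    // address is a bug, so panicking via unwrap() is the intended behavior.
    fn assert_range_fits(start: usize, len: usize) {
        // Panics if `start + len` would exceed usize::MAX.
        start.checked_add(len).unwrap();
    }

    // Idiom 2: reject caller-supplied values gracefully. Overflow becomes
    // a recoverable error instead of a panic.
    #[derive(Debug)]
    enum Error {
        Overflow,
    }

    fn end_of_range(start: usize, len: usize) -> Result<usize, Error> {
        // checked_add yields None on overflow; ok_or turns that into Err.
        start.checked_add(len).ok_or(Error::Overflow)
    }

    fn main() {
        assert_range_fits(0x1000, 0x100); // fine
        assert!(end_of_range(usize::MAX, 1).is_err()); // reported, not wrapped
    }

The hunks below use idiom 1 where the operands come from the kernel itself, and idiom 2 where they come from a caller.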
@@ -63,6 +63,10 @@ impl Hpet {
 
         let mut comparators = Vec::with_capacity(num_comparator as usize);
 
+        // Ensure that the addresses in the loop will not overflow
+        base_address
+            .checked_add(0x100 + num_comparator as usize * 0x20)
+            .unwrap();
         for i in 0..num_comparator {
             let comp = Volatile::new(unsafe {
                 &mut *(paddr_to_vaddr(base_address + 0x100 + i as usize * 0x20) as *mut usize
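The new guard checks only one address, one past the last comparator block. A small sketch of why that single check covers the whole loop; the function name is illustrative, while 0x100 (offset of the first comparator) and 0x20 (per-comparator stride) are taken from the hunk:

    /// Collect the MMIO addresses of `n` comparator blocks starting at `base`.
    fn comparator_addrs(base: usize, n: usize) -> Vec<usize> {
        // The addresses are monotonic in i, so if `base + 0x100 + n * 0x20`
        // fits in usize, every smaller address in the loop fits too and the
        // plain `+` below can never wrap.
        base.checked_add(0x100 + n * 0x20).unwrap();
        (0..n).map(|i| base + 0x100 + i * 0x20).collect()
    }

    fn main() {
        assert_eq!(comparator_addrs(0x1000, 2), vec![0x1100, 0x1120]);
    }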
@@ -9,4 +9,5 @@ pub enum Error {
     AccessDenied,
     IoError,
     NotEnoughResources,
+    Overflow,
 }
@@ -79,11 +79,17 @@ impl IoMem {
 
     pub fn resize(&mut self, range: Range<Paddr>) -> Result<()> {
         let start_vaddr = paddr_to_vaddr(range.start);
-        if start_vaddr < self.virtual_address || start_vaddr >= self.virtual_address + self.limit {
+        let virtual_end = self
+            .virtual_address
+            .checked_add(self.limit)
+            .ok_or(Error::Overflow)?;
+        if start_vaddr < self.virtual_address || start_vaddr >= virtual_end {
             return Err(Error::InvalidArgs);
         }
-        let end_vaddr = start_vaddr + range.len();
-        if end_vaddr <= self.virtual_address || end_vaddr > self.virtual_address + self.limit {
+        let end_vaddr = start_vaddr
+            .checked_add(range.len())
+            .ok_or(Error::Overflow)?;
+        if end_vaddr <= self.virtual_address || end_vaddr > virtual_end {
             return Err(Error::InvalidArgs);
         }
         self.virtual_address = start_vaddr;
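A self-contained sketch of the window check that resize now performs. Both the window end and the requested end come from checked_add, so a range whose end would wrap can no longer slip past the comparisons. The struct and names below are simplified stand-ins, not the framework's types:

    #[derive(Debug, PartialEq)]
    enum Error {
        InvalidArgs,
        Overflow,
    }

    struct Window {
        start: usize, // plays the role of IoMem's virtual_address
        limit: usize, // size of the accessible window in bytes
    }

    impl Window {
        /// Accept (new_start, new_len) only if it lies inside [start, start + limit).
        fn check(&self, new_start: usize, new_len: usize) -> Result<(), Error> {
            let window_end = self.start.checked_add(self.limit).ok_or(Error::Overflow)?;
            if new_start < self.start || new_start >= window_end {
                return Err(Error::InvalidArgs);
            }
            let new_end = new_start.checked_add(new_len).ok_or(Error::Overflow)?;
            if new_end <= self.start || new_end > window_end {
                return Err(Error::InvalidArgs);
            }
            Ok(())
        }
    }

    fn main() {
        let w = Window { start: 0x1000, limit: 0x1000 };
        assert_eq!(w.check(0x1800, 0x100), Ok(()));
        // Before the fix, a huge length could wrap `start + len` to a small
        // value in release builds and pass the naive end-of-window test.
        assert_eq!(w.check(0x1800, usize::MAX), Err(Error::Overflow));
    }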
@@ -53,6 +53,8 @@ impl DmaCoherent {
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
         }
+        // Ensure that the addresses used later will not overflow
+        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         if !is_cache_coherent {
             let mut page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
             for i in 0..frame_count {
@@ -120,6 +122,8 @@ impl Drop for DmaCoherentInner {
     fn drop(&mut self) {
         let frame_count = self.vm_segment.nframes();
         let start_paddr = self.vm_segment.start_paddr();
+        // Ensure that the addresses used later will not overflow
+        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
@@ -59,6 +59,8 @@ impl DmaStream {
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
             return Err(DmaError::AlreadyMapped);
         }
+        // Ensure that the addresses used later will not overflow
+        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         let start_daddr = match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
@@ -147,6 +149,8 @@ impl Drop for DmaStreamInner {
     fn drop(&mut self) {
         let frame_count = self.vm_segment.nframes();
         let start_paddr = self.vm_segment.start_paddr();
+        // Ensure that the addresses used later will not overflow
+        start_paddr.checked_add(frame_count * PAGE_SIZE).unwrap();
         match dma_type() {
             DmaType::Direct => {
                 #[cfg(feature = "intel_tdx")]
@@ -55,6 +55,8 @@ pub fn init() {
 /// Fail if they have been mapped, otherwise insert them.
 fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool {
     let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
+    // Ensure that the addresses used later will not overflow
+    start_paddr.checked_add(num_pages * PAGE_SIZE).unwrap();
     for i in 0..num_pages {
         let paddr = start_paddr + (i * PAGE_SIZE);
         if mapping_set.contains(&paddr) {
@@ -71,6 +73,8 @@ fn check_and_insert_dma_mapping(start_paddr: Paddr, num_pages: usize) -> bool {
 /// Remove a physical address from the dma mapping set.
 fn remove_dma_mapping(start_paddr: Paddr, num_pages: usize) {
     let mut mapping_set = DMA_MAPPING_SET.get().unwrap().lock_irq_disabled();
+    // Ensure that the addresses used later will not overflow
+    start_paddr.checked_add(num_pages * PAGE_SIZE).unwrap();
     for i in 0..num_pages {
         let paddr = start_paddr + (i * PAGE_SIZE);
         mapping_set.remove(&paddr);
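The six DMA hunks above share one motivation: each per-page loop computes `start_paddr + i * PAGE_SIZE` with plain addition, and on wraparound those sums would alias low physical addresses and silently corrupt the mapping set. A minimal sketch with a stand-in set; the names and the use of BTreeSet are illustrative:

    use std::collections::BTreeSet;

    const PAGE_SIZE: usize = 4096;

    /// Fail if any of the pages is already mapped, otherwise insert them all.
    fn check_and_insert(set: &mut BTreeSet<usize>, start: usize, num_pages: usize) -> bool {
        // One checked_add up front proves every `start + i * PAGE_SIZE`
        // below is a real address, not a wrapped alias of some low page.
        start.checked_add(num_pages * PAGE_SIZE).unwrap();
        if (0..num_pages).any(|i| set.contains(&(start + i * PAGE_SIZE))) {
            return false;
        }
        for i in 0..num_pages {
            set.insert(start + i * PAGE_SIZE);
        }
        true
    }

    fn main() {
        let mut set = BTreeSet::new();
        assert!(check_and_insert(&mut set, 0x10000, 4));
        assert!(!check_and_insert(&mut set, 0x10000, 1)); // already mapped
    }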
@@ -105,7 +105,9 @@ impl IntoIterator for VmFrameVec {
 
 impl VmIo for VmFrameVec {
     fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
-        if buf.len() + offset > self.nbytes() {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
             return Err(Error::InvalidArgs);
         }
 
@@ -123,7 +125,9 @@ impl VmIo for VmFrameVec {
     }
 
     fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
-        if buf.len() + offset > self.nbytes() {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
             return Err(Error::InvalidArgs);
         }
 
@@ -266,7 +270,9 @@ impl<'a> VmFrame {
 
 impl VmIo for VmFrame {
     fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
-        if buf.len() + offset > PAGE_SIZE {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > PAGE_SIZE {
             return Err(Error::InvalidArgs);
         }
         let len = self.reader().skip(offset).read(&mut buf.into());
@@ -275,7 +281,9 @@ impl VmIo for VmFrame {
     }
 
     fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
-        if buf.len() + offset > PAGE_SIZE {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > PAGE_SIZE {
             return Err(Error::InvalidArgs);
         }
         let len = self.writer().skip(offset).write(&mut buf.into());
@@ -438,7 +446,9 @@ impl<'a> VmSegment {
 
 impl VmIo for VmSegment {
     fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
-        if buf.len() + offset > self.nbytes() {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
             return Err(Error::InvalidArgs);
         }
         let len = self.reader().skip(offset).read(&mut buf.into());
@@ -447,7 +457,9 @@ impl VmIo for VmSegment {
     }
 
     fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
-        if buf.len() + offset > self.nbytes() {
+        // Do bound check with potential integer overflow in mind
+        let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
+        if max_offset > self.nbytes() {
             return Err(Error::InvalidArgs);
         }
         let len = self.writer().skip(offset).write(&mut buf.into());
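The six VmIo hunks above all replace the same defect: `buf.len() + offset` can wrap, making an out-of-bounds request look in-bounds. A reusable helper in the spirit of those fixes; the helper name is illustrative, since the framework inlines the check at each call site:

    #[derive(Debug, PartialEq)]
    enum Error {
        InvalidArgs,
        Overflow,
    }

    /// Verify that [offset, offset + len) fits in a buffer of `capacity` bytes,
    /// reporting wraparound as Overflow instead of silently passing the check.
    fn check_io_bounds(offset: usize, len: usize, capacity: usize) -> Result<(), Error> {
        let max_offset = offset.checked_add(len).ok_or(Error::Overflow)?;
        if max_offset > capacity {
            return Err(Error::InvalidArgs);
        }
        Ok(())
    }

    fn main() {
        const PAGE_SIZE: usize = 4096;
        assert_eq!(check_io_bounds(4000, 96, PAGE_SIZE), Ok(()));
        assert_eq!(check_io_bounds(4000, 97, PAGE_SIZE), Err(Error::InvalidArgs));
        // In a release build the naive `len + offset` wraps to 3999 and passes:
        assert_eq!(check_io_bounds(4000, usize::MAX, PAGE_SIZE), Err(Error::Overflow));
    }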
@@ -91,7 +91,8 @@ pub(crate) fn init(regions: &[MemoryRegion]) {
         if region.typ() == MemoryRegionType::Usable {
             // Make the memory region page-aligned, and skip if it is too small.
             let start = region.base().align_up(PAGE_SIZE) / PAGE_SIZE;
-            let end = (region.base() + region.len()).align_down(PAGE_SIZE) / PAGE_SIZE;
+            let region_end = region.base().checked_add(region.len()).unwrap();
+            let end = region_end.align_down(PAGE_SIZE) / PAGE_SIZE;
             if end <= start {
                 continue;
             }
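A sketch of the page-frame computation this hunk hardens: the region end now comes from checked_add, so a region that would extend past usize::MAX panics loudly instead of producing a tiny wrapped `end`. The alignment helpers are spelled out here as an assumption, since the framework's align_up/align_down are not shown in the diff:

    const PAGE_SIZE: usize = 4096;

    // Round up/down to a multiple of `align` (assumed to be a power of two).
    fn align_up(addr: usize, align: usize) -> usize {
        (addr + align - 1) & !(align - 1)
    }

    fn align_down(addr: usize, align: usize) -> usize {
        addr & !(align - 1)
    }

    /// Turn a byte region into a half-open range of page frame numbers,
    /// or None if the region is smaller than one aligned page.
    fn usable_frames(base: usize, len: usize) -> Option<(usize, usize)> {
        let start = align_up(base, PAGE_SIZE) / PAGE_SIZE;
        // checked_add: a region ending past usize::MAX is a bug worth a panic.
        let region_end = base.checked_add(len).unwrap();
        let end = align_down(region_end, PAGE_SIZE) / PAGE_SIZE;
        if end <= start {
            return None; // too small once aligned; the caller skips it
        }
        Some((start, end))
    }

    fn main() {
        assert_eq!(usable_frames(0x1234, 0x3000), Some((2, 4)));
        assert_eq!(usable_frames(0x1234, 0x100), None);
    }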
@@ -64,13 +64,20 @@ impl VmSpace {
         let mut memory_set = self.memory_set.lock();
         // FIXME: This is only a hack here. The interface of MapArea cannot simply deal with unmap part of memory,
         // so we only map MapArea of page size now.
+
+        // Ensure that the base address is not unwrapped repeatedly
+        // and the addresses used later will not overflow
+        let base_addr = options.addr.unwrap();
+        base_addr
+            .checked_add(frames.len() * PAGE_SIZE)
+            .ok_or(Error::Overflow)?;
         for (idx, frame) in frames.into_iter().enumerate() {
-            let addr = options.addr.unwrap() + idx * PAGE_SIZE;
+            let addr = base_addr + idx * PAGE_SIZE;
             let frames = VmFrameVec::from_one_frame(frame);
             memory_set.map(MapArea::new(addr, PAGE_SIZE, flags, frames));
         }
 
-        Ok(options.addr.unwrap())
+        Ok(base_addr)
     }
 
     /// determine whether a vaddr is already mapped
@@ -86,9 +93,12 @@ impl VmSpace {
     pub fn unmap(&self, range: &Range<Vaddr>) -> Result<()> {
         assert!(is_page_aligned(range.start) && is_page_aligned(range.end));
         let mut start_va = range.start;
-        let page_size = (range.end - range.start) / PAGE_SIZE;
+        let num_pages = (range.end - range.start) / PAGE_SIZE;
         let mut inner = self.memory_set.lock();
-        for i in 0..page_size {
+        start_va
+            .checked_add(PAGE_SIZE * num_pages)
+            .ok_or(Error::Overflow)?;
+        for i in 0..num_pages {
             inner.unmap(start_va)?;
             start_va += PAGE_SIZE;
         }
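One hedged variant worth sketching, which the commit itself does not do: the guard above computes `PAGE_SIZE * num_pages` with unchecked multiplication, and only the subsequent addition is checked. Chaining checked_mul before checked_add closes that last gap. In this code path num_pages is derived from an already-validated range, so the single checked_add may well be sufficient in practice:

    #[derive(Debug, PartialEq)]
    enum Error {
        Overflow,
    }

    /// Exclusive end of a `num_pages`-page range starting at `start_va`,
    /// with both the multiplication and the addition checked.
    fn range_end(start_va: usize, num_pages: usize, page_size: usize) -> Result<usize, Error> {
        let span = num_pages.checked_mul(page_size).ok_or(Error::Overflow)?;
        start_va.checked_add(span).ok_or(Error::Overflow)
    }

    fn main() {
        assert_eq!(range_end(0x1000, 4, 4096), Ok(0x5000));
        // The product alone already wraps here; checked_mul catches it:
        assert_eq!(range_end(0, usize::MAX, 4096), Err(Error::Overflow));
    }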
@@ -189,6 +189,7 @@ impl From<aster_frame::Error> for Error {
             aster_frame::Error::IoError => Error::new(Errno::EIO),
             aster_frame::Error::NotEnoughResources => Error::new(Errno::EBUSY),
             aster_frame::Error::PageFault => Error::new(Errno::EFAULT),
+            aster_frame::Error::Overflow => Error::new(Errno::EOVERFLOW),
         }
     }
 }
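A compact end-to-end sketch of what this final hunk completes: an Overflow raised in the framework propagates through the From impl and reaches user space as EOVERFLOW. The types below are simplified stand-ins that mirror the hunk's names; the plumbing is illustrative:

    #[derive(Debug, PartialEq)]
    enum FrameError {
        Overflow,
    }

    #[derive(Debug, PartialEq)]
    enum Errno {
        EOVERFLOW, // "Value too large for defined data type"
    }

    #[derive(Debug)]
    struct KernelError {
        errno: Errno,
    }

    impl From<FrameError> for KernelError {
        fn from(err: FrameError) -> Self {
            match err {
                // The arm this commit adds: overflow surfaces as EOVERFLOW.
                FrameError::Overflow => KernelError { errno: Errno::EOVERFLOW },
            }
        }
    }

    fn checked_end(start: usize, len: usize) -> Result<usize, FrameError> {
        start.checked_add(len).ok_or(FrameError::Overflow)
    }

    fn syscall_path(start: usize, len: usize) -> Result<usize, KernelError> {
        // `?` applies the From impl, so the errno conversion is automatic.
        Ok(checked_end(start, len)?)
    }

    fn main() {
        assert_eq!(syscall_path(usize::MAX, 1).unwrap_err().errno, Errno::EOVERFLOW);
    }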