add hello_c support

Jianfeng Jiang
2022-09-13 15:56:03 +08:00
parent f5f03d6bca
commit 73f66d54b9
13 changed files with 169 additions and 74 deletions

.gitattributes vendored
View File

@@ -1,2 +1,3 @@
src/kxos-user/hello_world/hello_world filter=lfs diff=lfs merge=lfs -text
src/kxos-user/fork/fork filter=lfs diff=lfs merge=lfs -text
src/kxos-user/hello_c/hello filter=lfs diff=lfs merge=lfs -text

View File

@@ -3,6 +3,7 @@
runner = "cargo run --package kxos-boot --"
[alias]
kcheck = "check --target x86_64-custom.json -Zbuild-std=core,alloc,compiler_builtins -Zbuild-std-features=compiler-builtins-mem"
kbuild = "build --target x86_64-custom.json -Zbuild-std=core,alloc,compiler_builtins -Zbuild-std-features=compiler-builtins-mem"
kimage = "run --target x86_64-custom.json -Zbuild-std=core,alloc,compiler_builtins -Zbuild-std-features=compiler-builtins-mem -- --no-run"
krun = "run --target x86_64-custom.json -Zbuild-std=core,alloc,compiler_builtins -Zbuild-std-features=compiler-builtins-mem"

View File

@@ -181,6 +181,19 @@ impl MemorySet {
}
}
/// Determines whether a virtual address lies in a mapped area
pub fn is_mapped(&self, vaddr: VirtAddr) -> bool {
for (start_address, map_area) in self.areas.iter() {
if *start_address > vaddr {
break;
}
if *start_address <= vaddr && vaddr < *start_address + map_area.mapped_size() {
return true;
}
}
false
}
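The early break above is only sound if iteration visits areas in ascending start order, which suggests areas is an ordered map keyed by start address. A standalone model of the same lookup, with a BTreeMap standing in for MemorySet::areas (a sketch under that assumption):

use std::collections::BTreeMap;

// Model of the lookup above: keys are area start addresses, values are sizes.
// BTreeMap iterates in key order, which is what justifies the early break.
fn is_mapped(areas: &BTreeMap<usize, usize>, vaddr: usize) -> bool {
    for (&start, &size) in areas.iter() {
        if start > vaddr {
            break; // every remaining area starts even higher
        }
        if vaddr < start + size {
            return true;
        }
    }
    false
}

fn main() {
    let mut areas = BTreeMap::new();
    areas.insert(0x1000, 0x2000); // covers [0x1000, 0x3000)
    assert!(is_mapped(&areas, 0x2fff));
    assert!(!is_mapped(&areas, 0x3000)); // end address is exclusive
}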
pub fn new() -> Self {
Self {
pt: PageTable::new(),

View File

@@ -125,27 +125,28 @@ impl VmIo for VmFrameVec {
Ok(())
}
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()> {
let mut start = offset;
let mut remain = buf.len();
let mut processed = 0;
for pa in self.0.iter_mut() {
if start >= PAGE_SIZE {
start -= PAGE_SIZE;
} else {
let copy_len = (PAGE_SIZE - start).min(remain);
let src = &buf[processed..processed + copy_len];
let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
dst.copy_from_slice(src);
processed += copy_len;
remain -= copy_len;
start = 0;
if remain == 0 {
break;
}
}
}
Ok(())
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()> {
todo!()
// let mut start = offset;
// let mut remain = buf.len();
// let mut processed = 0;
// for pa in self.0.iter_mut() {
// if start >= PAGE_SIZE {
// start -= PAGE_SIZE;
// } else {
// let copy_len = (PAGE_SIZE - start).min(remain);
// let src = &buf[processed..processed + copy_len];
// let dst = &mut pa.start_pa().kvaddr().get_bytes_array()[start..src.len() + start];
// dst.copy_from_slice(src);
// processed += copy_len;
// remain -= copy_len;
// start = 0;
// if remain == 0 {
// break;
// }
// }
// }
// Ok(())
}
}
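The todo!() here follows from the receiver change: the old body needs self.0.iter_mut(), which requires &mut self. The loop itself is still the interesting part, showing how a write is scattered across page-sized frames: skip whole pages until reaching offset, then copy chunk by chunk. A standalone model over plain byte arrays (toy types, not the kernel's VmFrame):

const PAGE_SIZE: usize = 4096;

// Scatter `buf` across fixed-size pages, starting `offset` bytes into the
// page sequence. Mirrors the commented-out loop above.
fn write_bytes(pages: &mut [[u8; PAGE_SIZE]], offset: usize, buf: &[u8]) -> Result<(), ()> {
    let mut start = offset; // offset within the current page
    let mut remain = buf.len(); // bytes left to copy
    let mut processed = 0; // bytes already copied
    for page in pages.iter_mut() {
        if start >= PAGE_SIZE {
            start -= PAGE_SIZE; // still seeking the first affected page
        } else {
            let copy_len = (PAGE_SIZE - start).min(remain);
            page[start..start + copy_len].copy_from_slice(&buf[processed..processed + copy_len]);
            processed += copy_len;
            remain -= copy_len;
            start = 0; // later pages are written from byte 0
            if remain == 0 {
                return Ok(());
            }
        }
    }
    if remain == 0 { Ok(()) } else { Err(()) } // Err: buffer overran the pages
}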

View File

@@ -44,7 +44,7 @@ pub trait VmIo: Send + Sync {
/// On success, the input `buf` must be written to the VM object entirely.
/// If, for any reason, the input data can only be written partially,
/// then the method shall return an error.
fn write_bytes(&mut self, offset: usize, buf: &[u8]) -> Result<()>;
fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<()>;
/// Write a value of a specified type at a specified offset.
fn write_val<T: Pod>(&mut self, offset: usize, new_val: &T) -> Result<()> {
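Changing the receiver from &mut self to &self means implementors are expected to use interior mutability, and the exclusive_access() calls on VmSpace::memory_set elsewhere in this commit point the same way. A minimal sketch of that pattern (toy types; RefCell stands in for whatever lock exclusive_access() wraps):

use core::cell::RefCell;

trait VmIo {
    // write through a shared reference, as in the trait above
    fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<(), ()>;
}

struct ToySpace {
    mem: RefCell<Vec<u8>>, // interior mutability instead of &mut self
}

impl VmIo for ToySpace {
    fn write_bytes(&self, offset: usize, buf: &[u8]) -> Result<(), ()> {
        let mut mem = self.mem.borrow_mut();
        let dst = mem.get_mut(offset..offset + buf.len()).ok_or(())?;
        dst.copy_from_slice(buf);
        Ok(())
    }
}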

View File

@@ -64,6 +64,12 @@ impl VmSpace {
Ok(options.addr.unwrap())
}
/// Determines whether a virtual address is already mapped
pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
let memory_set = self.memory_set.exclusive_access();
memory_set.is_mapped(VirtAddr(vaddr))
}
/// Unmaps the physical memory pages within the VM address range.
///
/// The range is allowed to contain gaps, where no physical memory pages
@@ -112,7 +118,7 @@ impl VmIo for VmSpace {
self.memory_set.exclusive_access().read_bytes(vaddr, buf)
}
fn write_bytes(&mut self, vaddr: usize, buf: &[u8]) -> Result<()> {
fn write_bytes(&self, vaddr: usize, buf: &[u8]) -> Result<()> {
self.memory_set.exclusive_access().write_bytes(vaddr, buf)
}
}

View File

@@ -49,6 +49,10 @@ pub fn init_process() {
process.pid()
);
let hello_c_content = read_hello_c_content();
let process = Process::spawn_user_process(hello_c_content);
info!("spawn hello_c process, pid = {}", process.pid());
loop {}
}
@@ -66,3 +70,7 @@ pub fn read_hello_world_content() -> &'static [u8] {
fn read_fork_content() -> &'static [u8] {
include_bytes!("../../kxos-user/fork/fork")
}
fn read_hello_c_content() -> &'static [u8] {
include_bytes!("../../kxos-user/hello_c/hello")
}
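Each user program is embedded into the kernel image at compile time via include_bytes!, presumably because there is no filesystem to load it from yet; this is also why the prebuilt binaries are tracked with Git LFS rather than committed directly.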

View File

@@ -5,7 +5,7 @@ use alloc::vec::Vec;
use kxos_frame::{
config::PAGE_SIZE,
debug,
vm::{Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
vm::{Vaddr, VmIo, VmPerm, VmSpace},
Error,
};
use xmas_elf::{
@@ -80,22 +80,32 @@ impl<'a> ElfSegment<'a> {
self.range.end
}
fn copy_and_map(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
if !self.is_page_aligned() {
return Err(ElfError::SegmentNotPageAligned);
}
fn copy_segment(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
// if !self.is_page_aligned() {
// return Err(ElfError::SegmentNotPageAligned);
// }
// map page
debug!(
"map_segment: 0x{:x} - 0x{:x}",
self.start_address(),
self.end_address()
);
let vm_page_range = VmPageRange::new_range(self.start_address()..self.end_address());
let page_number = vm_page_range.len();
// allocate frames
let vm_alloc_options = VmAllocOptions::new(page_number);
let mut frames = VmFrameVec::allocate(&vm_alloc_options)?;
for page in vm_page_range.iter() {
// map page if the page is not mapped
if !page.is_mapped(vm_space) {
let vm_perm = self.vm_perm | VmPerm::W;
page.map_page(vm_space, vm_perm)?;
}
}
debug!(
"copy_segment: 0x{:x} - 0x{:x}",
self.start_address(),
self.end_address()
);
// copy segment
frames.write_bytes(0, self.data)?;
// map segment
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(self.vm_perm);
vm_space.map(frames, &vm_map_options)?;
vm_space.write_bytes(self.start_address(), self.data)?;
Ok(())
}
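The rewrite drops allocate-frames-then-map-the-whole-range in favor of a per-page check before mapping. The likely motivation: with the SegmentNotPageAligned check commented out, two segments may share a page, and blindly re-mapping the full range would discard data an earlier segment already copied there. A standalone model of the resulting two-phase copy, with a HashMap standing in for the page table (names hypothetical):

use std::collections::HashMap;

const PAGE_SIZE: usize = 4096;
type Frames = HashMap<usize, Box<[u8; PAGE_SIZE]>>; // virtual page number -> frame

fn copy_segment(frames: &mut Frames, start: usize, data: &[u8]) {
    if data.is_empty() {
        return;
    }
    // Phase 1: map every page covering [start, start + data.len()),
    // but only if it is not already mapped.
    let first_vpn = start / PAGE_SIZE;
    let last_vpn = (start + data.len() - 1) / PAGE_SIZE;
    for vpn in first_vpn..=last_vpn {
        frames.entry(vpn).or_insert_with(|| Box::new([0u8; PAGE_SIZE]));
    }
    // Phase 2: copy through the address space, so bytes that an earlier
    // segment placed on a shared page survive.
    for (i, byte) in data.iter().enumerate() {
        let vaddr = start + i;
        frames.get_mut(&(vaddr / PAGE_SIZE)).unwrap()[vaddr % PAGE_SIZE] = *byte;
    }
}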
@@ -160,13 +170,9 @@ impl<'a> ElfLoadInfo<'a> {
Ok(VmPageRange::new_range(elf_start_address..elf_end_address))
}
pub fn copy_and_map(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
pub fn copy_data(&self, vm_space: &VmSpace) -> Result<(), ElfError> {
for segment in self.segments.iter() {
debug!(
"map segment: 0x{:x}-0x{:x}",
segment.range.start, segment.range.end
);
segment.copy_and_map(vm_space)?;
segment.copy_segment(vm_space)?;
}
Ok(())
}
@@ -217,10 +223,10 @@ fn check_elf_header(elf_file: &ElfFile) -> Result<(), ElfError> {
return Err(ElfError::UnsupportedElfType);
}
// system V ABI
debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
return Err(ElfError::UnsupportedElfType);
}
// debug_assert_eq!(elf_header.pt1.os_abi(), header::OsAbi::SystemV);
// if elf_header.pt1.os_abi() != header::OsAbi::SystemV {
// return Err(ElfError::UnsupportedElfType);
// }
// x86_64 architecture
debug_assert_eq!(
elf_header.pt2.machine().as_machine(),

View File

@@ -15,7 +15,9 @@ pub fn load_elf_to_vm_space<'a>(
vm_space: &VmSpace,
) -> Result<ElfLoadInfo<'a>, ElfError> {
let elf_load_info = ElfLoadInfo::parse_elf_data(elf_file_content)?;
elf_load_info.copy_and_map(vm_space)?;
debug!("parse data success");
elf_load_info.copy_data(vm_space)?;
debug!("copy_data success");
elf_load_info.debug_check_map_result(vm_space);
debug!("map elf success");
elf_load_info.map_and_clear_user_stack(vm_space);

View File

@@ -7,14 +7,15 @@ use kxos_frame::{
vm::{Vaddr, VmAllocOptions, VmFrameVec, VmIo, VmMapOptions, VmPerm, VmSpace},
};
use super::elf::ElfError;
/// A set of **CONTIGUOUS** virtual pages in VmSpace
pub struct VmPageRange<'a> {
pub struct VmPageRange {
start_page: VmPage,
end_page: VmPage,
vm_space: Option<&'a VmSpace>,
}
impl<'a> VmPageRange<'a> {
impl VmPageRange {
/// Creates a set of pages covering the virtual address range [a, b)
pub const fn new_range(vaddr_range: Range<Vaddr>) -> Self {
let start_page = VmPage::containing_address(vaddr_range.start);
@@ -22,7 +23,6 @@ impl<'a> VmPageRange<'a> {
Self {
start_page,
end_page,
vm_space: None,
}
}
@@ -32,7 +32,6 @@ impl<'a> VmPageRange<'a> {
Self {
start_page: page,
end_page: page,
vm_space: None,
}
}
@@ -46,45 +45,47 @@ impl<'a> VmPageRange<'a> {
}
/// Allocates a set of physical frames and maps self to them
pub fn map(&mut self, vm_space: &'a VmSpace, vm_perm: VmPerm) {
pub fn map(&mut self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
self.map_to(vm_space, frames, vm_perm);
}
/// Maps self to a set of zeroed frames
pub fn map_zeroed(&mut self, vm_space: &'a VmSpace, vm_perm: VmPerm) {
pub fn map_zeroed(&mut self, vm_space: &VmSpace, vm_perm: VmPerm) {
let options = VmAllocOptions::new(self.len());
let mut frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let frames = VmFrameVec::allocate(&options).expect("allocate frame error");
let buffer = vec![0u8; self.nbytes()];
frames.write_bytes(0, &buffer).expect("write zero failed");
self.map_to(vm_space, frames, vm_perm)
self.map_to(vm_space, frames, vm_perm);
vm_space
.write_bytes(self.start_address(), &buffer)
.expect("write zero failed");
// frames.write_bytes(0, &buffer).expect("write zero failed");
}
/// Maps self to a set of frames
pub fn map_to(&mut self, vm_space: &'a VmSpace, frames: VmFrameVec, vm_perm: VmPerm) {
pub fn map_to(&mut self, vm_space: &VmSpace, frames: VmFrameVec, vm_perm: VmPerm) {
assert_eq!(self.len(), frames.len());
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(frames, &vm_map_options).expect("map failed");
self.vm_space = Some(vm_space)
}
pub fn unmap(&mut self) {
if self.is_mapped() {
let vm_space = self.vm_space.take().unwrap();
vm_space
.unmap(&(self.start_address()..self.end_address()))
.expect("unmap failed");
}
pub fn unmap(&mut self, vm_space: &VmSpace) {
vm_space
.unmap(&(self.start_address()..self.end_address()))
.expect("unmap failed");
}
pub fn is_mapped(&self) -> bool {
if let None = self.vm_space {
false
} else {
true
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
todo!()
}
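is_mapped is left as todo!() here. Given the per-page VmPage::is_mapped added further down and the new iterator, one plausible completion (a sketch, not necessarily the author's intent):

pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
    // the range counts as mapped only if every page in it is mapped
    self.iter().all(|page| page.is_mapped(vm_space))
}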
pub fn iter(&self) -> VmPageIter<'_> {
VmPageIter {
current: self.start_page,
page_range: self,
}
}
@@ -98,8 +99,27 @@ impl<'a> VmPageRange<'a> {
}
}
pub struct VmPageIter<'a> {
current: VmPage,
page_range: &'a VmPageRange,
}
impl<'a> Iterator for VmPageIter<'a> {
type Item = VmPage;
fn next(&mut self) -> Option<Self::Item> {
let next_page = if self.current <= self.page_range.end_page {
Some(self.current)
} else {
None
};
self.current = self.current.next_page();
next_page
}
}
/// A Virtual Page
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VmPage {
/// Virtual Page Number
vpn: usize,
@@ -115,4 +135,25 @@ impl VmPage {
const fn start_address(&self) -> Vaddr {
self.vpn * PAGE_SIZE
}
const fn next_page(&self) -> VmPage {
VmPage { vpn: self.vpn + 1 }
}
/// Checks whether the current page is mapped
pub fn is_mapped(&self, vm_space: &VmSpace) -> bool {
vm_space.is_mapped(self.start_address())
}
pub fn map_page(&self, vm_space: &VmSpace, vm_perm: VmPerm) -> Result<(), ElfError> {
let vm_alloc_option = VmAllocOptions::new(1);
let vm_frame = VmFrameVec::allocate(&vm_alloc_option)?;
let mut vm_map_options = VmMapOptions::new();
vm_map_options.addr(Some(self.start_address()));
vm_map_options.perm(vm_perm);
vm_space.map(vm_frame, &vm_map_options)?;
Ok(())
}
}

View File

@@ -0,0 +1,7 @@
.PHONY: build clean run
build: hello.c
@gcc -static hello.c -o hello
clean:
@rm hello
run: build
@./hello
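The -static flag matters here: the ELF loader in this commit only copies load segments into the address space and has no dynamic-linker support, so user binaries must be statically linked.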

src/kxos-user/hello_c/hello Executable file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06a00e3155168337417bf707d6f2d47321f9ca2f6a52a73d3dceb6bd1b807e95
size 871896
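These three lines are the Git LFS pointer that is actually committed; the real 871896-byte binary lives in LFS storage, per the .gitattributes entry added at the top of this commit.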

View File

@@ -0,0 +1,6 @@
#include <stdio.h>
int main() {
printf("hello world!\n");
return 0;
}