Rename regression to test

Repository: https://github.com/asterinas/asterinas.git
Commit: f675552c5a (parent: 5eefd600cc)
@@ -1,6 +1,6 @@
 # Ignore binaries to avoid performance issues
 target/
-regression/build/
+test/build/
 
 # QEMU log file
 qemu.log
.github/workflows/benchmark_asterinas.yml (6 changed lines)
@@ -27,7 +27,7 @@ jobs:
       - uses: actions/checkout@v2
       - name: Set up the environment
         run: |
-          chmod +x regression/benchmark/bench_linux_and_aster.sh
+          chmod +x test/benchmark/bench_linux_and_aster.sh
           # Set up git due to the network issue on the self-hosted runner
           git config --global --add safe.directory /__w/asterinas/asterinas
           git config --global http.sslVerify false
@@ -40,13 +40,13 @@ jobs:
           max_attempts: 3
           command: |
             make install_osdk
-            bash regression/benchmark/bench_linux_and_aster.sh ${{ matrix.benchmark }}
+            bash test/benchmark/bench_linux_and_aster.sh ${{ matrix.benchmark }}
           on_retry_command: make clean

       - name: Prepare threshold values
         run: |
           echo "Configuring thresholds..."
-          ALERT_THRESHOLD=$(jq -r '.alert_threshold' regression/benchmark/${{ matrix.benchmark }}/config.json)
+          ALERT_THRESHOLD=$(jq -r '.alert_threshold' test/benchmark/${{ matrix.benchmark }}/config.json)
           echo "ALERT_THRESHOLD=$ALERT_THRESHOLD" >> $GITHUB_ENV

       - name: Store benchmark results
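For reference, the threshold lookup in the step above can be reproduced locally. A minimal sketch, assuming jq is installed and using getpid (a benchmark named later in this diff) as the example directory:

```shell
# Read the alert threshold for one benchmark from its config.json
# (the path follows the renamed test/ layout; "getpid" is just an example).
BENCHMARK=getpid
ALERT_THRESHOLD=$(jq -r '.alert_threshold' test/benchmark/$BENCHMARK/config.json)
echo "ALERT_THRESHOLD=$ALERT_THRESHOLD"
```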
.github/workflows/test_asterinas.yml (6 changed lines)
@@ -83,6 +83,6 @@ jobs:
           SYSCALL_TEST_DIR=/exfat EXTRA_BLOCKLISTS_DIRS=blocklists.exfat \
           ENABLE_KVM=0 BOOT_PROTOCOL=linux-efi-handover64 RELEASE=1

-      - name: Regression Test (Linux EFI Handover Boot Protocol)
-        id: regression_test_linux
-        run: make run AUTO_TEST=regression ENABLE_KVM=0 BOOT_PROTOCOL=linux-efi-handover64 RELEASE=1
+      - name: General Test (Linux EFI Handover Boot Protocol)
+        id: test_linux
+        run: make run AUTO_TEST=test ENABLE_KVM=0 BOOT_PROTOCOL=linux-efi-handover64 RELEASE=1
Makefile (22 changed lines)
@@ -31,13 +31,13 @@ BUILD_SYSCALL_TEST := 1
 CARGO_OSDK_ARGS += --kcmd-args="SYSCALL_TEST_DIR=$(SYSCALL_TEST_DIR)"
 CARGO_OSDK_ARGS += --kcmd-args="EXTRA_BLOCKLISTS_DIRS=$(EXTRA_BLOCKLISTS_DIRS)"
 CARGO_OSDK_ARGS += --init-args="/opt/syscall_test/run_syscall_test.sh"
-else ifeq ($(AUTO_TEST), regression)
-CARGO_OSDK_ARGS += --init-args="/regression/run_regression_test.sh"
+else ifeq ($(AUTO_TEST), test)
+CARGO_OSDK_ARGS += --init-args="/test/run_general_test.sh"
 else ifeq ($(AUTO_TEST), boot)
-CARGO_OSDK_ARGS += --init-args="/regression/boot_hello.sh"
+CARGO_OSDK_ARGS += --init-args="/test/boot_hello.sh"
 else ifeq ($(AUTO_TEST), vsock)
 export VSOCK=1
-CARGO_OSDK_ARGS += --init-args="/regression/run_vsock_test.sh"
+CARGO_OSDK_ARGS += --init-args="/test/run_vsock_test.sh"
 endif

 # If the BENCHMARK is set, we will run the benchmark in the kernel mode.
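The hunk above renames the values accepted by AUTO_TEST. A quick usage sketch of the renamed modes (commands assumed from the Makefile conditionals shown here and from the documentation hunk later in this diff):

```shell
# Run the general (formerly "regression") test suite inside the guest
make run AUTO_TEST=test

# Other modes handled by the same conditional chain
make run AUTO_TEST=boot
make run AUTO_TEST=vsock
```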
@@ -132,7 +132,7 @@ $(CARGO_OSDK):

 .PHONY: initramfs
 initramfs:
-	@make --no-print-directory -C regression
+	@make --no-print-directory -C test

 .PHONY: build
 build: initramfs $(CARGO_OSDK)
@@ -149,9 +149,9 @@ run: build
 ifeq ($(AUTO_TEST), syscall)
 	@tail --lines 100 qemu.log | grep -q "^.* of .* test cases passed." \
 		|| (echo "Syscall test failed" && exit 1)
-else ifeq ($(AUTO_TEST), regression)
-	@tail --lines 100 qemu.log | grep -q "^All regression tests passed." \
-		|| (echo "Regression test failed" && exit 1)
+else ifeq ($(AUTO_TEST), test)
+	@tail --lines 100 qemu.log | grep -q "^All general tests passed." \
+		|| (echo "General test failed" && exit 1)
 else ifeq ($(AUTO_TEST), boot)
 	@tail --lines 100 qemu.log | grep -q "^Successfully booted." \
 		|| (echo "Boot test failed" && exit 1)
@@ -196,7 +196,7 @@ docs: $(CARGO_OSDK)
 .PHONY: format
 format:
 	@./tools/format_all.sh
-	@make --no-print-directory -C regression format
+	@make --no-print-directory -C test format

 .PHONY: check
 check: $(CARGO_OSDK)
@@ -218,7 +218,7 @@ check: $(CARGO_OSDK)
 		echo "Checking $$dir"; \
 		(cd $$dir && cargo osdk clippy -- -- -D warnings) || exit 1; \
 	done
-	@make --no-print-directory -C regression check
+	@make --no-print-directory -C test check

 .PHONY: check_osdk
 check_osdk:
@@ -228,5 +228,5 @@ check_osdk:
 clean:
 	@cargo clean
 	@cd docs && mdbook clean
-	@make --no-print-directory -C regression clean
+	@make --no-print-directory -C test clean
 	@rm -f $(CARGO_OSDK)
@@ -22,7 +22,7 @@ kcmd_args = [
     "init=/usr/bin/busybox",
 ]
 init_args = ["sh", "-l"]
-initramfs = "regression/build/initramfs.cpio.gz"
+initramfs = "test/build/initramfs.cpio.gz"

 # Special options for testing
 [test.boot]
@@ -32,12 +32,12 @@ cargo osdk test

 ## Integration Test

-### Regression Test
+### General Test

-The following command builds and runs the test binaries in `regression/apps` directory on Asterinas.
+The following command builds and runs the test binaries in `test/apps` directory on Asterinas.

 ```bash
-make run AUTO_TEST=regression
+make run AUTO_TEST=test
 ```

 ### Syscall Test
@@ -104,7 +104,7 @@ mod test {
     }
 }
 /// Exfat disk image
-static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../../regression/build/exfat.img");
+static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../../test/build/exfat.img");

 /// Read exfat disk image
 fn new_vm_segment_from_image() -> Segment {
@@ -8,7 +8,7 @@
 mod test_utils;

 #[test]
-fn regression() {
+fn test() {
     let stderr = run_cargo_component_cmd!();
     assert!(!stderr.contains("access controlled entry point is disallowed"));
 }
Deleted: the old 58-line benchmark README (@@ -1,58 +0,0 @@), containing the Sysbench, Membench, and Iperf sections that reappear, expanded with CI instructions, in the new test/benchmark/README.md near the end of this diff.
regression/.gitignore → test/.gitignore (renamed, 0 changes)
@@ -26,7 +26,7 @@ INITRAMFS_ALL_DIRS := \
 	$(INITRAMFS)/lib64 \
 	$(INITRAMFS)/bin \
 	$(INITRAMFS)/usr/bin \
-	$(INITRAMFS)/regression \
+	$(INITRAMFS)/test \
 	$(INITRAMFS)/benchmark \
 	$(INITRAMFS_EMPTY_DIRS)
 SYSCALL_TEST_DIR := $(INITRAMFS)/opt/syscall_test
@@ -74,8 +74,8 @@ $(INITRAMFS)/usr/bin: | $(INITRAMFS)/bin
 	@mkdir -p $@
 	@cp /usr/bin/busybox $@

-.PHONY: $(INITRAMFS)/regression
-$(INITRAMFS)/regression:
+.PHONY: $(INITRAMFS)/test
+$(INITRAMFS)/test:
 	@make --no-print-directory -C apps

 $(INITRAMFS)/benchmark: | $(INITRAMFS)/benchmark/bin
@@ -6,7 +6,7 @@ MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
 CUR_DIR := $(patsubst %/,%,$(dir $(MKFILE_PATH)))

 INITRAMFS ?= $(CUR_DIR)/../build/initramfs
-REGRESSION_BUILD_DIR ?= $(INITRAMFS)/regression
+TEST_BUILD_DIR ?= $(INITRAMFS)/test

 # These test apps are sorted by name
 TEST_APPS := \
@@ -48,17 +48,17 @@ $(TEST_APPS):

 .PHONY: format
 format:
-	@echo "Fixing code format for regression tests..."
+	@echo "Fixing code format for general tests..."
 	@clang-format -i $(C_SOURCES)

 .PHONY: check
 check:
-	@echo "Checking code format for regression tests..."
+	@echo "Checking code format for general tests..."
 	@clang-format --dry-run --Werror $(C_SOURCES)

-$(REGRESSION_BUILD_DIR):
+$(TEST_BUILD_DIR):
 	@mkdir -p $@

 .PHONY: scripts
-scripts: | $(REGRESSION_BUILD_DIR)
-	@make --no-print-directory BUILD_DIR=$(REGRESSION_BUILD_DIR) -C scripts
+scripts: | $(TEST_BUILD_DIR)
+	@make --no-print-directory BUILD_DIR=$(TEST_BUILD_DIR) -C scripts
@@ -11,7 +11,7 @@ int main()
 	printf("Execve a new file /execve/hello:\n");
 	// flush the stdout content to ensure the content print to console
 	fflush(stdout);
-	execve("/regression/execve/hello", argv, envp);
+	execve("/test/execve/hello", argv, envp);
 	printf("Should not print\n");
 	fflush(stdout);
 	return 0;
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: MPL-2.0

 CUR_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
-BUILD_DIR := $(CUR_DIR)/../../build/initramfs/regression/network
+BUILD_DIR := $(CUR_DIR)/../../build/initramfs/test/network
 MONGOOSE_DIR := $(CUR_DIR)
 MONGOOSE_C := $(MONGOOSE_DIR)/mongoose.c
 MONGOOSE_H := $(MONGOOSE_DIR)/mongoose.h
@@ -1,16 +1,16 @@
 /* SPDX-License-Identifier: MPL-2.0 */

 /*
- * A framework for writing regression tests.
+ * A framework for writing general tests.
  *
- * A regression test typically consists of two parts, the setup part and the
+ * A general test typically consists of two parts, the setup part and the
  * test part. The setup part contains setup functions that set up the context
  * for the subsequent tests to run. The setup functions cannot fail, and if they
  * do, execution is aborted because the subsequent tests will not work as
  * expected either. The test functions, on the other hand, can fail, and if they
  * do, they are reported as test failures.
  *
- * The framework provides basic utilities for writing regression tests:
+ * The framework provides basic utilities for writing general tests:
  *
  * - To define a setup function or a test function, FN_SETUP() or FN_TEST() can
  *   be used. These functions are automatically executed in the order of their
@@ -4,7 +4,7 @@

 set -e

-NETTEST_DIR=/regression/network
+NETTEST_DIR=/test/network
 cd ${NETTEST_DIR}

 echo "Start network test......"
@@ -4,7 +4,7 @@

 set -e

-SCRIPT_DIR=/regression
+SCRIPT_DIR=/test
 cd ${SCRIPT_DIR}/..

 echo "Start process test......"
@@ -4,7 +4,7 @@

 set -e

-SCRIPT_DIR=/regression
+SCRIPT_DIR=/test
 cd ${SCRIPT_DIR}

 ./shell_cmd.sh
@@ -13,4 +13,4 @@ cd ${SCRIPT_DIR}
 ./network.sh
 ./test_epoll_pwait.sh

-echo "All regression tests passed."
+echo "All general tests passed."
@@ -8,7 +8,7 @@

 set -e

-VSOCK_DIR=/regression/vsock
+VSOCK_DIR=/test/vsock
 cd ${VSOCK_DIR}

 echo "Start vsock test......"
@@ -5,7 +5,7 @@
 set -e
 set -x

-SCRIPT_DIR=/regression
+SCRIPT_DIR=/test
 cd ${SCRIPT_DIR}

 touch hello.txt
@@ -4,7 +4,7 @@

 set -e

-EPOLLTEST_DIR=/regression/epoll
+EPOLLTEST_DIR=/test/epoll
 cd ${EPOLLTEST_DIR}

 echo "Start epoll_pwait test......"
@@ -5,7 +5,7 @@ INCLUDE_MAKEFILE := $(lastword $(MAKEFILE_LIST))
 CUR_DIR := $(shell dirname $(realpath $(MAIN_MAKEFILE)))
 CUR_DIR_NAME := $(shell basename $(realpath $(CUR_DIR)))
 BUILD_DIR := $(CUR_DIR)/../../build
-OBJ_OUTPUT_DIR := $(BUILD_DIR)/initramfs/regression/$(CUR_DIR_NAME)
+OBJ_OUTPUT_DIR := $(BUILD_DIR)/initramfs/test/$(CUR_DIR_NAME)
 DEP_OUTPUT_DIR := $(BUILD_DIR)/dep/$(CUR_DIR_NAME)
 C_SRCS := $(wildcard *.c)
 C_OBJS := $(addprefix $(OBJ_OUTPUT_DIR)/,$(C_SRCS:%.c=%))
test/benchmark/README.md (new file, 146 lines)
@@ -0,0 +1,146 @@
# Introduction to benchmarks

## Overview of supported benchmarks

The benchmark suite contains several benchmarks that can be used to evaluate the performance of the Asterinas platform. The following benchmarks are supported:

- [Sysbench](#Sysbench)
- [Membench](#Membench)
- [Iperf](#Iperf)

### Sysbench

Sysbench is a scriptable benchmark tool that evaluates system performance. It includes five kinds of tests: CPU, memory, file I/O, mutex performance, and thread performance. Detailed usage and options can be found by using:
```shell
sysbench --help
sysbench --test=<test_name> help
```
Here we list some general commands for evaluation:
```shell
# CPU test
sysbench --test=cpu --cpu-max-prime=<N> --num-threads=<N> run

# Thread test
sysbench --test=threads --thread-yields=<N> --num-threads=<N> --max-time=<N> run

# Mutex test
sysbench --test=mutex --mutex-num=<N> --mutex-locks=<N> --num-threads=<N>

# File test; the file-total-size and file-num of prepare and run must be consistent
sysbench --test=fileio --file-total-size=<N><K,M,G> --file-num=<N> prepare
sysbench --test=fileio --file-total-size=<N><K,M,G> --file-num=<N> --file-test-mode=<Type> --file-block-size=<N><K,M,G> --max-time=<N> run

# Memory test
sysbench --test=memory --memory-block-size=<N><K,M,G> --memory-access-mode=<Type> --memory-oper=<Type> run
```

### Membench

Membench is used to establish a baseline for memory bandwidth and latency. For specific usage and options, use:
```shell
membench --help
```
Here we list some general commands to use membench:
```shell
# Measure the latency of mmap
membench -runtime=5 -dir=/dev/zero -size=<N><K,M,G> -engine=mmap_lat

# Measure the latency of page fault handling. The size must be consistent with the file size.
membench -runtime=5 -dir=path_to_a_file -size=<N><K,M,G> -copysize=<N><K,M,G> -mode=<Type> -engine=page_fault

# This is an easy way to generate a file with a target size in Linux.
# The following command creates a file named 512K.file with a size of 512K.
dd if=/dev/zero of=512K.file bs=1K count=512
```

### Iperf

iPerf is a tool for actively measuring the maximum achievable bandwidth on IP networks. Usage and options are detailed in:
```shell
iperf3 -h
```
iperf can be run with the following commands:
```shell
export HOST_ADDR=127.0.0.1
export HOST_PORT=8888
iperf3 -s -B $HOST_ADDR -p $HOST_PORT -D # Start the server as a daemon
iperf3 -c $HOST_ADDR -p $HOST_PORT # Start the client
```
Note that [a variant of iperf3](https://github.com/stefano-garzarella/iperf-vsock) can measure the performance of `vsock`, but the `vsock` implementation has not yet been verified to work well with it.

## Add benchmark to benchmark CI

To add a new benchmark to the Asterinas Continuous Integration (CI) system, follow these detailed steps:

### Step 1: Add the Benchmark to the `asterinas/test/benchmarks` Directory

1. **Create the Benchmark Directory:**
   - Navigate to `asterinas/test/benchmarks`.
   - Create a new directory named after your benchmark, e.g., `getpid`.

2. **Create the Necessary Files:**
   - **config.json:**
     ```json
     {
       "alert_threshold": "125%",
       "pattern": "Syscall average latency:",
       "field": "4"
     }
     ```
     - `alert_threshold`: Set the threshold for alerting. If the benchmark result exceeds this threshold, an alert will be triggered.
     - `pattern`: Define the pattern used to extract the benchmark result from the output.
     - `field`: Specify the index of the result in the extracted output.

     For example, if the benchmark output is "Syscall average latency: 1000 ns", the `pattern` is "Syscall average latency:" and the `field` is "4", so the result-parsing step extracts "1000" as the benchmark result (see the extraction sketch after this step).

   - **result_template.json:**
     ```json
     [
       {
         "name": "Average Syscall Latency on Linux",
         "unit": "ns",
         "value": 0,
         "extra": "linux_avg"
       },
       {
         "name": "Average Syscall Latency on Asterinas",
         "unit": "ns",
         "value": 0,
         "extra": "aster_avg"
       }
     ]
     ```
     - Adjust `name` and `unit` according to your benchmark specifics.

   - **run.sh:**
     ```bash
     #!/bin/bash

     /benchmark/bin/getpid
     ```
     - This script runs the benchmark. Ensure the path to the benchmark binary is correct; `asterinas/test/Makefile` handles the benchmark binaries.

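The pattern/field pair above behaves like whitespace-separated field selection. A minimal sketch of how such an extraction could be reproduced by hand (illustrative only; the exact command used by the CI scripts is not shown in this commit):

```shell
# Given a benchmark log line such as:
#   Syscall average latency: 1000 ns
# select the line matching "pattern" and print whitespace-separated field 4.
PATTERN="Syscall average latency:"
FIELD=4
echo "Syscall average latency: 1000 ns" | awk -v p="$PATTERN" -v f="$FIELD" '$0 ~ p { print $f }'
# Prints: 1000
```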
### Step 2: Update the `asterinas/.github/benchmarks.yml` File

1. **Edit the Benchmarks Configuration:**
   - Open `asterinas/.github/benchmarks.yml`.
   - Add your benchmark to the `strategy.matrix.benchmark` list:
     ```yaml
     strategy:
       matrix:
         benchmark: [getpid]
       fail-fast: false
     ```

### Step 3: Test the Benchmark Locally

1. **Run the Benchmark:**
   - Execute the following command to test the benchmark locally:
     ```bash
     cd asterinas
     bash test/benchmark/bench_linux_and_aster.sh getpid
     ```
   - Ensure the benchmark runs successfully and check the results in `asterinas/result_getpid.json`.

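Once the run completes, the produced result file can be inspected quickly. A small sketch, assuming `result_getpid.json` follows the template structure shown in Step 1:

```shell
# Print each entry's name and value from the result file
# (the field names come from result_template.json above).
jq -r '.[] | "\(.name): \(.value)"' result_getpid.json
```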
### Additional Considerations

- **Validation:** After adding and testing the benchmark, ensure that the CI pipeline correctly integrates the new benchmark by triggering a CI build.
- **Documentation:** Update any relevant documentation to include the new benchmark, explaining its purpose and how to interpret its results.

By following these steps, you will successfully integrate a new benchmark into the Asterinas CI system, enhancing its capability to evaluate platform performance.
Some files were not shown because too many files have changed in this diff.