Mirror of https://github.com/asterinas/asterinas.git
Reorganize the structure of benchmarks
This commit is contained in:
parent 7a13c0dff6
commit 32bfa76703

Makefile (2 changes)
@@ -57,7 +57,7 @@ endif

# If the BENCHMARK is set, we will run the benchmark in the kernel mode.
ifneq ($(BENCHMARK), none)
CARGO_OSDK_ARGS += --init-args="/benchmark/common/runner.sh $(BENCHMARK)"
CARGO_OSDK_ARGS += --init-args="/benchmark/common/bench_runner.sh $(BENCHMARK) asterinas"
# TODO: remove this workaround after enabling kernel virtual area.
OSTD_TASK_STACK_SIZE_IN_PAGES = 7
endif
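
With this hook in place, a benchmark run is selected at build time. A minimal sketch of the invocation, mirroring the `make run` command used by the benchmark script later in this diff (`lmbench/getpid` is only an example benchmark name):

```bash
# Run one benchmark in kernel mode. With the Makefile change above, the
# init arguments passed to the kernel become:
#   /benchmark/common/bench_runner.sh lmbench/getpid asterinas
make run BENCHMARK=lmbench/getpid ENABLE_KVM=1 RELEASE_LTO=1
```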

@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MPL-2.0

BINARY_CACHE_DIR := /opt/linux_binary_cache
VDSO_DIR := ../target
VDSO_LIB := $(VDSO_DIR)/vdso64.so
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))

@@ -7,7 +8,6 @@ CUR_DIR := $(patsubst %/,%,$(dir $(MKFILE_PATH)))
ATOMIC_WGET := $(CUR_DIR)/../tools/atomic_wget.sh
BUILD_DIR := $(CUR_DIR)/build
INITRAMFS := $(BUILD_DIR)/initramfs
BENCHMARK_ENTRYPOINT := $(CUR_DIR)/benchmark/benchmark_entrypoint.sh
INITRAMFS_FILELIST := $(BUILD_DIR)/initramfs.filelist
INITRAMFS_IMAGE := $(BUILD_DIR)/initramfs.cpio.gz
EXT2_IMAGE := $(BUILD_DIR)/ext2.img

@@ -61,13 +61,17 @@ $(INITRAMFS)/lib/x86_64-linux-gnu: | $(VDSO_LIB)
@# required for VDSO
@cp -L $(VDSO_LIB) $@

$(VDSO_LIB): | $(VDSO_DIR)
$(VDSO_LIB): | $(VDSO_DIR) $(BINARY_CACHE_DIR)/vdso64.so
@# TODO: use a custom compiled vdso.so file in the future.
$(ATOMIC_WGET) $@ "https://raw.githubusercontent.com/asterinas/linux_vdso/2a6d2db/vdso64.so"
@cp $(BINARY_CACHE_DIR)/vdso64.so $@

$(VDSO_DIR):
@mkdir -p $@

$(BINARY_CACHE_DIR)/vdso64.so:
@mkdir -p $(BINARY_CACHE_DIR)
@$(ATOMIC_WGET) $@ "https://raw.githubusercontent.com/asterinas/linux_vdso/2a6d2db/vdso64.so"

$(INITRAMFS)/lib64:
@mkdir -p $@
@cp -L /lib64/ld-linux-x86-64.so.2 $@

@@ -97,9 +101,6 @@ $(INITRAMFS)/test:

$(INITRAMFS)/benchmark: | $(INITRAMFS)/benchmark/bin
@cp -rf $(CUR_DIR)/benchmark/* $@
@if [ -e $(BENCHMARK_ENTRYPOINT) ]; then \
cp $(BENCHMARK_ENTRYPOINT) $@; \
fi

$(INITRAMFS)/benchmark/bin:
@mkdir -p $@
@@ -72,25 +72,29 @@ To add a new benchmark to the Asternias Continuous Integration (CI) system, foll

1. **Create the Benchmark Directory:**
- Navigate to `asterinas/test/benchmarks`.
- Create a new directory named after your benchmark, e.g., `getpid`.
- Create a new directory named after your benchmark, e.g., `lmbench/getpid`.

2. **Create the Necessary Files:**
- **config.json:**
```json
{
"alert_threshold": "125%",
"alert_tool": "customBiggerIsBetter",
"search_pattern": "134.22",
"result_index": "2",
"description": "The memory bandwidth for copying 128 MB of data on a single processor using the fcp (fast copy) method."
}
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Simple syscall:",
"result_index": "3",
"description": "lat_syscall null",
"title": "[Process] The cost of getpid",
"show_in_overview": "false"
}
```

- `alert_threshold`: Set the threshold for alerting. If the benchmark result exceeds this threshold, an alert will be triggered. Note that the threshold should usually be greater than 100%.
- `alert_threshold`: Set the threshold for alerting. If the benchmark result exceeds this threshold, an alert will be triggered. Note that the threshold should usually be greater than 100%. If your results are not stable, set it to a bigger value.
- `alert_tool`: Choose the validation tool to use. The available options are `customBiggerIsBetter` and `customSmallerIsBetter`. Refer to [this](https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#tool-required) for more details. If using `customBiggerIsBetter`, the alert will be triggered when `prev.value / current.value` exceeds the threshold. If using `customSmallerIsBetter`, the alert will be triggered when `current.value / prev.value` exceeds the threshold.
- `search_pattern`: Define a regular expression to extract benchmark results from the output using `awk`. This regular expression is designed to match specific patterns in the output, effectively isolating the benchmark results and producing a set of fragments.
- `result_index`: Specify the index of the result in the extracted output. This field is aligned with `awk`'s action.
- `description`: Provide a brief description of the benchmark.
- `title`: Set the title of the benchmark.
- `show_in_overview`: Default is true. Set to `false` to avoid displaying the benchmark in the overview results.
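
To make the `alert_tool` comparison concrete, consider a minimal sketch with hypothetical numbers (a previous result of 100 µs, a current result of 130 µs, and the 125% threshold from the example above):

```bash
# customSmallerIsBetter: an alert fires when current.value / prev.value
# exceeds the alert_threshold. The numbers here are hypothetical.
prev=100
current=130
awk -v c="$current" -v p="$prev" 'BEGIN { printf "current/prev = %.0f%%\n", c / p * 100 }'
# Prints "current/prev = 130%"; 130% > 125%, so an alert would be raised.
```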

For example, if the benchmark output is "Syscall average latency: 1000 ns", the `search_pattern` is "Syscall average latency:", and the `result_index` is "4". `awk` will extract "1000" as the benchmark result. See the `awk` [manual](https://www.gnu.org/software/gawk/manual/gawk.html#Getting-Started) for more information.
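
The same extraction can be reproduced on the command line. A minimal sketch, assuming the sample output above was saved to a hypothetical file `out.txt`; the `awk` invocation mirrors the one used by the benchmark runner script in this diff:

```bash
# Save the sample benchmark output to a file (hypothetical file name).
echo "Syscall average latency: 1000 ns" > out.txt
search_pattern="Syscall average latency:"
result_index="4"
# Match lines containing the search pattern and print the chosen field.
awk "/${search_pattern}/ {result=\$$result_index} END {print result}" out.txt
# Prints "1000".
```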

@@ -102,13 +106,13 @@ To add a new benchmark to the Asternias Continuous Integration (CI) system, foll
"name": "Average Syscall Latency on Linux",
"unit": "ns",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average Syscall Latency on Asterinas",
"unit": "ns",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]
```

@@ -118,7 +122,7 @@ To add a new benchmark to the Asternias Continuous Integration (CI) system, foll
```bash
#!/bin/bash

/benchmark/bin/getpid
/benchmark/bin/lmbench/lat_syscall -P 1 null
```
- This script runs the benchmark. Ensure the path to the benchmark binary is correct. `asterinas/test/Makefile` handles the benchmark binaries.

@@ -130,7 +134,7 @@ To add a new benchmark to the Asternias Continuous Integration (CI) system, foll
```yaml
strategy:
matrix:
benchmark: [getpid]
benchmark: [lmbench/getpid]
fail-fast: false
```

@@ -140,7 +144,7 @@ To add a new benchmark to the Asternias Continuous Integration (CI) system, foll
- Execute the following command to test the benchmark locally:
```bash
cd asterinas
bash test/benchmark/bench_linux_aster.sh getpid
bash test/benchmark/bench_linux_and_aster.sh lmbench/getpid
```
- Ensure the benchmark runs successfully and check the results in `asterinas/result_getpid.json`.
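
A quick way to inspect the generated file is a small `jq` query over the array-of-objects layout shown in `result_template.json` above (a minimal sketch; the file name follows the getpid example):

```bash
# Print each entry of the generated result file as "name: value unit".
jq -r '.[] | "\(.name): \(.value) \(.unit)"' result_getpid.json
```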

@@ -10,32 +10,64 @@ command -v jq >/dev/null 2>&1 || { echo >&2 "jq is not installed. Aborting."; ex

# Script directory
BENCHMARK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
# Kernel image
KERNEL_DIR="/root/dependency"
LINUX_KERNEL="${KERNEL_DIR}/vmlinuz"
KERNEL_VERSION="5.15.0-105-generic"
MODULES_DIR="${BENCHMARK_DIR}/../build/initramfs/lib/modules/${KERNEL_VERSION}/kernel"
# Dependencies for Linux
LINUX_DEPENDENCIES_DIR="/opt/linux_binary_cache"
LINUX_KERNEL="${LINUX_DEPENDENCIES_DIR}/vmlinuz"
LINUX_KERNEL_VERSION="5.15.0-105-generic"
LINUX_MODULES_DIR="${BENCHMARK_DIR}/../build/initramfs/lib/modules/${LINUX_KERNEL_VERSION}/kernel"
# Atomic wget script
WGET_SCRIPT="${BENCHMARK_DIR}/../../tools/atomic_wget.sh"

# Generate entrypoint script for Linux cases
generate_entrypoint_script() {
# Prepare Linux kernel and modules
prepare_libs() {
# Download the Linux kernel and modules
mkdir -p "${LINUX_DEPENDENCIES_DIR}"

if [ ! -f "${LINUX_KERNEL}" ]; then
echo "Downloading the Linux kernel image..."
${WGET_SCRIPT} "${LINUX_KERNEL}" "https://raw.githubusercontent.com/asterinas/linux_binary_cache/8a5b6fd/vmlinuz-${LINUX_KERNEL_VERSION}" || {
echo "Failed to download the Linux kernel image."
exit 1
}
fi
if [ ! -f "${LINUX_DEPENDENCIES_DIR}/virtio_blk.ko" ]; then
echo "Downloading the virtio_blk kernel module..."
${WGET_SCRIPT} "${LINUX_DEPENDENCIES_DIR}/virtio_blk.ko" "https://raw.githubusercontent.com/asterinas/linux_binary_cache/8a5b6fd/kernel/drivers/block/virtio_blk.ko" || {
echo "Failed to download the Linux kernel module."
exit 1
}
fi
# Copy the kernel modules to the initramfs directory
if [ ! -f "${LINUX_MODULES_DIR}/drivers/block/virtio_blk.ko" ]; then
mkdir -p "${LINUX_MODULES_DIR}/drivers/block"
cp ${LINUX_DEPENDENCIES_DIR}/virtio_blk.ko "${LINUX_MODULES_DIR}/drivers/block/virtio_blk.ko"
fi
}

# Parse the results from the benchmark output
parse_results() {
local benchmark="$1"
local init_script=$(cat <<EOF
#!/bin/sh
mount -t devtmpfs devtmpfs /dev
ip link set lo up
modprobe virtio_blk
mount -t ext2 /dev/vda /ext2
local search_pattern="$2"
local result_index="$3"
local linux_output="$4"
local aster_output="$5"
local result_template="$6"
local result_file="$7"

echo "Running ${benchmark}"
chmod +x /benchmark/${benchmark}/run.sh
/benchmark/${benchmark}/run.sh
local linux_result aster_result
linux_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${linux_output}" | tr -d '\r')
aster_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${aster_output}" | tr -d '\r')

if [ -z "${linux_result}" ] || [ -z "${aster_result}" ]; then
echo "Error: Failed to parse the results from the benchmark output" >&2
exit 1
fi

poweroff -f
EOF
)
echo "$init_script"
echo "Updating the result template with extracted values..."
jq --arg linux_result "${linux_result}" --arg aster_result "${aster_result}" \
'(.[] | select(.extra == "linux_result") | .value) |= $linux_result |
(.[] | select(.extra == "aster_result") | .value) |= $aster_result' \
"${result_template}" > "${result_file}"
}

# Run the benchmark on Linux and Asterinas

@@ -47,14 +79,14 @@ run_benchmark() {
local linux_output="${BENCHMARK_DIR}/linux_output.txt"
local aster_output="${BENCHMARK_DIR}/aster_output.txt"
local result_template="${BENCHMARK_DIR}/${benchmark}/result_template.json"
local result_file="result_${benchmark}.json"
local benchmark_name=$(basename "${benchmark}")
local result_file="result_${benchmark_name}.json"

echo "Preparing libraries..."
prepare_libs

# Entrypoint script for initramfs
local initramfs_entrypoint_script="${BENCHMARK_DIR}/benchmark_entrypoint.sh"
generate_entrypoint_script "${benchmark}" > "${initramfs_entrypoint_script}"
chmod +x "${initramfs_entrypoint_script}"

local qemu_cmd="/usr/local/qemu/bin/qemu-system-x86_64 \
local asterinas_cmd="make run BENCHMARK=${benchmark} ENABLE_KVM=1 RELEASE_LTO=1 2>&1 | tee ${aster_output}"
local linux_cmd="/usr/local/qemu/bin/qemu-system-x86_64 \
--no-reboot \
-smp 1 \
-m 8G \

@@ -65,56 +97,27 @@ run_benchmark() {
-initrd ${BENCHMARK_DIR}/../build/initramfs.cpio.gz \
-drive if=none,format=raw,id=x0,file=${BENCHMARK_DIR}/../build/ext2.img \
-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off,queue-size=64,num-queues=1,config-wce=off,request-merging=off,write-cache=off,backend_defaults=off,discard=off,event_idx=off,indirect_desc=off,ioeventfd=off,queue_reset=off \
-append 'console=ttyS0 rdinit=/benchmark/benchmark_entrypoint.sh mitigations=off hugepages=0 transparent_hugepage=never' \
-append 'console=ttyS0 rdinit=/benchmark/common/bench_runner.sh ${benchmark} linux mitigations=off hugepages=0 transparent_hugepage=never' \
-nographic \
2>&1 | tee ${linux_output}"

if [ ! -f "${LINUX_KERNEL}" ]; then
echo "Downloading the Linux kernel image..."
mkdir -p "${KERNEL_DIR}"
${WGET_SCRIPT} "${LINUX_KERNEL}" "https://raw.githubusercontent.com/asterinas/linux_kernel/9e66d28/vmlinuz-${KERNEL_VERSION}"
fi
if [ ! -f "${MODULES_DIR}" ]; then
echo "Downloading additional kernel modules..."
mkdir -p "${MODULES_DIR}/drivers/block"
${WGET_SCRIPT} "${MODULES_DIR}/drivers/block/virtio_blk.ko" "https://raw.githubusercontent.com/asterinas/linux_kernel/f938bde/modules/virtio_blk.ko"
fi
2>&1 | tee ${linux_output}"

echo "Running benchmark ${benchmark} on Asterinas..."
make run BENCHMARK=${benchmark} ENABLE_KVM=1 RELEASE_LTO=1 2>&1 | tee "${aster_output}"

eval "$asterinas_cmd"
echo "Running benchmark ${benchmark} on Linux..."
# Disable unsupported ext2 features of Asterinas on Linux to ensure fairness
mke2fs -F -O ^ext_attr -O ^resize_inode -O ^dir_index ${BENCHMARK_DIR}/../build/ext2.img
make initramfs
eval "$qemu_cmd"
eval "$linux_cmd"

echo "Parsing results..."
local linux_avg aster_avg
linux_avg=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${linux_output}" | tr -d '\r')
aster_avg=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${aster_output}" | tr -d '\r')

if [ -z "${linux_avg}" ] || [ -z "${aster_avg}" ]; then
echo "Error: Failed to parse the average value from the benchmark output" >&2
exit 1
fi

echo "Updating the result template with average values..."
jq --arg linux_avg "${linux_avg}" --arg aster_avg "${aster_avg}" \
'(.[] | select(.extra == "linux_avg") | .value) |= $linux_avg |
(.[] | select(.extra == "aster_avg") | .value) |= $aster_avg' \
"${result_template}" > "${result_file}"
parse_results "$benchmark" "$search_pattern" "$result_index" "$linux_output" "$aster_output" "$result_template" "$result_file"

echo "Cleaning up..."
rm -f "${initramfs_entrypoint_script}"
rm -f "${linux_output}"
rm -f "${aster_output}"
}


# Main

BENCHMARK="$1"

echo "Running benchmark ${BENCHMARK}..."
pwd
if [ ! -d "$BENCHMARK_DIR/$BENCHMARK" ]; then

test/benchmark/common/bench_runner.sh (new executable file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/sh

# SPDX-License-Identifier: MPL-2.0
# Entrypoint for the benchmark VM

set -e

BENCHMARK_DIR="/benchmark"

BENCH_NAME=$1
SYSTEM=$2

print_help() {
echo "Usage: $0 <benchmark_name> <system_type>"
echo " benchmark_name: The name of the benchmark to run."
echo " system_type: The type of system to run the benchmark on. 'linux' or 'asterinas'."
}

# Validate arguments
check_bench_name() {
if [ -z "${BENCH_NAME}" ] || [ -z "${SYSTEM}" ]; then
echo "Error: Invalid arguments."
print_help
exit 1
fi

local full_path="${BENCHMARK_DIR}/${BENCH_NAME}"

if ! [ -d "${full_path}" ]; then
echo "Directory '${BENCH_NAME}' does not exist in the benchmark directory."
print_help
exit 1
fi
}

prepare_system() {
if [ ! -d /tmp ]; then
mkdir /tmp
fi

/sbin/ldconfig

# System-specific preparation
if [ "$SYSTEM" = "linux" ]; then
mount -t devtmpfs devtmpfs /dev
ip link set lo up
modprobe virtio_blk
mkfs.ext2 -F /dev/vda
mount -t ext2 /dev/vda /ext2
elif [ "$SYSTEM" = "asterinas" ]; then
# Asterinas-specific preparation (if any)
:
else
echo "Error: Unknown system type. Please set SYSTEM to 'linux' or 'asterinas'."
exit 1
fi
}

main() {
# Check if the benchmark name is valid
check_bench_name

# Prepare the system
prepare_system

# Run the benchmark
BENCH_SCRIPT=${BENCHMARK_DIR}/${BENCH_NAME}/run.sh
chmod +x ${BENCH_SCRIPT}
${BENCH_SCRIPT}

# Shutdown explicitly if running on Linux
if [ "$SYSTEM" = "linux" ]; then
poweroff -f
fi
}

main "$@"
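
A minimal sketch of how this entrypoint ends up being invoked; the benchmark name is just an example, and the two boot-time call sites are the Makefile `--init-args` and the QEMU `rdinit=` option shown earlier in this diff:

```bash
# On Asterinas, the root Makefile passes the runner as the init argument:
#   /benchmark/common/bench_runner.sh $(BENCHMARK) asterinas
# On Linux, the QEMU command boots with:
#   rdinit=/benchmark/common/bench_runner.sh ${benchmark} linux
# Inside a guest it can also be exercised directly, e.g.:
/benchmark/common/bench_runner.sh lmbench/getpid linux
```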

@@ -1,45 +0,0 @@
#!/bin/sh

# SPDX-License-Identifier: MPL-2.0

set -e

print_help() {
echo "Usage: $0 bench_name"
echo ""
echo "The bench_name argument must be one of the directory under asterinas/test/benchmark/".
}

BENCH_NAME=$1
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)

# Validate arguments
check_bench_name() {
if [ -z "${BENCH_NAME}" ]; then
echo "Error: No directory provided."
print_help
exit 1
fi

local full_path="${SCRIPT_DIR}/../${BENCH_NAME}"

if ! [ -d "${full_path}" ]; then
echo "Directory '${BENCH_NAME}' does not exist in the script directory."
print_help
exit 1
fi
}

check_bench_name

BENCH_SCRIPT=${SCRIPT_DIR}/../${BENCH_NAME}/run.sh

# Prepare the environment
if [ ! -d /tmp ]; then
mkdir /tmp
fi
/sbin/ldconfig
chmod +x ${BENCH_SCRIPT}

# Run the benchmark
${BENCH_SCRIPT}

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "lmdd result:",
"result_index": "8",
"description": "The bandwidth of file copy."
}
"description": "lmdd",
"title": "[EXT2] The bandwidth of copying data between files"
}

@@ -3,12 +3,12 @@
"name": "Average file copy bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average file copy bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Fifo latency",
"result_index": "3",
"description": "The latency of fifo on a single processor."
}
"description": "lat_fifo",
"title": "[FIFO] The cost of write+read (1B)"
}

@@ -3,12 +3,12 @@
"name": "Average fifo latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average fifo latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "536.87",
"result_index": "2",
"description": "The bandwidth of reading a file on a single processor."
"description": "bw_mem fcp",
"title": "[Memory] The bandwidth of copying integers"
}

@@ -3,12 +3,12 @@
"name": "Average memory copy bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average memory copy bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "268.44",
"result_index": "2",
"description": "The bandwidth of mmap on a single processor."
}
"description": "bw_mmap",
"title": "[Memory] The bandwidth of mmap"
}

@@ -3,12 +3,12 @@
"name": "Average mmap bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average mmap bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "4.194304",
"result_index": "2",
"description": "The latency of mmap on a single processor."
}
"description": "lat_mmap",
"title": "[Memory] The cost of mmap+unmap"
}

@@ -3,12 +3,12 @@
"name": "Average mmap latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average mmap latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Pagefaults on ",
"result_index": "4",
"description": "The latency of handling page fault on a single processor."
}
"description": "lat_pagefault",
"title": "[Memory] The cost of page fault handling"
}

@@ -3,12 +3,12 @@
"name": "Average page fault latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average page fault latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "536.87",
"result_index": "2",
"description": "The memory bandwidth for reading 512 MB of data on a single processor."
}
"description": "bw_mem frd",
"title": "[Memory] The bandwidth of reading integers"
}

@@ -3,12 +3,12 @@
"name": "Average memory read bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average memory read bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "536.87",
"result_index": "2",
"description": "The memory bandwidth for copying 512 MB of data on a single processor using the fcp (fast copy) method."
}
"description": "bw_mem fwr",
"title": "[Memory] The bandwidth of writing integers"
}

@@ -3,12 +3,12 @@
"name": "Average memory write bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average memory write bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "Pipe bandwidth",
"result_index": "3",
"description": "The bandwidth of pipe on a single processor."
}
"description": "bw_pipe",
"title": "[Pipes] The bandwidth"
}

@@ -3,12 +3,12 @@
"name": "Average pipe bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average pipe bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Pipe latency",
"result_index": "3",
"description": "The latency of pipe on a single processor."
}
"description": "lat_pipe",
"title": "[Pipes] The cost of write+read (1B)"
}

@@ -3,12 +3,12 @@
"name": "Average pipe latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average pipe latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "18 ",
"result_index": "2",
"description": "The latency of context switching between 18 contexts on a single processor."
}
"description": "lat_ctx 2",
"title": "[Process] The cost of context switching"
}

@@ -3,12 +3,12 @@
"name": "Average context switch latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average context switch latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Process fork\\+execve",
"result_index": "3",
"description": "The latency of creating and executing processes on a single processor."
}
"description": "lat_proc exec",
"title": "[Process] The cost of fork+exec+exit"
}

@@ -3,12 +3,12 @@
"name": "Average exec latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average exec latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Process fork",
"result_index": "3",
"description": "The latency of the fork system call on a single processor."
}
"description": "lat_proc fork",
"title": "[Process] The cost of fork+exit"
}

@@ -3,12 +3,12 @@
"name": "Average Fork latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average Fork latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Simple syscall:",
"result_index": "3",
"description": "The latency of the getpid system call on a single processor."
}
"description": "lat_syscall null",
"title": "[Process] The cost of getppid"
}

@@ -3,12 +3,12 @@
"name": "Average syscall latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average syscall latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Process fork\\+\\/bin\\/sh",
"result_index": "4",
"description": "The latency of creating and executing a shell process on a single processor."
}
"description": "lat_proc shell",
"title": "[Process] The cost of fork+exec+shell+exit"
}

@@ -3,12 +3,12 @@
"name": "Average shell latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average shell latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "^0k",
"result_index": "2",
"description": "The number of 0k-sized files created then deleted over a duration."
}
"description": "lat_fs -s 0k",
"title": "[Ramfs] The cost of creating/deleting small files (0KB)"
}

@@ -3,12 +3,12 @@
"name": "Number of created/deleted files on Linux",
"unit": "number",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Number of created/deleted files on Asterinas",
"unit": "number",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "10k",
"result_index": "2",
"description": "The number of 10k-sized files created then deleted over a duration."
}
"description": "lat_fs -s 10K",
"title": "[Ramfs] The cost of creating/deleting small files (10KB)"
}

@@ -3,12 +3,12 @@
"name": "Number of created/deleted files on Linux",
"unit": "number",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Number of created/deleted files on Asterinas",
"unit": "number",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Semaphore latency:",
"result_index": "3",
"description": "The latency of semaphore on a single processor."
}
"description": "lat_sem",
"title": "[Semaphores] The cost of semop"
}

@@ -3,12 +3,12 @@
"name": "Average semaphore latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average semaphore latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Signal handler overhead:",
"result_index": "4",
"description": "The latency of signal handling on a single processor."
}
"description": "lat_sig catch",
"title": "[Signals] The cost of catching a signal"
}

@@ -3,12 +3,12 @@
"name": "Average Signal handler overhead on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average Signal handler overhead on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Signal handler installation:",
"result_index": "4",
"description": "The latency of signal handling on a single processor."
}
"description": "lat_sig install",
"title": "[Signals] The cost of installing a signal handler"
}

@@ -3,12 +3,12 @@
"name": "Average Signal handler install latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average Signal handler install latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Protection fault:",
"result_index": "3",
"description": "The latency to catch a protection fault on a single processor."
}
"description": "lat_sig prot",
"title": "[Signals] The cost of catching a segfault"
}

@@ -3,12 +3,12 @@
"name": "Average protection fault latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average protection fault latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "0.004096 ",
"result_index": "2",
"description": "The bandwidth of TCP with 4096 message size in localhost."
}
"description": "bw_tcp -l",
"title": "[TCP sockets] The bandwidth (localhost)"
}

@@ -3,12 +3,12 @@
"name": "Average TCP bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average TCP bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "TCP\\/IP connection cost to 127.0.0.1:",
"result_index": "6",
"description": "The latency of TCP connection on localhost."
}
"description": "lat_connect",
"title": "[TCP sockets] The latency of connect"
}

@@ -3,12 +3,12 @@
"name": "Average TCP connection latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average TCP connection latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "Avg xfer: ",
"result_index": "8",
"description": "The bandwidth of simple HTTP transaction with 64MB file."
}
"description": "bw_http",
"title": "[HTTP] The bandwidth"
}

@@ -3,12 +3,12 @@
"name": "Average simple HTTP transaction bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average simple HTTP transaction bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "TCP latency using 127.0.0.1:",
"result_index": "5",
"description": "The latency of TCP on localhost."
}
"description": "lat_tcp",
"title": "[TCP sockets] The latency of write+read"
}

@@ -3,12 +3,12 @@
"name": "Average TCP latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average TCP latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Select on 200 tcp fd's:",
"result_index": "6",
"description": "The latency of select TCP on a single processor."
}
"description": "lat_select",
"title": "[Network] The cost of select (TCP fds)"
}

@@ -3,12 +3,12 @@
"name": "Average select TCP latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average select TCP latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "UDP latency using 127.0.0.1:",
"result_index": "5",
"description": "The latency of UDP on localhost."
}
"description": "lat_udp",
"title": "[UDP sockets] The latency of write+read"
}

@@ -3,12 +3,12 @@
"name": "Average UDP latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average UDP latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customBiggerIsBetter",
"search_pattern": "sock stream bandwidth",
"result_index": "5",
"description": "The bandwidth of UNIX domain socket communication on a single processor."
"description": "bw_unix",
"title": "[Unix sockets] The bandwidth"
}

@@ -3,12 +3,12 @@
"name": "Average unix bandwidth on Linux",
"unit": "MB/s",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average unix bandwidth on Asterinas",
"unit": "MB/s",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "UNIX connection cost:",
"result_index": "4",
"description": "The latency of UNIX domain socket connection on a single processor."
"description": "lat_connect",
"title": "[Unix sockets] The latency of connect"
}

@@ -3,12 +3,12 @@
"name": "Average unix connect latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average unix connect latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "sock stream latency",
"result_index": "5",
"description": "The latency of UNIX domain socket communication on a single processor."
"description": "lat_unix",
"title": "[Unix sockets] The latency of write+read"
}

@@ -3,12 +3,12 @@
"name": "Average unix latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average unix latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Fcntl lock latency:",
"result_index": "4",
"description": "The latency of file locking on a single processor."
}
"description": "lat_fcntl",
"title": "[VFS] The cost of record locking/unlocking via fcntl"
}

@@ -3,12 +3,12 @@
"name": "Average file locking latency on Linux",
"unit": "µs",
"value": 0,
"extra": "linux_avg"
"extra": "linux_result"
},
{
"name": "Average file locking latency on Asterinas",
"unit": "µs",
"value": 0,
"extra": "aster_avg"
"extra": "aster_result"
}
]

@@ -3,5 +3,6 @@
"alert_tool": "customSmallerIsBetter",
"search_pattern": "Simple fstat",
"result_index": "3",
"description": "The latency of the fstat system call on a single processor."
}
"description": "lat_syscall fstat",
"title": "[VFS] The cost of fstat"
}

Some files were not shown because too many files have changed in this diff.