Reconstruct benchmark workflow to support multiple architectures

Author: Fabian LI, 2025-04-10 10:55:33 +08:00 (committed by Tate, Hongliang Tian)
Parent: 6aba270a9b
Commit: 5e4b612b46
4 changed files with 350 additions and 134 deletions

.github/actions/benchmark/action.yml (new file, 147 lines)

@@ -0,0 +1,147 @@
name: 'Benchmark'
description: 'Run benchmarks for Asterinas'
inputs:
task:
description: 'Task to run (benchmark, result)'
required: true
platform:
description: 'Platform to benchmark (x86-64, tdx)'
required: true
benchmark:
description: 'The benchmark to run'
required: false
benchmark-secret:
description: 'Secret token for benchmark action data submission'
required: false
runs:
using: 'composite'
steps:
- name: Set up the environment
shell: bash
run: |
# If the task is 'benchmark', set up the environment for benchmarking
# If the task is 'result', set up the environment for result processing
if [[ "${{ inputs.task }}" == "benchmark" ]]; then
echo "Setting up environment for benchmarking..."
git config --global --add safe.directory /__w/asterinas/asterinas
git config --global http.sslVerify false
git config --global http.version HTTP/1.1
elif [[ "${{ inputs.task }}" == "result" ]]; then
echo "Setting up environment for result processing..."
sudo apt-get update && sudo apt-get install -y yq jq
else
echo "Unknown task: ${{ inputs.task }}"
exit 1
fi
- name: Run benchmarks
if: ${{ inputs.task == 'benchmark' }}
shell: bash
run: |
make install_osdk
bash test/benchmark/bench_linux_and_aster.sh "${{ inputs.benchmark }}" "${{ inputs.platform }}"
BENCHMARK_ARTIFACT=results_$(echo "${{ inputs.benchmark }}" | tr '/' '-')
echo "BENCHMARK_ARTIFACT=$BENCHMARK_ARTIFACT" >> $GITHUB_ENV
- name: Store benchmark results
if: ${{ inputs.task == 'benchmark' }}
uses: actions/upload-artifact@v4
with:
name: ${{ env.BENCHMARK_ARTIFACT }}
if-no-files-found: error # Fail the benchmark job if no file is found.
path: |
result_*.json
- name: Download Benchmark Results
if: ${{ inputs.task == 'result' }}
uses: actions/download-artifact@v4
with:
pattern: results_*
path: ./results
merge-multiple: true
- name: Generate all benchmark config files
if: ${{ inputs.task == 'result' }}
shell: bash
run: |
mkdir -p configs
BENCHMARK_LIST=$(ls results/result_*.json | sed 's/.*result_//' | sed 's/\.json//' | jq -R -s -c 'split("\n")[:-1]')
echo "Processing benchmarks: $BENCHMARK_LIST"
# Loop through the benchmark identifiers derived from the downloaded result files
for benchmark_id in $(echo "$BENCHMARK_LIST" | jq -r '.[]'); do
echo "--- Processing $benchmark_id ---"
BENCHMARK_DIR=$(echo "$benchmark_id" | sed 's/-/\//g')
BENCHMARK_SUITE=$(echo "$BENCHMARK_DIR" | awk -F'/' '{print $1}')
BENCHMARK_NAME=$(echo "$BENCHMARK_DIR" | sed -E 's|^[^/]+/||; s|/bench_results||g; s|/|_|g')
BENCH_RESULT_YAML="test/benchmark/${BENCHMARK_DIR}/bench_result.yaml"
[ -f "$BENCH_RESULT_YAML" ] || BENCH_RESULT_YAML="test/benchmark/${BENCHMARK_DIR}.yaml"
if [ ! -f "$BENCH_RESULT_YAML" ]; then
echo "Warning: YAML file not found for $benchmark_id at $BENCH_RESULT_YAML. Skipping config generation."
continue
fi
# Extract data using yq
ALERT_THRESHOLD=$(yq -r '.alert.threshold // "130%"' "$BENCH_RESULT_YAML")
ALERT_TOOL=$(yq -r 'if (.alert.bigger_is_better == true) then "customBiggerIsBetter" else "customSmallerIsBetter" end' "$BENCH_RESULT_YAML")
TITLE=$(yq -r '.chart.title // "Undefined"' "$BENCH_RESULT_YAML")
DESCRIPTION=$(yq -r '.chart.description // "No description provided"' "$BENCH_RESULT_YAML")
# Generate summary JSON if needed (only once per suite)
SUMMARY_JSON="test/benchmark/$BENCHMARK_SUITE/summary.json"
if [ ! -f "$SUMMARY_JSON" ]; then
SUMMARY_YAML="test/benchmark/$BENCHMARK_SUITE/summary.yaml"
if [ -f "$SUMMARY_YAML" ]; then
yq . "$SUMMARY_YAML" > "$SUMMARY_JSON"
echo "Generated $SUMMARY_JSON"
else
echo "Warning: summary.yaml not found for suite $BENCHMARK_SUITE"
fi
fi
# Define file paths
CONFIG_FILE="configs/config_${benchmark_id}.json"
RESULT_FILE="results/result_${benchmark_id}.json"
# Create JSON structure using jq
jq -n \
--arg title "$TITLE" \
--arg description "$DESCRIPTION" \
--arg suite "${{ inputs.platform }}/$BENCHMARK_SUITE" \
--arg name "$BENCHMARK_NAME" \
--arg threshold "$ALERT_THRESHOLD" \
--arg tool "$ALERT_TOOL" \
--arg result_path "$RESULT_FILE" \
--arg summary_path "$SUMMARY_JSON" \
'{
metadata: {
title: $title,
description: $description,
suite: $suite,
name: $name,
threshold: $threshold,
tool: $tool,
summary: $summary_path
},
result: $result_path
}' > "$CONFIG_FILE"
echo "Generated config file $CONFIG_FILE"
done
- name: Store benchmark results
if: ${{ inputs.task == 'result' }}
uses: asterinas/github-action-benchmark@v5
with:
# Use glob pattern to find all generated config files
output-file-path: "configs/config_*.json"
github-token: ${{ inputs.benchmark-secret }}
gh-repository: 'github.com/asterinas/benchmark'
auto-push: true
comment-on-alert: true
fail-on-alert: false
max-items-in-chart: 60
ref: ${{ github.sha }}
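
As an aside (not part of the diff), the action relies on a simple naming convention: the benchmark task flattens a matrix entry such as lmbench/mem_read_bw (one of the entries in the TDX workflow's matrix below) into an artifact name by turning '/' into '-', and the result task reverses that mapping to locate the matching YAML under test/benchmark/. A minimal sketch of the round trip; it works because the benchmark names use only '/', letters, digits, and '_':

#!/usr/bin/env bash
# Round trip: matrix entry -> artifact name -> benchmark directory.
benchmark="lmbench/mem_read_bw"                     # example matrix entry
artifact="results_$(echo "$benchmark" | tr '/' '-')"
echo "$artifact"                                    # results_lmbench-mem_read_bw
benchmark_id="${artifact#results_}"                 # lmbench-mem_read_bw
benchmark_dir=$(echo "$benchmark_id" | sed 's/-/\//g')
echo "$benchmark_dir"                               # lmbench/mem_read_bw
echo "test/benchmark/${benchmark_dir}/bench_result.yaml"  # primary YAML path the result task tries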

@@ -112,28 +112,12 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Set up the environment
run: |
chmod +x test/benchmark/bench_linux_and_aster.sh
# Set up git due to the network issue on the self-hosted runner
git config --global --add safe.directory /__w/asterinas/asterinas
git config --global http.sslVerify false
git config --global http.version HTTP/1.1
- name: Run benchmarks
run: |
make install_osdk
bash test/benchmark/bench_linux_and_aster.sh "${{ matrix.benchmarks }}"
BENCHMARK_ARTIFACT=results_$(echo "${{ matrix.benchmarks }}" | tr '/' '-')
echo "BENCHMARK_ARTIFACT=$BENCHMARK_ARTIFACT" >> $GITHUB_ENV
- name: Store benchmark results
uses: actions/upload-artifact@v4
uses: ./.github/actions/benchmark
with:
name: ${{ env.BENCHMARK_ARTIFACT }}
if-no-files-found: error # Fail the benchmark job if no file is found.
path: |
result_*.json
task: benchmark
platform: x86-64
benchmark: ${{ matrix.benchmarks }}
Results:
runs-on: ubuntu-latest
@@ -142,95 +126,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Download Benchmark Results
uses: actions/download-artifact@v4
with:
pattern: results_*
path: ./results
merge-multiple: true
- name: Set up the environment
run: |
sudo apt-get update && sudo apt-get install -y yq jq
- name: Generate all benchmark config files
run: |
mkdir -p configs
BENCHMARK_LIST=$(ls results/result_*.json | sed 's/.*result_//' | sed 's/\.json//' | jq -R -s -c 'split("\n")[:-1]')
echo "Processing benchmarks: $BENCHMARK_LIST"
# Loop through the benchmark identifiers provided by the Matrix job
for benchmark_id in $(echo "$BENCHMARK_LIST" | jq -r '.[]'); do
echo "--- Processing $benchmark_id ---"
BENCHMARK_DIR=$(echo "$benchmark_id" | sed 's/-/\//g')
BENCHMARK_SUITE=$(echo "$BENCHMARK_DIR" | awk -F'/' '{print $1}')
BENCHMARK_NAME=$(echo "$BENCHMARK_DIR" | sed -E 's|^[^/]+/||; s|/bench_results||g; s|/|_|g')
BENCH_RESULT_YAML="test/benchmark/${BENCHMARK_DIR}/bench_result.yaml"
[ -f "$BENCH_RESULT_YAML" ] || BENCH_RESULT_YAML="test/benchmark/${BENCHMARK_DIR}.yaml"
if [ ! -f "$BENCH_RESULT_YAML" ]; then
echo "Warning: YAML file not found for $benchmark_id at $BENCH_RESULT_YAML. Skipping config generation."
continue
fi
# Extract data using yq
ALERT_THRESHOLD=$(yq -r '.alert.threshold // "130%"' "$BENCH_RESULT_YAML")
ALERT_TOOL=$(yq -r 'if (.alert.bigger_is_better == true) then "customBiggerIsBetter" else "customSmallerIsBetter" end' "$BENCH_RESULT_YAML")
TITLE=$(yq -r '.chart.title // "Undefined"' "$BENCH_RESULT_YAML")
DESCRIPTION=$(yq -r '.chart.description // "No description provided"' "$BENCH_RESULT_YAML")
# Generate summary JSON if needed (only once per suite)
SUMMARY_JSON="test/benchmark/$BENCHMARK_SUITE/summary.json"
if [ ! -f "$SUMMARY_JSON" ]; then
SUMMARY_YAML="test/benchmark/$BENCHMARK_SUITE/summary.yaml"
if [ -f "$SUMMARY_YAML" ]; then
yq . "$SUMMARY_YAML" > "$SUMMARY_JSON"
echo "Generated $SUMMARY_JSON"
else
echo "Warning: summary.yaml not found for suite $BENCHMARK_SUITE"
fi
fi
# Define file paths
CONFIG_FILE="configs/config_${benchmark_id}.json"
RESULT_FILE="results/result_${benchmark_id}.json"
# Create JSON structure using jq
jq -n \
--arg title "$TITLE" \
--arg description "$DESCRIPTION" \
--arg suite "$BENCHMARK_SUITE" \
--arg name "$BENCHMARK_NAME" \
--arg threshold "$ALERT_THRESHOLD" \
--arg tool "$ALERT_TOOL" \
--arg result_path "$RESULT_FILE" \
--arg summary_path "$SUMMARY_JSON" \
'{
metadata: {
title: $title,
description: $description,
suite: $suite,
name: $name,
threshold: $threshold,
tool: $tool,
summary: $summary_path
},
result: $result_path
}' > "$CONFIG_FILE"
echo "Generated config file $CONFIG_FILE"
done
- name: Store benchmark results
uses: asterinas/github-action-benchmark@v5
uses: ./.github/actions/benchmark
with:
# Use glob pattern to find all generated config files
output-file-path: "configs/config_*.json"
github-token: ${{ secrets.BENCHMARK_SECRET }}
gh-repository: 'github.com/asterinas/benchmark'
auto-push: true
comment-on-alert: true
fail-on-alert: false
max-items-in-chart: 60
ref: ${{ github.sha }}
task: result
platform: x86-64
benchmark-secret: ${{ secrets.BENCHMARK_SECRET }}
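
For reference (not part of the diff), the jq -n invocation in the action above emits one config file per benchmark in the shape passed to asterinas/github-action-benchmark via output-file-path. A hedged sketch of what such a file might look like for a hypothetical lmbench/mem_read_bw run on x86-64; the title, description, threshold, and tool values are illustrative, not taken from a real bench_result.yaml:

# Illustrative only: a possible configs/config_lmbench-mem_read_bw.json
cat <<'EOF' > configs/config_lmbench-mem_read_bw.json
{
  "metadata": {
    "title": "lmbench memory read bandwidth",
    "description": "Measures sequential memory read bandwidth",
    "suite": "x86-64/lmbench",
    "name": "mem_read_bw",
    "threshold": "130%",
    "tool": "customBiggerIsBetter",
    "summary": "test/benchmark/lmbench/summary.json"
  },
  "result": "results/result_lmbench-mem_read_bw.json"
}
EOF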

.github/workflows/benchmark_x86_tdx.yml (new file, 136 lines)

@@ -0,0 +1,136 @@
name: "Benchmark Intel TDX"
on:
# In case of manual trigger, use workflow_dispatch
workflow_dispatch:
schedule:
# Scheduled to run every day at 18:00 UTC (02:00 Beijing Time)
- cron: '0 18 * * *'
jobs:
Benchmarks:
runs-on: self-hosted
strategy:
matrix:
benchmarks:
- sysbench/cpu_lat
- sysbench/thread_lat
# Memory-related benchmarks
- lmbench/mem_read_bw
- lmbench/mem_write_bw
- lmbench/mem_copy_bw
- lmbench/mem_pagefault_lat
- lmbench/mem_mmap_bw
- lmbench/mem_mmap_lat
# Process-related benchmarks
- lmbench/process_getppid_lat
- lmbench/process_ctx_lat
- lmbench/process_fork_lat
- lmbench/process_exec_lat
- lmbench/process_shell_lat
# Signal-related benchmarks
- lmbench/signal_catch_lat
- lmbench/signal_install_lat
- lmbench/signal_prot_lat
# IPC-related benchmarks
- lmbench/pipe_lat
- lmbench/pipe_bw
- lmbench/fifo_lat
- lmbench/semaphore_lat
- lmbench/unix_lat
- lmbench/unix_bw
- lmbench/unix_connect_lat
# Syscall-related benchmarks
- lmbench/vfs_fstat_lat
- lmbench/vfs_open_lat
- lmbench/vfs_stat_lat
- lmbench/vfs_write_lat
- lmbench/vfs_read_lat
- lmbench/vfs_select_lat
- lmbench/vfs_fcntl_lat
- lmbench/vfs_read_pagecache_bw
# File-related benchmarks
- lmbench/ramfs_create_delete_files_0k_ops
- lmbench/ramfs_create_delete_files_10k_ops
- lmbench/ramfs_copy_files_bw
- lmbench/ext2_create_delete_files_0k_ops
- lmbench/ext2_create_delete_files_10k_ops
- lmbench/ext2_copy_files_bw
# FIXME: IOMMU is not supported in TDX now.
# - fio/ext2_seq_write_bw
# - fio/ext2_seq_read_bw
- fio/ext2_seq_write_bw_no_iommu
- fio/ext2_seq_read_bw_no_iommu
# Loopback-related network benchmarks
- lmbench/tcp_loopback_bw_128
- lmbench/tcp_loopback_bw_4k
- lmbench/tcp_loopback_bw_64k
- lmbench/tcp_loopback_lat
- lmbench/tcp_loopback_connect_lat
- lmbench/tcp_loopback_select_lat
- lmbench/tcp_loopback_http_bw
- lmbench/udp_loopback_lat
# VirtIO-net-related network benchmarks
- lmbench/tcp_virtio_bw_128
- lmbench/tcp_virtio_bw_64k
- lmbench/tcp_virtio_connect_lat
- lmbench/tcp_virtio_lat
- lmbench/udp_virtio_lat
- iperf3/tcp_virtio_bw
# Scheduler-related benchmarks
- hackbench/group8_smp1
# FIXME: hackbench panics in multi-core settings now.
# - hackbench/group8_smp8
# - hackbench/group8_smp16
- schbench/smp1
- schbench/smp8
# Nginx benchmarks
- nginx/http_req10k_conc1_bw
- nginx/http_req10k_conc20_bw
- nginx/http_file4KB_bw
- nginx/http_file8KB_bw
- nginx/http_file16KB_bw
- nginx/http_file32KB_bw
- nginx/http_file64KB_bw
# Redis benchmarks
- redis/ping_inline_100k_conc20_rps
- redis/ping_mbulk_100k_conc20_rps
- redis/get_100k_conc20_rps
- redis/set_100k_conc20_rps
# SQLite benchmarks
- sqlite/ext2_benchmarks
- sqlite/ramfs_benchmarks
# Memcached benchmarks
- memcached/t8_conc32_window10k
- memcached/t8_conc32_window20k
- memcached/t16_conc64_window10k
fail-fast: false
# FIXME: Remove the following line after fixing the parallel execution of network benchmarks.
max-parallel: 1
timeout-minutes: 60
container:
image: asterinas/asterinas:0.14.1-20250326-tdx
options: --device=/dev/kvm --privileged
steps:
- uses: actions/checkout@v4
- name: Run benchmarks
uses: ./.github/actions/benchmark
with:
task: benchmark
platform: tdx
benchmark: ${{ matrix.benchmarks }}
Results:
runs-on: ubuntu-latest
needs: Benchmarks
if: always()
steps:
- uses: actions/checkout@v4
- name: Store benchmark results
uses: ./.github/actions/benchmark
with:
task: result
platform: tdx
benchmark-secret: ${{ secrets.BENCHMARK_SECRET }}
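
The platform input is what ultimately distinguishes this workflow from the x86-64 one: the benchmark script drops the SCHEME argument and appends INTEL_TDX=1 when the platform is tdx, as shown in the script changes below. A condensed sketch of that logic, with the benchmark name and the SMP/MEM values as placeholders:

# Illustrative only: how the platform value shapes the assembled make command.
platform="tdx"
cmd=(make run "BENCHMARK=lmbench/mem_read_bw" "SMP=1" "MEM=8G"
     ENABLE_KVM=1 RELEASE_LTO=1 NETDEV=tap VHOST=on)
[[ "$platform" == "tdx" ]] && cmd+=(INTEL_TDX=1)   # SCHEME is also omitted for TDX
echo "${cmd[@]}"
# -> make run BENCHMARK=lmbench/mem_read_bw SMP=1 MEM=8G ENABLE_KVM=1 RELEASE_LTO=1 NETDEV=tap VHOST=on INTEL_TDX=1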

@@ -110,44 +110,76 @@ run_benchmark() {
esac
done <<< "$runtime_configs_str"
# Prepare commands for Asterinas and Linux
local asterinas_cmd="make run BENCHMARK=${benchmark} ${aster_scheme_cmd_part} SMP=${smp_val} MEM=${mem_val} ENABLE_KVM=1 RELEASE_LTO=1 NETDEV=tap VHOST=on 2>&1"
local linux_cmd="/usr/local/qemu/bin/qemu-system-x86_64 \
--no-reboot \
-smp ${smp_val} \
-m ${mem_val} \
-machine q35,kernel-irqchip=split \
-cpu Icelake-Server,-pcid,+x2apic \
--enable-kvm \
-kernel ${LINUX_KERNEL} \
-initrd ${BENCHMARK_ROOT}/../build/initramfs.cpio.gz \
-drive if=none,format=raw,id=x0,file=${BENCHMARK_ROOT}/../build/ext2.img \
-device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off,queue-size=64,num-queues=1,request-merging=off,backend_defaults=off,discard=off,write-zeroes=off,event_idx=off,indirect_desc=off,queue_reset=off \
-append 'console=ttyS0 rdinit=/benchmark/common/bench_runner.sh ${benchmark} linux mitigations=off hugepages=0 transparent_hugepage=never quiet' \
-netdev tap,id=net01,script=${BENCHMARK_ROOT}/../../tools/net/qemu-ifup.sh,downscript=${BENCHMARK_ROOT}/../../tools/net/qemu-ifdown.sh,vhost=on \
-device virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off,csum=off,guest_csum=off,ctrl_guest_offloads=off,guest_tso4=off,guest_tso6=off,guest_ecn=off,guest_ufo=off,host_tso4=off,host_tso6=off,host_ecn=off,host_ufo=off,mrg_rxbuf=off,ctrl_vq=off,ctrl_rx=off,ctrl_vlan=off,ctrl_rx_extra=off,guest_announce=off,ctrl_mac_addr=off,host_ufo=off,guest_uso4=off,guest_uso6=off,host_uso=off \
-nographic \
2>&1"
# Prepare commands for Asterinas and Linux using arrays
local asterinas_cmd_arr=(make run "BENCHMARK=${benchmark}")
# Add the scheme part only if it's non-empty and the platform is not TDX (OSDK doesn't support multiple SCHEME values)
[[ -n "$aster_scheme_cmd_part" && "$platform" != "tdx" ]] && asterinas_cmd_arr+=("$aster_scheme_cmd_part")
asterinas_cmd_arr+=(
"SMP=${smp_val}"
"MEM=${mem_val}"
ENABLE_KVM=1
RELEASE_LTO=1
NETDEV=tap
VHOST=on
)
if [[ "$platform" == "tdx" ]]; then
asterinas_cmd_arr+=(INTEL_TDX=1)
fi
# Trim leading/trailing whitespace from commands before eval
asterinas_cmd=$(echo "$asterinas_cmd" | sed 's/^ *//;s/ *$//;s/ */ /g')
linux_cmd=$(echo "$linux_cmd" | sed 's/^ *//;s/ *$//;s/ */ /g')
# TODO:
# 1. The current Linux kernel is not TDX-compatible; replace it with a TDX-compatible version later.
# 2. `guest_uso4=off,guest_uso6=off,host_uso=off` is not supported by the QEMU in the TDX development image.
local linux_cmd_arr=(
qemu-system-x86_64
--no-reboot
-smp "${smp_val}"
-m "${mem_val}"
-machine q35,kernel-irqchip=split
-cpu Icelake-Server,-pcid,+x2apic
--enable-kvm
-kernel "${LINUX_KERNEL}"
-initrd "${BENCHMARK_ROOT}/../build/initramfs.cpio.gz"
-drive "if=none,format=raw,id=x0,file=${BENCHMARK_ROOT}/../build/ext2.img"
-device "virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off,queue-size=64,num-queues=1,request-merging=off,backend_defaults=off,discard=off,write-zeroes=off,event_idx=off,indirect_desc=off,queue_reset=off"
-append "console=ttyS0 rdinit=/benchmark/common/bench_runner.sh ${benchmark} linux mitigations=off hugepages=0 transparent_hugepage=never quiet"
-netdev "tap,id=net01,script=${BENCHMARK_ROOT}/../../tools/net/qemu-ifup.sh,downscript=${BENCHMARK_ROOT}/../../tools/net/qemu-ifdown.sh,vhost=on"
-nographic
)
if [[ "$platform" != "tdx" ]]; then
linux_cmd_arr+=(
-device "virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off,csum=off,guest_csum=off,ctrl_guest_offloads=off,guest_tso4=off,guest_tso6=off,guest_ecn=off,guest_ufo=off,host_tso4=off,host_tso6=off,host_ecn=off,host_ufo=off,mrg_rxbuf=off,ctrl_vq=off,ctrl_rx=off,ctrl_vlan=off,ctrl_rx_extra=off,guest_announce=off,ctrl_mac_addr=off,host_ufo=off,guest_uso4=off,guest_uso6=off,host_uso=off"
)
else
linux_cmd_arr+=(
-device "virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off,csum=off,guest_csum=off,ctrl_guest_offloads=off,guest_tso4=off,guest_tso6=off,guest_ecn=off,guest_ufo=off,host_tso4=off,host_tso6=off,host_ecn=off,host_ufo=off,mrg_rxbuf=off,ctrl_vq=off,ctrl_rx=off,ctrl_vlan=off,ctrl_rx_extra=off,guest_announce=off,ctrl_mac_addr=off,host_ufo=off"
)
fi
# Run the benchmark depending on the mode
case "${run_mode}" in
"guest_only")
echo "Running benchmark ${benchmark} on Asterinas..."
eval "$asterinas_cmd" | tee ${ASTER_OUTPUT}
# Execute directly from array, redirect stderr to stdout, then tee
"${asterinas_cmd_arr[@]}" 2>&1 | tee "${ASTER_OUTPUT}"
prepare_fs
echo "Running benchmark ${benchmark} on Linux..."
eval "$linux_cmd" | tee ${LINUX_OUTPUT}
# Execute directly from array, redirect stderr to stdout, then tee
"${linux_cmd_arr[@]}" 2>&1 | tee "${LINUX_OUTPUT}"
;;
"host_guest")
# Note: host_guest_bench_runner.sh expects commands as single strings.
# We need to reconstruct the string representation for compatibility.
# Use printf %q to quote arguments safely.
local asterinas_cmd_str
printf -v asterinas_cmd_str '%q ' "${asterinas_cmd_arr[@]}"
local linux_cmd_str
printf -v linux_cmd_str '%q ' "${linux_cmd_arr[@]}"
echo "Running benchmark ${benchmark} on host and guest..."
bash "${BENCHMARK_ROOT}/common/host_guest_bench_runner.sh" \
"${BENCHMARK_ROOT}/${benchmark}" \
"${asterinas_cmd}" \
"${linux_cmd}" \
"${asterinas_cmd_str}" \
"${linux_cmd_str}" \
"${ASTER_OUTPUT}" \
"${LINUX_OUTPUT}"
;;
@@ -181,6 +213,8 @@ cleanup() {
# Main function to coordinate the benchmark run
main() {
local benchmark="$1"
local platform="$2"
if [[ -z "${benchmark}" ]]; then
echo "Error: No benchmark specified" >&2
exit 1
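
The substantive change in this script is replacing eval on hand-built command strings with bash arrays, falling back to printf %q only where host_guest_bench_runner.sh still expects a single string. A minimal sketch of why that matters, using a harmless printf in place of the real QEMU/make commands; every value here is made up:

# Illustrative only: array execution vs. eval on a flat string.
arg='console=ttyS0 rdinit=/bench_runner.sh quiet'   # one argument containing spaces

# Flat string + eval: the spaces in $arg split into separate words unless the
# string is hand-quoted, which is what the old code had to get right.
cmd_str="printf '<%s>\n' -append $arg"
eval "$cmd_str"            # prints 4 fields: -append, console=..., rdinit=..., quiet

# Array: each element stays a single word and no eval is needed.
cmd_arr=(printf '<%s>\n' -append "$arg")
"${cmd_arr[@]}"            # prints 2 fields; $arg stays intact

# printf %q rebuilds a safely quoted string for helpers that expect one.
printf -v cmd_str '%q ' "${cmd_arr[@]}"
bash -c "$cmd_str"         # same 2 fields again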