diff --git a/.github/workflows/benchmark_asterinas.yml b/.github/workflows/benchmark_asterinas.yml
index 4bf6c15b8..0e252b90f 100644
--- a/.github/workflows/benchmark_asterinas.yml
+++ b/.github/workflows/benchmark_asterinas.yml
@@ -11,7 +11,7 @@ jobs:
     runs-on: self-hosted
     strategy:
       matrix:
-        benchmark:
+        benchmarks:
           - sysbench/cpu_lat
           - sysbench/thread_lat
           # Memory-related benchmarks
@@ -90,68 +90,102 @@ jobs:
       RUSTUP_UPDATE_ROOT: https://mirrors.ustc.edu.cn/rust-static/rustup
     steps:
-      - uses: actions/checkout@v2
-      - name: Set up the environment
-        run: |
-          chmod +x test/benchmark/bench_linux_and_aster.sh
-          # Set up git due to the network issue on the self-hosted runner
-          git config --global --add safe.directory /__w/asterinas/asterinas
-          git config --global http.sslVerify false
-          git config --global http.version HTTP/1.1
+      - uses: actions/checkout@v4
+      - name: Set up the environment
+        run: |
+          chmod +x test/benchmark/bench_linux_and_aster.sh
+          # Set up git due to the network issue on the self-hosted runner
+          git config --global --add safe.directory /__w/asterinas/asterinas
+          git config --global http.sslVerify false
+          git config --global http.version HTTP/1.1

-      - name: Run benchmark
-        uses: nick-invision/retry@v2 # Retry the benchmark command in case of failure
-        with:
-          timeout_minutes: 20
-          max_attempts: 3
-          command: |
-            make install_osdk
-            BENCHMARK_TYPE=$(jq -r '.benchmark_type' test/benchmark/${{ matrix.benchmark }}/config.json)
-            ASTER_SCHEME=$(jq -r '.aster_scheme' test/benchmark/${{ matrix.benchmark }}/config.json)
-            bash test/benchmark/bench_linux_and_aster.sh ${{ matrix.benchmark }} $BENCHMARK_TYPE $ASTER_SCHEME
+      - name: Run benchmarks
+        uses: nick-invision/retry@v3 # Retry the benchmark command in case of failure
+        with:
+          timeout_minutes: 20
+          max_attempts: 3
+          command: |
+            make install_osdk
+            bash test/benchmark/bench_linux_and_aster.sh "${{ matrix.benchmarks }}"
+            BENCHMARK_ARTIFACT=results_$(echo "${{ matrix.benchmarks }}" | tr '/' '-')
+            echo "BENCHMARK_ARTIFACT=$BENCHMARK_ARTIFACT" >> $GITHUB_ENV

-      - name: Set up benchmark configuration
-        run: |
-          ALERT_THRESHOLD=$(jq -r '.alert_threshold' test/benchmark/${{ matrix.benchmark }}/config.json)
-          ALERT_TOOL=$(jq -r '.alert_tool' test/benchmark/${{ matrix.benchmark }}/config.json)
-          TITLE=$(jq -r '.title' test/benchmark/${{ matrix.benchmark }}/config.json)
-          DESCRIPTION=$(jq -r '.description' test/benchmark/${{ matrix.benchmark }}/config.json)
+      - name: Store benchmark results
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ env.BENCHMARK_ARTIFACT }}
+          path: |
+            result_*.json

-          if [ "$ALERT_THRESHOLD" = "null" ]; then
-            ALERT_THRESHOLD="130%"
-          fi
-          if [ "$ALERT_TOOL" = "null" ]; then
-            ALERT_TOOL="customSmallerIsBetter"
-          fi
-          if [ "$TITLE" = "null" ]; then
-            TITLE="${{ matrix.benchmark }}"
-          fi
+  Matrix:
+    runs-on: ubuntu-latest
+    needs: Benchmarks # Must run after the Benchmarks job.
+    if: always() # Always run regardless of whether the previous job was successful or not.
+    outputs:
+      benchmarks: ${{ steps.set-matrix.outputs.benchmarks }}
+    steps:
+      - name: Download results
+        uses: actions/download-artifact@v4
+        with:
+          pattern: results_*
+          path: ./results
+          merge-multiple: true

-          echo "ALERT_THRESHOLD=$ALERT_THRESHOLD" >> $GITHUB_ENV
-          echo "ALERT_TOOL=$ALERT_TOOL" >> $GITHUB_ENV
-          echo "TITLE=$TITLE" >> $GITHUB_ENV
-          echo "DESCRIPTION=$DESCRIPTION" >> $GITHUB_ENV
+      - name: Set matrix for benchmark results
+        id: set-matrix
+        run: |
+          benchmarks=$(ls results/result_*.json | sed 's/.*result_//' | sed 's/\.json//' | jq -R -s -c 'split("\n")[:-1]')
+          echo benchmarks=$benchmarks >> $GITHUB_OUTPUT

-          BENCHMARK_NAME=$(basename "${{ matrix.benchmark }}")
-          echo "BENCHMARK_NAME=$BENCHMARK_NAME" >> $GITHUB_ENV
-          BENCHMARK_STORE_DIR=$(dirname "${{ matrix.benchmark }}")
-          echo "BENCHMARK_STORE_DIR=$BENCHMARK_STORE_DIR" >> $GITHUB_ENV
-          BENCHMARK_SUMMARY=test/benchmark/${BENCHMARK_STORE_DIR}/summary.json
-          echo "BENCHMARK_SUMMARY=$BENCHMARK_SUMMARY" >> $GITHUB_ENV
+  Results:
+    runs-on: ubuntu-latest
+    needs: Matrix
+    strategy:
+      matrix:
+        benchmark: ${{ fromJson(needs.Matrix.outputs.benchmarks) }}
+      max-parallel: 1
+      fail-fast: false

-      - name: Store benchmark results
-        uses: asterinas/github-action-benchmark@v4
-        with:
-          name: ${{ env.BENCHMARK_NAME }}
-          tool: ${{ env.ALERT_TOOL }}
-          output-file-path: result_${{ env.BENCHMARK_NAME }}.json
-          benchmark-data-dir-path: ${{ env.BENCHMARK_STORE_DIR }}
-          github-token: ${{ secrets.BENCHMARK_SECRET }}
-          gh-repository: 'github.com/asterinas/benchmark'
-          auto-push: true
-          alert-threshold: ${{ env.ALERT_THRESHOLD }}
-          comment-on-alert: true
-          fail-on-alert: true
-          chart-title: ${{ env.TITLE }}
-          chart-description: ${{ env.DESCRIPTION }}
-          summary-json-path: ${{ env.BENCHMARK_SUMMARY }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download Benchmark Results
+        uses: actions/download-artifact@v4
+        with:
+          pattern: results_*
+          path: ./results
+          merge-multiple: true
+
+      - name: Set up benchmark configuration
+        run: |
+          BENCHMARK_DIR=$(echo ${{ matrix.benchmark }} | sed 's/-/\//g')
+          BENCHMARK_SUITE=$(echo $BENCHMARK_DIR | awk -F'/' '{print $1}')
+          BENCHMARK_NAME=$(basename "$BENCHMARK_DIR")
+          BENCH_RESULT="test/benchmark/${BENCHMARK_DIR}/bench_result.json"
+          [ -f "$BENCH_RESULT" ] || BENCH_RESULT="test/benchmark/${BENCHMARK_DIR}.json"
+          ALERT_THRESHOLD=$(jq -r '.alert.threshold // "130%"' "$BENCH_RESULT")
+          ALERT_TOOL=$(jq -r 'if .alert.bigger_is_better == true then "customBiggerIsBetter" else "customSmallerIsBetter" end' "$BENCH_RESULT")
+          TITLE=$(jq -r '.chart.title // "Undefined"' "$BENCH_RESULT")
+          DESCRIPTION=$(jq -r '.chart.description // "No description provided"' "$BENCH_RESULT")
+          echo "BENCHMARK_SUITE=$BENCHMARK_SUITE" >> $GITHUB_ENV
+          echo "BENCHMARK_NAME=$BENCHMARK_NAME" >> $GITHUB_ENV
+          echo "ALERT_THRESHOLD=$ALERT_THRESHOLD" >> $GITHUB_ENV
+          echo "ALERT_TOOL=$ALERT_TOOL" >> $GITHUB_ENV
+          echo "TITLE=$TITLE" >> $GITHUB_ENV
+          echo "DESCRIPTION=$DESCRIPTION" >> $GITHUB_ENV
+
+      - name: Store benchmark results
+        uses: asterinas/github-action-benchmark@v4
+        with:
+          name: ${{ env.BENCHMARK_NAME }}
+          tool: ${{ env.ALERT_TOOL }}
+          output-file-path: results/result_${{ matrix.benchmark }}.json
+          benchmark-data-dir-path: ${{ env.BENCHMARK_SUITE }}
+          github-token: ${{ secrets.BENCHMARK_SECRET }}
+          gh-repository: 'github.com/asterinas/benchmark'
+          auto-push: true
+          alert-threshold: ${{ env.ALERT_THRESHOLD }}
+          comment-on-alert: true
+          fail-on-alert: true
+          chart-title: ${{ env.TITLE }}
+          chart-description: ${{ env.DESCRIPTION }}
+          summary-json-path: test/benchmark/${{ env.BENCHMARK_SUITE }}/summary.json
diff --git a/test/benchmark/bench_linux_and_aster.sh b/test/benchmark/bench_linux_and_aster.sh
index 456b28243..3fe637885 100755
--- a/test/benchmark/bench_linux_and_aster.sh
+++ b/test/benchmark/bench_linux_and_aster.sh
@@ -8,61 +8,83 @@ set -o pipefail
 # Ensure all dependencies are installed
 command -v jq >/dev/null 2>&1 || { echo >&2 "jq is not installed. Aborting."; exit 1; }

-# Script directory
-BENCHMARK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
+# Set up paths
+BENCHMARK_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
+source "${BENCHMARK_ROOT}/common/prepare_host.sh"
+RESULT_TEMPLATE="${BENCHMARK_ROOT}/result_template.json"

-# Source the prepare_host.sh script
-source "${BENCHMARK_DIR}/common/prepare_host.sh"
+# Parse benchmark results
+parse_raw_results() {
+    local search_pattern="$1"
+    local result_index="$2"
+    local result_file="$3"

-# Parse the results from the benchmark output
-parse_results() {
-    local benchmark="$1"
-    local search_pattern="$2"
-    local result_index="$3"
-    local linux_output="$4"
-    local aster_output="$5"
-    local result_template="$6"
-    local result_file="$7"
-
-    # Extract numeric result from a specific field in the matching line
+    # Extract and sanitize numeric results
     local linux_result aster_result
-    linux_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${linux_output}" | tr -d '\r' | sed 's/[^0-9.]*//g')
-    aster_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${aster_output}" | tr -d '\r' | sed 's/[^0-9.]*//g')
-
+    linux_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${LINUX_OUTPUT}" | tr -d '\r' | sed 's/[^0-9.]*//g')
+    aster_result=$(awk "/${search_pattern}/ {result=\$$result_index} END {print result}" "${ASTER_OUTPUT}" | tr -d '\r' | sed 's/[^0-9.]*//g')
+
+    # Ensure both results are valid
     if [ -z "${linux_result}" ] || [ -z "${aster_result}" ]; then
         echo "Error: Failed to parse the results from the benchmark output" >&2
         exit 1
     fi

-    echo "Updating the result template with extracted values..."
+    # Write the results into the template
     jq --arg linux_result "${linux_result}" --arg aster_result "${aster_result}" \
         '(.[] | select(.extra == "linux_result") | .value) |= $linux_result |
          (.[] | select(.extra == "aster_result") | .value) |= $aster_result' \
-        "${result_template}" > "${result_file}"
+        "${RESULT_TEMPLATE}" > "${result_file}"
 }

-# Run the benchmark on Linux and Asterinas
+# Generate a new result template based on unit and legend
+generate_template() {
+    local unit="$1"
+    local legend="$2"
+
+    # Replace placeholders with actual system names
+    local linux_legend=${legend//"{system}"/"Linux"}
+    local asterinas_legend=${legend//"{system}"/"Asterinas"}
+
+    # Generate the result template JSON
+    jq -n --arg linux "$linux_legend" --arg aster "$asterinas_legend" --arg unit "$unit" '[
+        { "name": $linux, "unit": $unit, "value": 0, "extra": "linux_result" },
+        { "name": $aster, "unit": $unit, "value": 0, "extra": "aster_result" }
+    ]' > "${RESULT_TEMPLATE}"
+}
+
+# Extract the result file path based on benchmark location
+extract_result_file() {
+    local bench_result="$1"
+    local relative_path="${bench_result#*/benchmark/}"
+    local first_dir="${relative_path%%/*}"
+    local filename=$(basename "$bench_result")
+
+    # Handle different naming conventions for result files
+    if [[ "$filename" == bench_* ]]; then
+        local second_part=$(dirname "$bench_result" | awk -F"/benchmark/$first_dir/" '{print $2}' | cut -d'/' -f1)
+        echo "result_${first_dir}-${second_part}.json"
+    else
+        echo "result_${relative_path//\//-}"
+    fi
+}
+
+# Run the specified benchmark with optional scheme
 run_benchmark() {
     local benchmark="$1"
-    local benchmark_type="$2"
+    local run_mode="$2"
     local aster_scheme="$3"
-    local search_pattern="$4"
-    local result_index="$5"
-
-    local linux_output="${BENCHMARK_DIR}/linux_output.txt"
-    local aster_output="${BENCHMARK_DIR}/aster_output.txt"
-    local result_template="${BENCHMARK_DIR}/${benchmark}/result_template.json"
-    local benchmark_name=$(basename "${benchmark}")
-    local benchmark_root=$(dirname "${benchmark}")
-    local result_file="result_${benchmark_name}.json"
-
-    echo "Preparing libraries..."
     prepare_libs

+    # Set up Asterinas scheme if specified
     local aster_scheme_cmd=""
     if [ -n "$aster_scheme" ] && [ "$aster_scheme" != "null" ]; then
         aster_scheme_cmd="SCHEME=${aster_scheme}"
     fi
+
+    # Prepare commands for Asterinas and Linux
     local asterinas_cmd="make run BENCHMARK=${benchmark} ${aster_scheme_cmd} ENABLE_KVM=1 RELEASE_LTO=1 NETDEV=tap VHOST=on 2>&1"
     local linux_cmd="/usr/local/qemu/bin/qemu-system-x86_64 \
         --no-reboot \
@@ -72,65 +94,97 @@ run_benchmark() {
         -cpu Icelake-Server,-pcid,+x2apic \
         --enable-kvm \
         -kernel ${LINUX_KERNEL} \
-        -initrd ${BENCHMARK_DIR}/../build/initramfs.cpio.gz \
-        -drive if=none,format=raw,id=x0,file=${BENCHMARK_DIR}/../build/ext2.img \
+        -initrd ${BENCHMARK_ROOT}/../build/initramfs.cpio.gz \
+        -drive if=none,format=raw,id=x0,file=${BENCHMARK_ROOT}/../build/ext2.img \
         -device virtio-blk-pci,bus=pcie.0,addr=0x6,drive=x0,serial=vext2,disable-legacy=on,disable-modern=off,queue-size=64,num-queues=1,request-merging=off,backend_defaults=off,discard=off,write-zeroes=off,event_idx=off,indirect_desc=off,queue_reset=off \
         -append 'console=ttyS0 rdinit=/benchmark/common/bench_runner.sh ${benchmark} linux mitigations=off hugepages=0 transparent_hugepage=never quiet' \
-        -netdev tap,id=net01,script=${BENCHMARK_DIR}/../../tools/net/qemu-ifup.sh,downscript=${BENCHMARK_DIR}/../../tools/net/qemu-ifdown.sh,vhost=on \
+        -netdev tap,id=net01,script=${BENCHMARK_ROOT}/../../tools/net/qemu-ifup.sh,downscript=${BENCHMARK_ROOT}/../../tools/net/qemu-ifdown.sh,vhost=on \
         -device virtio-net-pci,netdev=net01,disable-legacy=on,disable-modern=off,csum=off,guest_csum=off,ctrl_guest_offloads=off,guest_tso4=off,guest_tso6=off,guest_ecn=off,guest_ufo=off,host_tso4=off,host_tso6=off,host_ecn=off,host_ufo=off,mrg_rxbuf=off,ctrl_vq=off,ctrl_rx=off,ctrl_vlan=off,ctrl_rx_extra=off,guest_announce=off,ctrl_mac_addr=off,host_ufo=off,guest_uso4=off,guest_uso6=off,host_uso=off \
         -nographic \
         2>&1"

-    case "${benchmark_type}" in
+    # Run the benchmark depending on the mode
+    case "${run_mode}" in
         "guest_only")
             echo "Running benchmark ${benchmark} on Asterinas..."
-            eval "$asterinas_cmd" | tee ${aster_output}
+            eval "$asterinas_cmd" | tee ${ASTER_OUTPUT}
             prepare_fs
             echo "Running benchmark ${benchmark} on Linux..."
-            eval "$linux_cmd" | tee ${linux_output}
+            eval "$linux_cmd" | tee ${LINUX_OUTPUT}
             ;;
         "host_guest")
             echo "Running benchmark ${benchmark} on host and guest..."
-            bash "${BENCHMARK_DIR}/common/host_guest_bench_runner.sh" \
-                "${BENCHMARK_DIR}/${benchmark}" \
+            bash "${BENCHMARK_ROOT}/common/host_guest_bench_runner.sh" \
+                "${BENCHMARK_ROOT}/${benchmark}" \
                 "${asterinas_cmd}" \
                 "${linux_cmd}" \
-                "${aster_output}" \
-                "${linux_output}"
+                "${ASTER_OUTPUT}" \
+                "${LINUX_OUTPUT}"
             ;;
         *)
-            echo "Error: Unknown benchmark type '${benchmark_type}'" >&2
+            echo "Error: Unknown benchmark type '${run_mode}'" >&2
             exit 1
             ;;
     esac
-
-    echo "Parsing results..."
-    parse_results "$benchmark" "$search_pattern" "$result_index" "$linux_output" "$aster_output" "$result_template" "$result_file"
-
-    echo "Cleaning up..."
-    rm -f "${linux_output}"
-    rm -f "${aster_output}"
 }

-# Main
+# Parse the benchmark configuration and extract the results
+parse_results() {
+    local bench_result="$1"

-BENCHMARK="$1"
-if [ -z "$2" ] || [ "$2" = "null" ]; then
-    BENCHMARK_TYPE="guest_only"
-else
-    BENCHMARK_TYPE="$2"
-fi
-ASTER_SCHEME="$3"
+    local search_pattern=$(jq -r '.result_extraction.search_pattern // empty' "$bench_result")
+    local result_index=$(jq -r '.result_extraction.result_index // empty' "$bench_result")
+    local unit=$(jq -r '.chart.unit // empty' "$bench_result")
+    local legend=$(jq -r '.chart.legend // "{system}"' "$bench_result")

-echo "Running benchmark ${BENCHMARK}..."
-pwd
-if [ ! -d "$BENCHMARK_DIR/$BENCHMARK" ]; then
-    echo "Error: Benchmark directory not found" >&2
-    exit 1
-fi
+    generate_template "$unit" "$legend"
+    parse_raw_results "$search_pattern" "$result_index" "$(extract_result_file "$bench_result")"
+}

-search_pattern=$(jq -r '.search_pattern' "$BENCHMARK_DIR/$BENCHMARK/config.json")
-result_index=$(jq -r '.result_index' "$BENCHMARK_DIR/$BENCHMARK/config.json")
+# Clean up temporary files
+cleanup() {
+    echo "Cleaning up..."
+    rm -f "${LINUX_OUTPUT}" "${ASTER_OUTPUT}" "${RESULT_TEMPLATE}"
+}

-run_benchmark "$BENCHMARK" "$BENCHMARK_TYPE" "$ASTER_SCHEME" "$search_pattern" "$result_index"
+# Main function to coordinate the benchmark run
+main() {
+    local benchmark="$1"
+    if [[ -z "$benchmark" ]]; then
+        echo "Error: No benchmark specified" >&2
+        exit 1
+    fi
+    echo "Running benchmark $benchmark..."

-echo "Benchmark completed successfully."
+    # Determine the run mode (guest-only or host-guest)
+    local run_mode="guest_only"
+    [[ -f "${BENCHMARK_ROOT}/${benchmark}/host.sh" ]] && run_mode="host_guest"
+
+    local bench_result="${BENCHMARK_ROOT}/${benchmark}/bench_result.json"
+    local aster_scheme
+    if [[ -f "$bench_result" ]]; then
+        aster_scheme=$(jq -r '.runtime_config.aster_scheme // ""' "$bench_result")
+    else
+        for job in "${BENCHMARK_ROOT}/${benchmark}"/bench_results/*; do
+            [[ -f "$job" ]] && aster_scheme=$(jq -r '.runtime_config.aster_scheme // ""' "$job") && break
+        done
+    fi
+
+    # Run the benchmark
+    run_benchmark "$benchmark" "$run_mode" "$aster_scheme"
+
+    # Parse results if benchmark configuration exists
+    if [[ -f "$bench_result" ]]; then
+        parse_results "$bench_result"
+    else
+        for job in "${BENCHMARK_ROOT}/${benchmark}"/bench_results/*; do
+            [[ -f "$job" ]] && parse_results "$job"
+        done
+    fi
+
+    # Cleanup temporary files
+    cleanup
+    echo "Benchmark completed successfully."
+}
+
+main "$@"
diff --git a/test/benchmark/common/bench_runner.sh b/test/benchmark/common/bench_runner.sh
index 6ab9f5d64..dceaa16f9 100755
--- a/test/benchmark/common/bench_runner.sh
+++ b/test/benchmark/common/bench_runner.sh
@@ -5,12 +5,12 @@
 set -e

-BENCHMARK_DIR="/benchmark"
+BENCHMARK_ROOT="/benchmark"
 READY_MESSAGE="The VM is ready for the benchmark."
-BENCH_NAME=$1
+BENCHMARK_NAME=$1
 SYSTEM="${2:-asterinas}"

-echo "Running benchmark: ${BENCH_NAME} on ${SYSTEM}"
+echo "Running benchmark: ${BENCHMARK_NAME} on ${SYSTEM}"

 print_help() {
     echo "Usage: $0 <benchmark_name> <system>"
@@ -19,17 +19,17 @@
 }

 # Validate arguments
-check_bench_name() {
-    if [ -z "${BENCH_NAME}" ] || [ -z "${SYSTEM}" ]; then
+check_benchmark_name() {
+    if [ -z "${BENCHMARK_NAME}" ] || [ -z "${SYSTEM}" ]; then
         echo "Error: Invalid arguments."
         print_help
         exit 1
     fi

-    local full_path="${BENCHMARK_DIR}/${BENCH_NAME}"
+    local full_path="${BENCHMARK_ROOT}/${BENCHMARK_NAME}"
     if ! [ -d "${full_path}" ]; then
-        echo "Directory '${BENCH_NAME}' does not exist in the benchmark directory."
+        echo "Directory '${BENCHMARK_NAME}' does not exist in the benchmark directory."
         print_help
         exit 1
     fi
@@ -63,7 +63,7 @@ prepare_system() {

 main() {
     # Check if the benchmark name is valid
-    check_bench_name
+    check_benchmark_name

     # Prepare the system
     prepare_system
@@ -73,9 +73,9 @@ main() {
     echo "${READY_MESSAGE}"

     # Run the benchmark
-    BENCH_SCRIPT=${BENCHMARK_DIR}/${BENCH_NAME}/run.sh
-    chmod +x ${BENCH_SCRIPT}
-    ${BENCH_SCRIPT}
+    BENCHMARK_SCRIPT=${BENCHMARK_ROOT}/${BENCHMARK_NAME}/run.sh
+    chmod +x ${BENCHMARK_SCRIPT}
+    ${BENCHMARK_SCRIPT}

     # Shutdown explicitly if running on Linux
     if [ "$SYSTEM" = "linux" ]; then
diff --git a/test/benchmark/common/host_guest_bench_runner.sh b/test/benchmark/common/host_guest_bench_runner.sh
index d0dedb96e..5ab1ad5ea 100755
--- a/test/benchmark/common/host_guest_bench_runner.sh
+++ b/test/benchmark/common/host_guest_bench_runner.sh
@@ -14,8 +14,8 @@ LINUX_OUTPUT=$5
 READY_MESSAGE="The VM is ready for the benchmark."

 # Import the common functions
-BENCHMARK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)/../"
-source "${BENCHMARK_DIR}/common/prepare_host.sh"
+BENCHMARK_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)/../"
+source "${BENCHMARK_ROOT}/common/prepare_host.sh"

 if [[ "$BENCHMARK_PATH" =~ "iperf" ]]; then
     # Persist Iperf port
diff --git a/test/benchmark/common/prepare_host.sh b/test/benchmark/common/prepare_host.sh
index 4be7a9c60..2ba1d9446 100644
--- a/test/benchmark/common/prepare_host.sh
+++ b/test/benchmark/common/prepare_host.sh
@@ -5,14 +5,17 @@
 set -e
 set -o pipefail

-# Set BENCHMARK_DIR to the parent directory of the current directory if it is not set
-BENCHMARK_DIR="${BENCHMARK_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." &>/dev/null && pwd)}"
+# Set BENCHMARK_ROOT to the parent directory of the current directory if it is not set
+BENCHMARK_ROOT="${BENCHMARK_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." &>/dev/null && pwd)}"
+# Set the log files
+LINUX_OUTPUT="${BENCHMARK_ROOT}/linux_output.txt"
+ASTER_OUTPUT="${BENCHMARK_ROOT}/aster_output.txt"

 # Dependencies for Linux
 LINUX_DEPENDENCIES_DIR="/opt/linux_binary_cache"
 LINUX_KERNEL="${LINUX_DEPENDENCIES_DIR}/vmlinuz"
 LINUX_KERNEL_VERSION="5.15.0-105"
-LINUX_MODULES_DIR="${BENCHMARK_DIR}/../build/initramfs/lib/modules/${LINUX_KERNEL_VERSION}/kernel"
-WGET_SCRIPT="${BENCHMARK_DIR}/../../tools/atomic_wget.sh"
+LINUX_MODULES_DIR="${BENCHMARK_ROOT}/../build/initramfs/lib/modules/${LINUX_KERNEL_VERSION}/kernel"
+WGET_SCRIPT="${BENCHMARK_ROOT}/../../tools/atomic_wget.sh"

 # Prepare Linux kernel and modules
 prepare_libs() {
@@ -39,6 +42,6 @@ prepare_libs() {
 # Prepare fs for Linux
 prepare_fs() {
     # Disable unsupported ext2 features of Asterinas on Linux to ensure fairness
-    mke2fs -F -O ^ext_attr -O ^resize_inode -O ^dir_index ${BENCHMARK_DIR}/../build/ext2.img
+    mke2fs -F -O ^ext_attr -O ^resize_inode -O ^dir_index ${BENCHMARK_ROOT}/../build/ext2.img
     make initramfs
 }
\ No newline at end of file
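--
Note: for reference, a sketch of the per-benchmark config file that the scripts and
workflow above consume. The field paths (.alert.threshold, .alert.bigger_is_better,
.chart.title/.description/.unit/.legend, .result_extraction.search_pattern,
.result_extraction.result_index, .runtime_config.aster_scheme) and the file name
bench_result.json are exactly those read by the jq queries in this patch; the concrete
values below are illustrative only and not copied from the repository:

    {
        "alert": {
            "threshold": "130%",
            "bigger_is_better": false
        },
        "chart": {
            "title": "CPU Latency",
            "description": "Average CPU scheduling latency measured by sysbench",
            "unit": "us",
            "legend": "Average latency of sysbench cpu_lat on {system}"
        },
        "result_extraction": {
            "search_pattern": "avg:",
            "result_index": 2
        },
        "runtime_config": {
            "aster_scheme": "null"
        }
    }

Given such a file, generate_template() expands the {system} placeholder in the legend
into one "Linux" and one "Asterinas" series, parse_raw_results() fills in the two
values extracted by awk from the guest output, and the Results job derives the alert
threshold, comparison tool, chart title, and description from the same file.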