Workflow file for this run — from PR #52, "feat[bench]: stronger benchmarking scripts".

# CI workflow: builds the server in release mode, launches it locally, then runs
# tiered performance benchmarks (smoke on PRs; smoke/standard/stress on demand)
# and publishes the results as an artifact.
name: Performance Tests

on:
  # Manual runs may pick a heavier benchmark tier.
  workflow_dispatch:
    inputs:
      benchmark_tier:
        description: 'Benchmark tier to run'
        required: false
        default: 'smoke'
        type: choice
        options:
          - smoke
          - standard
          - stress
  # PRs touching the server, the bench harness, or this workflow run the smoke tier.
  pull_request:
    branches: [main]
    paths:
      - 'server/**'
      - 'bench/**'
      - '.github/workflows/perf.yml'

env:
  CARGO_TERM_COLOR: always

jobs:
  benchmark:
    name: Performance Benchmark
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install build dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libopenslide-dev protobuf-compiler

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable

      - name: Cache Cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-perf-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-

      - name: Build server and tests (release)
        # NOTE(review): this builds from the repo root while the benchmark step
        # below runs from server/ — confirm both resolve the same cargo package,
        # otherwise the perf test binary is rebuilt (or missed) at run time.
        run: |
          cargo build --release
          cargo test --test perf_tests --no-run --release

      - name: Create test directories
        run: |
          mkdir -p /tmp/pathcollab/slides
          mkdir -p bench/load_tests/results

      - name: Start server in background
        run: |
          HOST=127.0.0.1 \
          PORT=8080 \
          SLIDES_DIR=/tmp/pathcollab/slides \
          RUST_LOG=warn \
          ./target/release/pathcollab &
          # Wait for server to be ready (up to 30 s).
          for i in {1..30}; do
            if curl -s http://127.0.0.1:8080/health > /dev/null 2>&1; then
              echo "Server is ready!"
              break
            fi
            echo "Waiting for server... ($i/30)"
            sleep 1
          done
          # Verify health; -f makes a non-2xx response fail this step
          # (plain -s would succeed on an HTTP 500).
          curl -sf http://127.0.0.1:8080/health

      - name: Determine benchmark tier
        id: tier
        run: |
          # Manual runs honour the chosen tier; PR runs always use "smoke".
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            echo "tier=${{ github.event.inputs.benchmark_tier }}" >> "$GITHUB_OUTPUT"
          else
            echo "tier=smoke" >> "$GITHUB_OUTPUT"
          fi

      - name: Run benchmark
        timeout-minutes: 10
        run: |
          # Default runner shell is `bash -e` WITHOUT pipefail, so without this
          # a failing `cargo test` exit code would be masked by `tee`.
          set -o pipefail
          cd server
          cargo test --test perf_tests bench_${{ steps.tier.outputs.tier }} --release -- --ignored --nocapture 2>&1 | tee /tmp/benchmark_results.txt

      - name: Check benchmark results
        run: |
          echo "=== Benchmark Results ==="
          cat /tmp/benchmark_results.txt
          # The benchmark harness prints an explicit verdict line; gate on it.
          if grep -q "OVERALL: PASS" /tmp/benchmark_results.txt; then
            echo "✅ Benchmark passed"
          else
            echo "❌ Benchmark failed"
            exit 1
          fi

      - name: Extract JSON results
        if: always()
        run: |
          # Benchmark output embeds a machine-readable line prefixed "JSON: ".
          grep "^JSON:" /tmp/benchmark_results.txt | sed 's/^JSON: //' > bench/load_tests/results/benchmark.json || true
          # The redirect above creates the file even when grep matches nothing,
          # so test for non-empty (-s) rather than mere existence (-f).
          if [ -s bench/load_tests/results/benchmark.json ]; then
            echo "=== JSON Results ==="
            cat bench/load_tests/results/benchmark.json
          fi

      - name: Collect server metrics
        if: always()
        run: |
          echo "=== Server Metrics ==="
          curl -s http://127.0.0.1:8080/metrics || true
          echo ""
          echo "=== Prometheus Metrics ==="
          curl -s http://127.0.0.1:8080/metrics/prometheus | head -50 || true

      - name: Upload benchmark results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results-${{ steps.tier.outputs.tier }}
          path: |
            bench/load_tests/results/
            /tmp/benchmark_results.txt
          retention-days: 30