Runs performance benchmarks and generates a comparison report when the session ends.
Hook script location: .claude/hooks/ (project) or ~/.claude/hooks/ (user-level).

{
"hooks": {
"stop": {
"script": "./.claude/hooks/performance-benchmark-report.sh",
"timeout": 120000
}
}
}

#!/usr/bin/env bash
# Performance Benchmark Report Hook
# Runs comprehensive performance benchmarks when the session ends
echo "⚡ Performance Benchmark Report" >&2
echo "=============================" >&2
# Initialize benchmark tracking
BENCHMARKS_RUN=0
BENCHMARKS_PASSED=0
BENCHMARKS_FAILED=0
TOTAL_DURATION=0
START_TIME=$(date +%s)
BENCHMARK_RESULTS_DIR=".performance-reports"
TIMESTAMP=$(date +"%Y-%m-%d-%H-%M-%S")
REPORT_FILE="$BENCHMARK_RESULTS_DIR/benchmark-$TIMESTAMP.json"
# Create benchmark results directory
mkdir -p "$BENCHMARK_RESULTS_DIR"
# Function to report benchmark results
report_benchmark() {
local status="$1"
local name="$2"
local duration="$3"
local details="$4"
BENCHMARKS_RUN=$((BENCHMARKS_RUN + 1))
case "$status" in
"PASS")
echo "✅ PASS: $name (${duration}s)" >&2
BENCHMARKS_PASSED=$((BENCHMARKS_PASSED + 1))
;;
"FAIL")
echo "❌ FAIL: $name (${duration}s)" >&2
BENCHMARKS_FAILED=$((BENCHMARKS_FAILED + 1))
;;
"SKIP")
echo "⏭️ SKIP: $name - $details" >&2
;;
"INFO")
echo "ℹ️ INFO: $name" >&2
;;
esac
if [ -n "$duration" ] && [ "$duration" != "0" ]; then
TOTAL_DURATION=$((TOTAL_DURATION + duration))
fi
}
# Function to run command with timing
run_timed_benchmark() {
local name="$1"
local command="$2"
local timeout_seconds="${3:-60}"
echo " 🔄 Running: $name..." >&2
local start_time=$(date +%s)
local output_file="/tmp/benchmark_${name//[^a-zA-Z0-9]/_}_$$"
if timeout "${timeout_seconds}s" bash -c "$command" > "$output_file" 2>&1; then
local end_time=$(date +%s)
local duration=$((end_time - start_time))
report_benchmark "PASS" "$name" "$duration"
# Show brief output
if [ -s "$output_file" ]; then
echo " 📊 Results:" >&2
head -5 "$output_file" | while read -r line; do
echo " $line" >&2
done
fi
else
local end_time=$(date +%s)
local duration=$((end_time - start_time))
report_benchmark "FAIL" "$name" "$duration"
# Show error output
if [ -s "$output_file" ]; then
echo " ❌ Error:" >&2
tail -3 "$output_file" | while read -r line; do
echo " $line" >&2
done
fi
fi
rm -f "$output_file"
}
# Function to detect project type and language
detect_project_type() {
local project_types=()
[ -f "package.json" ] && project_types+=("nodejs")
[ -f "requirements.txt" ] || [ -f "pyproject.toml" ] && project_types+=("python")
[ -f "go.mod" ] && project_types+=("go")
[ -f "Cargo.toml" ] && project_types+=("rust")
[ -f "composer.json" ] && project_types+=("php")
[ -f "Gemfile" ] && project_types+=("ruby")
[ -f "pom.xml" ] || [ -f "build.gradle" ] && project_types+=("java")
echo "${project_types[@]}"
}
# Initialize JSON report
cat > "$REPORT_FILE" << EOF
{
"timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
"session_id": "$(uuidgen 2>/dev/null || echo "session-$TIMESTAMP")",
"project_path": "$(pwd)",
"project_name": "$(basename "$(pwd)")",
"benchmarks": [
EOF
# Detect project types
PROJECT_TYPES=($(detect_project_type))
if [ ${#PROJECT_TYPES[@]} -eq 0 ]; then
report_benchmark "INFO" "No recognized project structure found"
else
echo " 🔍 Detected project types: ${PROJECT_TYPES[*]}" >&2
fi
# 1. Node.js Benchmarks
if [[ " ${PROJECT_TYPES[*]} " =~ " nodejs " ]]; then
echo "📦 Node.js Performance Benchmarks" >&2
# Check for benchmark scripts in package.json
if [ -f "package.json" ]; then
BENCHMARK_SCRIPTS=$(jq -r '.scripts | to_entries[] | select(.key | test("benchmark|perf")) | .key' package.json 2>/dev/null || echo "")
if [ -n "$BENCHMARK_SCRIPTS" ]; then
echo "$BENCHMARK_SCRIPTS" | while read script; do
run_timed_benchmark "npm run $script" "npm run $script" 180
done
else
report_benchmark "SKIP" "Node.js benchmarks" "No benchmark scripts found in package.json"
fi
# Bundle size analysis
if command -v npx &> /dev/null; then
if [ -f "dist/" ] || [ -f "build/" ]; then
run_timed_benchmark "Bundle size analysis" "npx bundlesize" 60
fi
# Build performance
if jq -e '.scripts.build' package.json >/dev/null 2>&1; then
run_timed_benchmark "Build performance" "npm run build" 300
fi
# Test performance
if jq -e '.scripts.test' package.json >/dev/null 2>&1; then
run_timed_benchmark "Test suite performance" "npm test" 180
fi
fi
fi
fi
# 2. Python Benchmarks
if [[ " ${PROJECT_TYPES[*]} " =~ " python " ]]; then
echo "🐍 Python Performance Benchmarks" >&2
# pytest-benchmark
if command -v pytest &> /dev/null && ([ -f "pytest.ini" ] || [ -f "pyproject.toml" ]); then
run_timed_benchmark "pytest benchmarks" "pytest --benchmark-only --benchmark-json=/tmp/pytest_benchmark.json" 300
fi
# Python timeit benchmarks
if [ -f "benchmark.py" ]; then
run_timed_benchmark "Python benchmark.py" "python benchmark.py" 120
fi
# Memory profiling
if command -v python &> /dev/null && command -v pip &> /dev/null; then
run_timed_benchmark "Memory profiling" "python -c 'import psutil; print(f\"Memory usage: {psutil.virtual_memory().percent}%\")'" 10
fi
fi
# 3. Go Benchmarks
if [[ " ${PROJECT_TYPES[*]} " =~ " go " ]]; then
echo "🐹 Go Performance Benchmarks" >&2
if command -v go &> /dev/null; then
# Go test benchmarks
run_timed_benchmark "Go benchmarks" "go test -bench=. -benchmem" 180
# Build performance
run_timed_benchmark "Go build performance" "go build -o /tmp/go_build_test" 60
# Clean up
rm -f /tmp/go_build_test
fi
fi
# 4. Rust Benchmarks
if [[ " ${PROJECT_TYPES[*]} " =~ " rust " ]]; then
echo "🦀 Rust Performance Benchmarks" >&2
if command -v cargo &> /dev/null; then
# Cargo bench
run_timed_benchmark "Cargo benchmarks" "cargo bench" 300
# Build performance
run_timed_benchmark "Cargo build performance" "cargo build --release" 180
# Test performance
run_timed_benchmark "Cargo test performance" "cargo test" 120
fi
fi
# 5. Web Performance Benchmarks
echo "🌐 Web Performance Analysis" >&2
# Check if this looks like a web project
WEB_PROJECT=false
if [ -f "package.json" ] && grep -q '"next"\\|"react"\\|"vue"\\|"angular"\\|"express"\\|"koa"' package.json; then
WEB_PROJECT=true
elif [ -f "index.html" ] || [ -d "public" ] || [ -d "static" ]; then
WEB_PROJECT=true
fi
if [ "$WEB_PROJECT" = true ]; then
# Lighthouse audit (if available)
if command -v lighthouse &> /dev/null; then
# Check for running dev server
if curl -s http://localhost:3000 >/dev/null 2>&1; then
run_timed_benchmark "Lighthouse audit (localhost:3000)" "lighthouse http://localhost:3000 --output json --quiet --chrome-flags='--headless' --no-sandbox" 120
elif curl -s http://localhost:8080 >/dev/null 2>&1; then
run_timed_benchmark "Lighthouse audit (localhost:8080)" "lighthouse http://localhost:8080 --output json --quiet --chrome-flags='--headless' --no-sandbox" 120
else
report_benchmark "SKIP" "Lighthouse audit" "No local server detected"
fi
else
report_benchmark "SKIP" "Lighthouse audit" "Lighthouse not installed"
fi
# Bundle analyzer (if available)
if command -v npx &> /dev/null && [ -f "package.json" ]; then
if [ -d "dist" ] || [ -d "build" ] || [ -d ".next" ]; then
run_timed_benchmark "Bundle analysis" "npx webpack-bundle-analyzer --help >/dev/null && echo 'Bundle analyzer available'" 10
fi
fi
else
report_benchmark "SKIP" "Web performance" "Not a web project"
fi
# 6. Database Benchmarks
echo "🗄️ Database Performance Analysis" >&2
# Check for database connections
if [ -f ".env" ] && grep -q 'DATABASE_URL\\|DB_' .env; then
report_benchmark "INFO" "Database configuration detected"
# Simple connection test
if command -v psql &> /dev/null && grep -q 'postgres' .env 2>/dev/null; then
run_timed_benchmark "PostgreSQL connection test" "timeout 10s psql \"$(grep DATABASE_URL .env | cut -d'=' -f2)\" -c 'SELECT 1;'" 15
fi
if command -v mysql &> /dev/null && grep -q 'mysql' .env 2>/dev/null; then
run_timed_benchmark "MySQL connection test" "timeout 10s mysql --execute='SELECT 1;'" 15
fi
else
report_benchmark "SKIP" "Database benchmarks" "No database configuration found"
fi
# 7. Load Testing (if tools available)
echo "🔥 Load Testing" >&2
if command -v hyperfine &> /dev/null; then
# Hyperfine command benchmarks
if [ -f "package.json" ]; then
if jq -e '.scripts.start' package.json >/dev/null 2>&1; then
run_timed_benchmark "Command timing analysis" "hyperfine --warmup 1 'npm run start --version' 'npm run build --help'" 30
fi
fi
else
report_benchmark "SKIP" "Hyperfine benchmarks" "Hyperfine not installed"
fi
if command -v ab &> /dev/null; then
# Apache Bench (if server is running)
if curl -s http://localhost:3000 >/dev/null 2>&1; then
run_timed_benchmark "Apache Bench load test" "ab -n 100 -c 10 http://localhost:3000/" 60
fi
else
report_benchmark "SKIP" "Apache Bench" "ab not installed"
fi
# 8. Historical Comparison
echo "📈 Historical Performance Analysis" >&2
# Find previous benchmark reports
PREVIOUS_REPORTS=($(ls -t "$BENCHMARK_RESULTS_DIR"/benchmark-*.json 2>/dev/null | head -5))
if [ ${#PREVIOUS_REPORTS[@]} -gt 1 ]; then
LATEST_PREVIOUS="${PREVIOUS_REPORTS[1]}"
echo " 📊 Comparing with previous run: $(basename "$LATEST_PREVIOUS")" >&2
if [ -f "$LATEST_PREVIOUS" ] && command -v jq &> /dev/null; then
PREV_DURATION=$(jq -r '.summary.total_duration // 0' "$LATEST_PREVIOUS" 2>/dev/null || echo "0")
if [ "$PREV_DURATION" -gt 0 ] && [ "$TOTAL_DURATION" -gt 0 ]; then
DURATION_DIFF=$((TOTAL_DURATION - PREV_DURATION))
PERCENT_CHANGE=$(echo "scale=1; $DURATION_DIFF * 100 / $PREV_DURATION" | bc -l 2>/dev/null || echo "0")
if [ "$DURATION_DIFF" -gt 0 ]; then
echo " ⬆️ Performance regression: +${PERCENT_CHANGE}% slower" >&2
elif [ "$DURATION_DIFF" -lt 0 ]; then
echo " ⬇️ Performance improvement: ${PERCENT_CHANGE#-}% faster" >&2
else
echo " ➡️ Performance unchanged" >&2
fi
fi
fi
else
echo " 📋 No previous benchmarks found for comparison" >&2
fi
# Complete JSON report
END_TIME=$(date +%s)
SESSION_DURATION=$((END_TIME - START_TIME))
cat >> "$REPORT_FILE" << EOF
],
"summary": {
"benchmarks_run": $BENCHMARKS_RUN,
"benchmarks_passed": $BENCHMARKS_PASSED,
"benchmarks_failed": $BENCHMARKS_FAILED,
"total_duration": $TOTAL_DURATION,
"session_duration": $SESSION_DURATION
},
"project_types": [$(printf '"%s",' "${PROJECT_TYPES[@]}" | sed 's/,$//')]
}
EOF
# 9. Generate Final Report
echo "" >&2
echo "📊 Performance Benchmark Summary" >&2
echo "================================" >&2
echo " 🏃 Benchmarks run: $BENCHMARKS_RUN" >&2
echo " ✅ Passed: $BENCHMARKS_PASSED" >&2
echo " ❌ Failed: $BENCHMARKS_FAILED" >&2
echo " ⏱️ Total benchmark time: ${TOTAL_DURATION}s" >&2
echo " 🕐 Session duration: ${SESSION_DURATION}s" >&2
echo " 📄 Report saved: $REPORT_FILE" >&2
# Performance assessment
if [ "$BENCHMARKS_FAILED" -eq 0 ] && [ "$BENCHMARKS_PASSED" -gt 0 ]; then
echo " 🎉 Status: All benchmarks passed" >&2
elif [ "$BENCHMARKS_FAILED" -gt 0 ]; then
echo " ⚠️ Status: Some benchmarks failed" >&2
elif [ "$BENCHMARKS_RUN" -eq 0 ]; then
echo " ℹ️ Status: No benchmarks configured" >&2
else
echo " ❓ Status: Mixed results" >&2
fi
echo "" >&2
echo "💡 Performance Optimization Tips:" >&2
echo " • Run benchmarks regularly to catch regressions early" >&2
echo " • Set up CI/CD performance gates" >&2
echo " • Monitor Core Web Vitals for web applications" >&2
echo " • Profile memory usage and optimize bottlenecks" >&2
echo " • Use caching strategies to improve response times" >&2
echo " • Consider lazy loading and code splitting" >&2
echo "⚡ Performance benchmark report complete" >&2
exit 0

Hook timeout reached before benchmarks complete execution
Increase the hook timeout in hookConfig (e.g. timeout: 300000 for 5 minutes). Reduce benchmark scope by skipping slow tests, and wrap individual benchmark commands in timeout 120s so a single slow test cannot block the rest.
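A minimal settings sketch with a longer stop-hook timeout (the 300000 ms value is only an example):

{
  "hooks": {
    "stop": {
      "script": "./.claude/hooks/performance-benchmark-report.sh",
      "timeout": 300000
    }
  }
}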
npm run benchmark fails with 'script not found' in package.json
Check that the script exists before running it: jq -e '.scripts.benchmark' package.json >/dev/null. Add a fallback: if ! jq -e '.scripts.benchmark' package.json >/dev/null; then echo 'No benchmark script'; exit 0; fi. Skip gracefully instead of failing.
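One way to express that guard inside this hook, reusing its own helpers (a sketch, assuming jq is installed):

# Run the npm benchmark script only if package.json actually defines one
if jq -e '.scripts.benchmark' package.json >/dev/null 2>&1; then
  run_timed_benchmark "npm run benchmark" "npm run benchmark" 180
else
  report_benchmark "SKIP" "Node.js benchmarks" "No benchmark script in package.json"
fi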
Lighthouse audit fails with 'No Chrome installation found'
Install Chrome/Chromium: brew install chromium on macOS or apt-get install chromium-browser on Linux. Set CHROME_PATH environment variable. Use --chrome-flags='--headless --no-sandbox' for CI environments.
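A sketch of pointing Lighthouse at an explicit browser binary (the chromium path is an example; adjust for your system):

# Tell Lighthouse's chrome-launcher which binary to use if auto-detection fails
export CHROME_PATH="${CHROME_PATH:-/usr/bin/chromium-browser}"
if command -v lighthouse >/dev/null 2>&1 && [ -x "$CHROME_PATH" ]; then
  lighthouse http://localhost:3000 --output json --quiet --chrome-flags='--headless --no-sandbox'
else
  echo "⏭️ SKIP: Lighthouse or Chrome/Chromium not available" >&2
fi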
Historical comparison crashes with jq parse errors on old reports
Validate JSON before parsing the previous report: jq empty "$LATEST_PREVIOUS" 2>/dev/null, otherwise skip the comparison. Handle malformed reports gracefully. Add a schema version field to new reports: {"schema_version": "1.0", ...}.
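A hedged sketch of that validation as it would fit this script's historical-comparison step:

# Only compare against the previous report if it exists and parses as JSON
if [ -f "$LATEST_PREVIOUS" ] && jq empty "$LATEST_PREVIOUS" >/dev/null 2>&1; then
  PREV_DURATION=$(jq -r '.summary.total_duration // 0' "$LATEST_PREVIOUS")
else
  echo " 📋 Previous report missing or malformed; skipping comparison" >&2
  PREV_DURATION=0
fi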
Stop hook runs benchmarks even when session ends with errors
Check the exit status context if it is available. Add conditional execution: [ -f .benchmark-enabled ] || exit 0, and create the .benchmark-enabled flag only when the user explicitly requests benchmarking, so the hook does not run unnecessarily.
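A sketch of the opt-in guard, placed near the top of performance-benchmark-report.sh (the .benchmark-enabled flag-file name is just an example):

# Skip all benchmarking unless the user has explicitly opted in for this project
if [ ! -f ".benchmark-enabled" ]; then
  echo "⏭️ Benchmarks disabled; create .benchmark-enabled to opt in" >&2
  exit 0
fi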