CI/CD Integration
import { Aside } from '@astrojs/starlight/components';
This guide covers the complete CI/CD pipeline: lint → execute → upload → regression check. Templates are provided for all four supported CI platforms.
Pipeline Stages
Section titled “Pipeline Stages”
A complete performance pipeline has four stages:
- Lint (perf-lint) — catch script anti-patterns before they reach production
- Execute (perf-containers) — run the test with the correct Docker image
- Upload (perf-results-db-cli) — store results centrally
- Regression check (perf-compare) — fail the build if performance has degraded
Required Secrets/Variables
Section titled “Required Secrets/Variables”
Set these in your CI/CD secrets store before configuring pipelines:
| Secret/Variable | Description |
|---|---|
| TARGET_HOST | URL of the system under test (staging) |
| PERF_RESULTS_DB_URL | URL of your perf-results-db instance |
| PERF_RESULTS_DB_API_KEY | API key for perf-results-db |
| PERF_RESULTS_DB_PROJECT_ID | Project UUID from perf-results-db |
| PERF_COMPARE_LICENSE_KEY | perf-compare Pro license (for statistical method) |
GitHub Actions
Section titled “GitHub Actions”
Complete pipeline
Section titled “Complete pipeline”
name: Performance Tests
on: push: branches: [main, develop] paths: - 'tests/performance/**' schedule: - cron: '0 2 * * *' # nightly at 02:00 UTC workflow_dispatch: inputs: vus: description: 'Virtual users' default: '100' duration: description: 'Test duration (e.g. 5m)' default: '5m'
jobs: performance: runs-on: ubuntu-latest timeout-minutes: 30
steps: - uses: actions/checkout@v4
# Stage 1: Lint - name: Lint performance scripts uses: markslilley/perf-lint-action@v1 with: path: tests/performance/ tool: k6 fail-on: error
# Stage 2: Execute - name: Run load test run: | docker run --rm \ -v ${{ github.workspace }}/tests/performance:/tests \ -e TARGET_HOST=${{ vars.TARGET_HOST }} \ ghcr.io/markslilley/perf-k6:latest \ k6 run /tests/load-test.js \ -e VUS=${{ github.event.inputs.vus || '100' }} \ -e DURATION=${{ github.event.inputs.duration || '5m' }} \ --out json=/tests/results.json
# Stage 3: Upload - name: Upload results if: always() uses: markslilley/perf-results-db-action@v1 with: url: ${{ vars.PERF_RESULTS_DB_URL }} api-key: ${{ secrets.PERF_RESULTS_DB_API_KEY }} project-id: ${{ vars.PERF_RESULTS_DB_PROJECT_ID }} file: tests/performance/results.json tool: k6 tags: branch=${{ github.ref_name }},trigger=${{ github.event_name }}
# Stage 4: Regression check - name: Check for regressions run: | npx @martkos-it/perf-compare \ --url ${{ vars.PERF_RESULTS_DB_URL }} \ --project ${{ vars.PERF_RESULTS_DB_PROJECT_ID }} \ --method statistical \ --baseline 10 \ --current 3 \ --alpha 0.05 env: PERF_RESULTS_DB_API_KEY: ${{ secrets.PERF_RESULTS_DB_API_KEY }} PERF_COMPARE_LICENSE_KEY: ${{ secrets.PERF_COMPARE_LICENSE_KEY }} PERF_COMPARE_CONFIG_DIR: /tmp/perf-compare-cache
# Publish report as artifact - name: Generate report if: always() run: | npx @martkos-it/perf-reporting generate \ --file tests/performance/results.json \ --tool k6 \ --output report.html
- uses: actions/upload-artifact@v4 if: always() with: name: perf-report-${{ github.run_number }} path: report.html retention-days: 30
GitLab CI
Section titled “GitLab CI”
stages: - lint - perf-test - analyse
variables: VUS: "100" DURATION: "5m"
perf-lint: stage: lint image: python:3.12-slim script: - pip install --quiet perf-lint-tool - perf-lint check tests/performance/ --format json > lint-results.json - perf-lint check tests/performance/ # human output for CI log artifacts: paths: - lint-results.json expire_in: 7 days rules: - changes: - tests/performance/**
load-test: stage: perf-test image: ghcr.io/markslilley/perf-k6:latest variables: GIT_STRATEGY: fetch script: - k6 run tests/performance/load-test.js -e TARGET_HOST=${TARGET_HOST} -e VUS=${VUS} -e DURATION=${DURATION} --out json=results.json artifacts: paths: - results.json expire_in: 14 days
upload-and-check: stage: analyse image: node:20-alpine dependencies: - load-test script: # Upload - npx perf-results-db-cli upload --url ${PERF_RESULTS_DB_URL} --api-key ${PERF_RESULTS_DB_API_KEY} --project-id ${PERF_RESULTS_DB_PROJECT_ID} --file results.json --tool k6 --tags "branch=${CI_COMMIT_BRANCH},pipeline=${CI_PIPELINE_ID}" # Regression check - npx @martkos-it/perf-compare --url ${PERF_RESULTS_DB_URL} --project ${PERF_RESULTS_DB_PROJECT_ID} --method statistical --baseline 10 --current 3
Jenkins (Declarative)
Section titled “Jenkins (Declarative)”
pipeline { agent { docker { image 'node:20-alpine' args '-v /var/run/docker.sock:/var/run/docker.sock' } }
environment { TARGET_HOST = credentials('target-host') DB_URL = credentials('perf-db-url') DB_API_KEY = credentials('perf-db-api-key') DB_PROJECT_ID = credentials('perf-db-project-id') COMPARE_LICENSE_KEY = credentials('perf-compare-license') }
stages { stage('Lint') { steps { sh 'pip install perf-lint-tool && perf-lint check tests/performance/' } }
stage('Load Test') { steps { sh ''' docker run --rm \ -v ${WORKSPACE}/tests/performance:/tests \ -e TARGET_HOST=${TARGET_HOST} \ ghcr.io/markslilley/perf-k6:latest \ k6 run /tests/load-test.js \ -e VUS=${VUS:-100} -e DURATION=${DURATION:-5m} \ --out json=/tests/results.json ''' } }
stage('Upload & Check') { steps { sh ''' npx perf-results-db-cli upload \ --url ${DB_URL} --api-key ${DB_API_KEY} \ --project-id ${DB_PROJECT_ID} \ --file tests/performance/results.json --tool k6 ''' sh ''' PERF_RESULTS_DB_API_KEY=${DB_API_KEY} \ PERF_COMPARE_LICENSE_KEY=${COMPARE_LICENSE_KEY} \ npx @martkos-it/perf-compare \ --url ${DB_URL} --project ${DB_PROJECT_ID} \ --method statistical --baseline 10 --current 3 ''' } } }
post { always { sh ''' npx @martkos-it/perf-reporting generate \ --file tests/performance/results.json --tool k6 \ --output perf-report.html ''' archiveArtifacts artifacts: 'perf-report.html', fingerprint: true } }}
Azure DevOps
Section titled “Azure DevOps”
trigger: branches: include: - main paths: include: - tests/performance
schedules: - cron: '0 2 * * *' displayName: Nightly performance run branches: include: - main
pool: vmImage: ubuntu-latest
variables: VUS: 100 DURATION: 5m
steps: - task: UsePythonVersion@0 inputs: versionSpec: '3.12'
- script: pip install perf-lint-tool && perf-lint check tests/performance/ displayName: Lint
- script: | docker run --rm \ -v $(Build.SourcesDirectory)/tests/performance:/tests \ -e TARGET_HOST=$(TARGET_HOST) \ ghcr.io/markslilley/perf-k6:latest \ k6 run /tests/load-test.js \ -e VUS=$(VUS) -e DURATION=$(DURATION) \ --out json=/tests/results.json displayName: Load test
- task: NodeTool@0 inputs: versionSpec: '20.x'
- script: | npx perf-results-db-cli upload \ --url $(PERF_RESULTS_DB_URL) \ --api-key $(PERF_RESULTS_DB_API_KEY) \ --project-id $(PERF_RESULTS_DB_PROJECT_ID) \ --file tests/performance/results.json --tool k6 displayName: Upload results
- script: | PERF_RESULTS_DB_API_KEY=$(PERF_RESULTS_DB_API_KEY) \ PERF_COMPARE_LICENSE_KEY=$(PERF_COMPARE_LICENSE_KEY) \ npx @martkos-it/perf-compare \ --url $(PERF_RESULTS_DB_URL) \ --project $(PERF_RESULTS_DB_PROJECT_ID) \ --method statistical --baseline 10 --current 3 displayName: Regression check
- script: | npx @martkos-it/perf-reporting generate \ --file tests/performance/results.json --tool k6 \ --output $(Build.ArtifactStagingDirectory)/perf-report.html displayName: Generate report condition: always()
- task: PublishBuildArtifacts@1 inputs: PathtoPublish: $(Build.ArtifactStagingDirectory) ArtifactName: perf-report condition: always()
Gate on nightly, not on every commit: Statistical regression detection requires enough runs to be meaningful. Use commit-triggered runs for smoke tests only; full load tests should be nightly or on release branches.
Separate staging environments for CI performance tests: Never run load tests against production from CI without explicit controls. Always target a staging or load-test environment.
Parallelise where safe: Lint and smoke tests can run in parallel. Load tests should run sequentially to avoid resource contention affecting results.