CI/CD Integration
Integrate ReAPI tests into your continuous integration and deployment pipelines to ensure API quality at every stage of development.
Integration Strategies
Pre-commit Hooks
Run fast, critical tests before code commits:
# .pre-commit-config.yaml
repos:
  - repo: local
    hooks:
      - id: reapi-smoke-tests
        name: ReAPI Smoke Tests
        entry: reapi run --runner smoke-tests --environment dev
        language: system
        pass_filenames: false
        stages: [commit]

Pull Request Validation
Validate API changes in pull requests:
# .github/workflows/pr-validation.yml
name: API Tests - Pull Request
on:
  pull_request:
    branches: [main, develop]
jobs:
  api-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Run API Contract Tests
        run: |
          reapi run --runner api-contracts \
            --environment staging \
            --output junit \
            --fail-fast
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}
      - name: Publish Test Results
        uses: dorny/test-reporter@v1
        if: always()
        with:
          name: API Test Results
          path: 'test-results.xml'
          reporter: java-junit

Deployment Pipeline Integration
# .github/workflows/deploy.yml
name: Deploy with API Testing
on:
  push:
    branches: [main]
jobs:
  deploy-and-test:
    runs-on: ubuntu-latest
    steps:
      - name: Deploy to Staging
        run: ./deploy.sh staging
      - name: Run Staging Tests
        run: |
          reapi run --runner full-regression \
            --environment staging \
            --wait-for-completion \
            --timeout 1800
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}
      - name: Deploy to Production
        if: success()
        run: ./deploy.sh production
      - name: Run Production Smoke Tests
        run: |
          reapi run --runner smoke-tests \
            --environment production \
            --wait-for-completion
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_PROD_API_KEY }}

CI/CD Platform Examples
GitHub Actions
name: API Testing Pipeline
on:
  schedule:
    - cron: '0 */4 * * *'   # Every 4 hours
  workflow_dispatch:        # Manual trigger
jobs:
  test-environments:
    strategy:
      matrix:
        environment: [dev, staging, prod]
        runner: [smoke-tests, regression]
        exclude:
          - environment: prod
            runner: regression
    runs-on: ubuntu-latest
    steps:
      - name: Run ReAPI Tests
        run: |
          reapi run \
            --runner ${{ matrix.runner }} \
            --environment ${{ matrix.environment }} \
            --output json > results-${{ matrix.environment }}-${{ matrix.runner }}.json
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}
      - name: Upload Results
        uses: actions/upload-artifact@v3
        with:
          name: test-results
          path: results-*.json

Jenkins Pipeline
pipeline {
    agent any
    parameters {
        choice(
            name: 'ENVIRONMENT',
            choices: ['dev', 'staging', 'prod'],
            description: 'Target environment'
        )
        choice(
            name: 'TEST_SUITE',
            choices: ['smoke', 'regression', 'performance'],
            description: 'Test suite to run'
        )
    }
    stages {
        stage('API Tests') {
            steps {
                script {
                    def result = sh(
                        script: """
                            reapi run \
                              --runner ${params.TEST_SUITE} \
                              --environment ${params.ENVIRONMENT} \
                              --output junit \
                              --timeout 3600
                        """,
                        returnStatus: true
                    )
                    if (result != 0) {
                        currentBuild.result = 'UNSTABLE'
                    }
                }
            }
            post {
                always {
                    junit 'test-results.xml'
                    archiveArtifacts artifacts: 'test-results.xml'
                }
            }
        }
    }
    post {
        failure {
            emailext (
                subject: "API Tests Failed - ${params.ENVIRONMENT}",
                body: "API tests failed in ${params.ENVIRONMENT} environment. Check Jenkins for details.",
                to: "${env.TEAM_EMAIL}"
            )
        }
    }
}

GitLab CI/CD
# .gitlab-ci.yml
stages:
  - test
  - deploy
  - verify

variables:
  REAPI_API_KEY: $REAPI_API_KEY

api-tests:
  stage: test
  script:
    - reapi run --runner unit-tests --environment dev --output junit
  artifacts:
    reports:
      junit: test-results.xml
    when: always
    expire_in: 1 week
  only:
    - merge_requests
    - main

deploy-staging:
  stage: deploy
  script:
    - ./deploy.sh staging
  environment:
    name: staging
    url: https://api-staging.example.com
  only:
    - main

verify-staging:
  stage: verify
  script:
    - reapi run --runner integration-tests --environment staging
  dependencies:
    - deploy-staging
  only:
    - main

deploy-production:
  stage: deploy
  script:
    - ./deploy.sh production
  environment:
    name: production
    url: https://api.example.com
  when: manual
  only:
    - main

verify-production:
  stage: verify
  script:
    - reapi run --runner smoke-tests --environment production
  dependencies:
    - deploy-production
  only:
    - main

Advanced Integration Patterns
Multi-Environment Testing
#!/bin/bash
# test-all-environments.sh

environments=("dev" "staging" "prod")
runners=("smoke-tests" "api-contracts")

for env in "${environments[@]}"; do
  for runner in "${runners[@]}"; do
    echo "Running $runner tests in $env environment..."
    reapi run \
      --runner "$runner" \
      --environment "$env" \
      --output json \
      --timeout 1800 > "results-${env}-${runner}.json"

    if [ $? -ne 0 ]; then
      echo "❌ Tests failed in $env environment"
      exit 1
    else
      echo "✅ Tests passed in $env environment"
    fi
  done
done

echo "🎉 All tests passed across all environments!"

Conditional Test Execution
# Only run expensive tests on specific branches
name: Conditional API Testing
on:
  push:
    branches: ['**']
jobs:
  determine-tests:
    runs-on: ubuntu-latest
    outputs:
      run-performance: ${{ steps.check.outputs.run-performance }}
      run-integration: ${{ steps.check.outputs.run-integration }}
    steps:
      - id: check
        run: |
          # Note: the release pattern must be unquoted so bash treats it as a glob
          if [[ "${{ github.ref }}" == "refs/heads/main" ]] || [[ "${{ github.ref }}" == refs/heads/release/* ]]; then
            echo "run-performance=true" >> $GITHUB_OUTPUT
            echo "run-integration=true" >> $GITHUB_OUTPUT
          else
            echo "run-performance=false" >> $GITHUB_OUTPUT
            echo "run-integration=true" >> $GITHUB_OUTPUT
          fi

  integration-tests:
    needs: determine-tests
    if: needs.determine-tests.outputs.run-integration == 'true'
    runs-on: ubuntu-latest
    steps:
      - name: Run Integration Tests
        run: reapi run --runner integration --environment staging

  performance-tests:
    needs: determine-tests
    if: needs.determine-tests.outputs.run-performance == 'true'
    runs-on: ubuntu-latest
    steps:
      - name: Run Performance Tests
        run: reapi run --runner performance --environment staging --timeout 7200

Test Result Analysis
# analyze-test-results.py
import json
import sys


def analyze_results(results_file):
    with open(results_file, 'r') as f:
        results = json.load(f)

    total_tests = results['summary']['total']
    passed_tests = results['summary']['passed']
    failed_tests = results['summary']['failed']
    success_rate = (passed_tests / total_tests) * 100

    print("📊 Test Results Summary:")
    print(f"  Total: {total_tests}")
    print(f"  Passed: {passed_tests}")
    print(f"  Failed: {failed_tests}")
    print(f"  Success Rate: {success_rate:.1f}%")

    # Set exit code based on success rate
    if success_rate < 95:
        print("❌ Success rate below threshold (95%)")
        sys.exit(1)
    elif failed_tests > 0:
        print("⚠️ Some tests failed but within acceptable range")
        sys.exit(0)
    else:
        print("✅ All tests passed!")
        sys.exit(0)


if __name__ == "__main__":
    analyze_results(sys.argv[1])

Monitoring and Alerting
Test Failure Notifications
# Slack notification on test failures
- name: Notify Slack on Failure
  if: failure()
  uses: 8398a7/action-slack@v3
  with:
    status: failure
    text: |
      🚨 API Tests Failed!
      Environment: ${{ matrix.environment }}
      Runner: ${{ matrix.runner }}
      Branch: ${{ github.ref }}
      Commit: ${{ github.sha }}
  env:
    SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}

Performance Regression Detection
# Compare current performance with baseline
reapi run --runner performance-baseline --environment staging --output json > current-results.json

python compare-performance.py baseline-results.json current-results.json
if [ $? -ne 0 ]; then
  echo "Performance regression detected!"
  exit 1
fi

Dashboard Integration
// Send results to monitoring dashboard
// Run as an ES module on Node 18+ so top-level await and the global fetch API are available
import fs from 'node:fs';

const results = JSON.parse(fs.readFileSync('test-results.json'));

const metrics = {
  timestamp: new Date().toISOString(),
  environment: process.env.ENVIRONMENT,
  total_tests: results.summary.total,
  passed_tests: results.summary.passed,
  failed_tests: results.summary.failed,
  success_rate: (results.summary.passed / results.summary.total) * 100,
  avg_response_time: results.performance.avg_response_time
};

// Send to monitoring system
await fetch('https://monitoring.example.com/api/metrics', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(metrics)
});

Best Practices
Pipeline Design
- Run fast tests early in the pipeline
- Use parallel execution for independent test suites
- Implement proper timeout and retry mechanisms
- Cache dependencies and test data when possible
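For example, "run fast tests early" and explicit timeouts map directly onto workflow syntax. The sketch below reuses the smoke-tests and full-regression runners from the examples above; the job names, the timeout values, and the simple shell-level retry are illustrative assumptions rather than ReAPI requirements:

# Sketch: the fast suite gates the slow suite, each with its own timeout
jobs:
  smoke-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 10            # fail fast if the quick suite hangs
    steps:
      - name: Run smoke tests (one shell-level retry for transient failures)
        run: |
          reapi run --runner smoke-tests --environment dev --fail-fast \
            || reapi run --runner smoke-tests --environment dev --fail-fast
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}

  full-regression:
    needs: smoke-tests             # only run the expensive suite once the fast one passes
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Run regression tests
        run: reapi run --runner full-regression --environment staging --timeout 1800
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}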
Environment Management
- Use separate API keys for different environments
- Implement proper secret management
- Test against production-like environments
- Maintain environment parity
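One way to keep keys separated, assuming GitHub Actions is your platform, is to scope the secret to a GitHub environment so each deployment target resolves its own value. The job names below are placeholders:

# Sketch: per-environment API keys via GitHub environments
jobs:
  staging-tests:
    runs-on: ubuntu-latest
    environment: staging            # resolves secrets scoped to "staging"
    steps:
      - run: reapi run --runner smoke-tests --environment staging
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}    # staging-scoped value

  production-tests:
    runs-on: ubuntu-latest
    environment: production         # separate key, optionally gated by required reviewers
    steps:
      - run: reapi run --runner smoke-tests --environment production
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}    # production-scoped value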
Result Handling
- Generate structured test reports (JUnit, JSON)
- Archive test results for historical analysis
- Implement proper failure notification
- Track test performance and trends over time
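Building on the JUnit output and artifact upload shown earlier, a sketch for keeping results around long enough to analyze trends (the artifact name and the 90-day retention are arbitrary choices):

- name: Run tests with structured output
  run: reapi run --runner api-contracts --environment staging --output junit
  env:
    REAPI_API_KEY: ${{ secrets.REAPI_API_KEY }}
- name: Archive results for historical analysis
  if: always()                      # archive results even when the run fails
  uses: actions/upload-artifact@v3
  with:
    name: api-test-results-${{ github.run_number }}
    path: test-results.xml
    retention-days: 90              # keep enough history to spot trends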
Security Considerations
- Store API keys and secrets securely
- Use least-privilege access for CI/CD systems
- Implement proper access controls for test environments
- Rotate API keys and credentials regularly
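A minimal sketch of least-privilege defaults in GitHub Actions (the workflow-level permissions block and the protected production environment are standard GitHub features; the job name is a placeholder):

permissions:
  contents: read                    # the test job only needs read access to the repository
jobs:
  production-smoke:
    runs-on: ubuntu-latest
    environment: production         # protected environment, e.g. with required reviewers
    steps:
      - run: reapi run --runner smoke-tests --environment production
        env:
          REAPI_API_KEY: ${{ secrets.REAPI_PROD_API_KEY }}   # stored as a secret and rotated regularly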
More CI/CD integration patterns and advanced examples coming soon…