Programmatic API Access
ReAPI provides REST APIs for programmatic access to test execution, result retrieval, and resource management, enabling deep integration with your development tools and workflows.
Authentication
API Key Authentication
# Set API key in headers
curl -H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
"https://api.reapi.com/v1/workspaces"SDK Authentication
// JavaScript/Node.js SDK
const ReAPI = require('@reapi/sdk');
const client = new ReAPI({
apiKey: process.env.REAPI_API_KEY,
workspace: 'your-workspace-id'
});
# Python SDK
from reapi import ReAPIClient
client = ReAPIClient(
api_key=os.environ['REAPI_API_KEY'],
workspace='your-workspace-id'
)
Test Execution API
Running Tests
# Run a test runner
curl -X POST "https://api.reapi.com/v1/runs" \
-H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
-d '{
"runner": "smoke-tests",
"environment": "staging",
"timeout": 1800,
"variables": {
"testUser": "api-test-user",
"debugMode": true
}
}'
# Response
{
"runId": "run_abc123def456",
"status": "queued",
"runner": "smoke-tests",
"environment": "staging",
"createdAt": "2024-01-15T10:30:00Z"
}
Checking Run Status
# Get run status
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/runs/run_abc123def456"
# Response
{
"runId": "run_abc123def456",
"status": "running",
"progress": {
"completed": 15,
"total": 25,
"percentage": 60
},
"startedAt": "2024-01-15T10:30:15Z",
"estimatedCompletion": "2024-01-15T10:45:00Z"
}
Retrieving Results
# Get detailed results
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/runs/run_abc123def456/results"
# Response
{
"runId": "run_abc123def456",
"status": "completed",
"result": "passed",
"summary": {
"total": 25,
"passed": 24,
"failed": 1,
"skipped": 0
},
"duration": 847,
"completedAt": "2024-01-15T10:44:07Z",
"details": {
"testCases": [
{
"name": "User Login Flow",
"status": "passed",
"duration": 1.2,
"assertions": 5
}
]
}
}
SDK Examples
JavaScript/Node.js SDK
const ReAPI = require('@reapi/sdk');
class APITestManager {
constructor(apiKey, workspace) {
this.client = new ReAPI({ apiKey, workspace });
}
async runTests(runner, environment, options = {}) {
try {
// Start test run
const run = await this.client.runs.create({
runner,
environment,
timeout: options.timeout || 1800,
variables: options.variables || {}
});
console.log(`✅ Test run started: ${run.runId}`);
// Wait for completion if requested
if (options.waitForCompletion) {
return await this.waitForCompletion(run.runId);
}
return run;
} catch (error) {
console.error('❌ Failed to start test run:', error.message);
throw error;
}
}
async waitForCompletion(runId, maxWaitTime = 3600) {
const startTime = Date.now();
while (Date.now() - startTime < maxWaitTime * 1000) {
const run = await this.client.runs.get(runId);
if (run.status === 'completed') {
const results = await this.client.runs.getResults(runId);
return results;
} else if (run.status === 'failed') {
throw new Error(`Test run failed: ${run.error}`);
}
// Wait 10 seconds before checking again
await new Promise(resolve => setTimeout(resolve, 10000));
}
throw new Error(`Test run timed out after ${maxWaitTime} seconds`);
}
async getTestHistory(runner, environment, limit = 10) {
return await this.client.runs.list({
runner,
environment,
limit,
orderBy: 'createdAt',
order: 'desc'
});
}
}
// Usage example
async function main() {
const testManager = new APITestManager(
process.env.REAPI_API_KEY,
'your-workspace-id'
);
try {
const results = await testManager.runTests('smoke-tests', 'staging', {
waitForCompletion: true,
variables: {
testUser: 'automated-test-user',
timeout: 30000
}
});
console.log('Test Results:', {
status: results.result,
passed: results.summary.passed,
failed: results.summary.failed,
duration: results.duration
});
if (results.result !== 'passed') {
process.exit(1);
}
} catch (error) {
console.error('Test execution failed:', error.message);
process.exit(1);
}
}
main();
Python SDK
import os
import time
import sys
from reapi import ReAPIClient
class APITestManager:
def __init__(self, api_key, workspace):
self.client = ReAPIClient(api_key=api_key, workspace=workspace)
def run_tests(self, runner, environment, **options):
"""Run tests and optionally wait for completion"""
try:
# Start test run
run = self.client.runs.create(
runner=runner,
environment=environment,
timeout=options.get('timeout', 1800),
variables=options.get('variables', {})
)
print(f"✅ Test run started: {run['runId']}")
# Wait for completion if requested
if options.get('wait_for_completion', False):
return self.wait_for_completion(run['runId'])
return run
except Exception as error:
print(f"❌ Failed to start test run: {error}")
raise
def wait_for_completion(self, run_id, max_wait_time=3600):
"""Wait for test run to complete"""
start_time = time.time()
while time.time() - start_time < max_wait_time:
run = self.client.runs.get(run_id)
if run['status'] == 'completed':
results = self.client.runs.get_results(run_id)
return results
elif run['status'] == 'failed':
raise Exception(f"Test run failed: {run.get('error', 'Unknown error')}")
# Wait 10 seconds before checking again
time.sleep(10)
raise Exception(f"Test run timed out after {max_wait_time} seconds")
def get_test_history(self, runner, environment, limit=10):
"""Get test run history"""
return self.client.runs.list(
runner=runner,
environment=environment,
limit=limit,
order_by='createdAt',
order='desc'
)
# Usage example
def main():
test_manager = APITestManager(
api_key=os.environ['REAPI_API_KEY'],
workspace='your-workspace-id'
)
try:
results = test_manager.run_tests(
runner='smoke-tests',
environment='staging',
wait_for_completion=True,
variables={
'testUser': 'automated-test-user',
'timeout': 30000
}
)
print("Test Results:", {
'status': results['result'],
'passed': results['summary']['passed'],
'failed': results['summary']['failed'],
'duration': results['duration']
})
if results['result'] != 'passed':
sys.exit(1)
except Exception as error:
print(f"Test execution failed: {error}")
sys.exit(1)
if __name__ == "__main__":
    main()
Resource Management API
Managing Test Cases
# List test cases
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/test-cases?folder=authentication&tags=smoke"
# Get test case details
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/test-cases/test_case_id"
# Update test case tags
curl -X PATCH "https://api.reapi.com/v1/test-cases/test_case_id" \
-H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
-d '{"tags": ["smoke", "critical", "auth"]}'
Managing Environments
# List environments
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/environments"
# Get environment configuration
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/environments/staging"
# Update environment variables
curl -X PATCH "https://api.reapi.com/v1/environments/staging" \
-H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
-d '{
"variables": {
"apiTimeout": 30000,
"testUser": "updated-test-user"
}
}'
Managing Deployments
# List deployments
curl -H "Authorization: Bearer your-api-key" \
"https://api.reapi.com/v1/deployments"
# Create deployment
curl -X POST "https://api.reapi.com/v1/deployments" \
-H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
-d '{
"name": "nightly-regression",
"runner": "regression-suite",
"environment": "staging",
"schedule": "0 2 * * *",
"enabled": true
}'
# Update deployment schedule
curl -X PATCH "https://api.reapi.com/v1/deployments/deployment_id" \
-H "Authorization: Bearer your-api-key" \
-H "Content-Type: application/json" \
-d '{"schedule": "0 */6 * * *"}'
Advanced Integration Examples
Custom Dashboard Integration
// dashboard-integration.js
const express = require('express');
const ReAPI = require('@reapi/sdk');
const app = express();
app.use(express.json()); // parse JSON request bodies so req.body is populated for the trigger endpoint
const reapi = new ReAPI({
apiKey: process.env.REAPI_API_KEY,
workspace: process.env.REAPI_WORKSPACE
});
// Dashboard endpoint for test status
app.get('/api/test-status', async (req, res) => {
try {
const environments = ['dev', 'staging', 'prod'];
const runners = ['smoke-tests', 'regression'];
const status = {};
for (const env of environments) {
status[env] = {};
for (const runner of runners) {
// Get latest run for each runner/environment combination
const runs = await reapi.runs.list({
runner,
environment: env,
limit: 1,
orderBy: 'createdAt',
order: 'desc'
});
if (runs.length > 0) {
const latestRun = runs[0];
status[env][runner] = {
status: latestRun.status,
result: latestRun.result,
lastRun: latestRun.completedAt,
duration: latestRun.duration
};
}
}
}
res.json(status);
} catch (error) {
res.status(500).json({ error: error.message });
}
});
// Trigger tests endpoint
app.post('/api/trigger-tests', async (req, res) => {
const { runner, environment, variables } = req.body;
try {
const run = await reapi.runs.create({
runner,
environment,
variables: variables || {}
});
res.json({ runId: run.runId, status: 'started' });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
app.listen(3000, () => {
console.log('Dashboard API listening on port 3000');
});
Automated Performance Monitoring
# performance_monitor.py
import os
import time
import json
from datetime import datetime, timedelta
from reapi import ReAPIClient
class PerformanceMonitor:
def __init__(self, api_key, workspace):
self.client = ReAPIClient(api_key=api_key, workspace=workspace)
self.performance_thresholds = {
'avg_response_time': 2000, # 2 seconds
'p95_response_time': 5000, # 5 seconds
'error_rate': 0.05 # 5%
}
def run_performance_check(self, runner='performance-tests', environment='staging'):
"""Run performance tests and analyze results"""
# Start performance test run
run = self.client.runs.create(
runner=runner,
environment=environment,
timeout=3600 # 1 hour timeout for performance tests
)
print(f"Started performance test run: {run['runId']}")
# Wait for completion
results = self.wait_for_completion(run['runId'])
# Analyze performance metrics
analysis = self.analyze_performance(results)
# Generate report
self.generate_report(analysis)
# Check for regressions
if self.has_performance_regression(analysis):
self.alert_performance_regression(analysis)
return False
return True
def analyze_performance(self, results):
"""Analyze performance test results"""
analysis = {
'timestamp': datetime.utcnow().isoformat(),
'total_requests': 0,
'avg_response_time': 0,
'p95_response_time': 0,
'p99_response_time': 0,
'error_rate': 0,
'throughput': 0
}
# Extract performance metrics from test results
if 'performance' in results:
perf_data = results['performance']
analysis.update({
'avg_response_time': perf_data.get('avgResponseTime', 0),
'p95_response_time': perf_data.get('p95ResponseTime', 0),
'p99_response_time': perf_data.get('p99ResponseTime', 0),
'error_rate': perf_data.get('errorRate', 0),
'throughput': perf_data.get('requestsPerSecond', 0)
})
return analysis
def has_performance_regression(self, analysis):
"""Check if performance has regressed beyond thresholds"""
regressions = []
if analysis['avg_response_time'] > self.performance_thresholds['avg_response_time']:
regressions.append(f"Average response time: {analysis['avg_response_time']}ms")
if analysis['p95_response_time'] > self.performance_thresholds['p95_response_time']:
regressions.append(f"P95 response time: {analysis['p95_response_time']}ms")
if analysis['error_rate'] > self.performance_thresholds['error_rate']:
regressions.append(f"Error rate: {analysis['error_rate'] * 100:.2f}%")
return len(regressions) > 0
def wait_for_completion(self, run_id, max_wait_time=3600):
"""Wait for test run to complete"""
start_time = time.time()
while time.time() - start_time < max_wait_time:
run = self.client.runs.get(run_id)
if run['status'] == 'completed':
return self.client.runs.get_results(run_id)
elif run['status'] == 'failed':
raise Exception(f"Performance test failed: {run.get('error')}")
time.sleep(30) # Check every 30 seconds
raise Exception("Performance test timed out")
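    # Minimal placeholder implementations for the two helpers referenced in
    # run_performance_check above; they are illustrative sketches, not part of
    # the ReAPI SDK. Replace them with your own reporting and alerting.
    def generate_report(self, analysis):
        """Print the analysis as JSON; swap in file or dashboard output as needed."""
        print(json.dumps(analysis, indent=2))

    def alert_performance_regression(self, analysis):
        """Log the regression locally; wire this up to Slack, email, or PagerDuty."""
        print(f"⚠️ Performance regression at {analysis['timestamp']}: "
              f"avg={analysis['avg_response_time']}ms, error_rate={analysis['error_rate']:.2%}")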
# Usage
if __name__ == "__main__":
monitor = PerformanceMonitor(
api_key=os.environ['REAPI_API_KEY'],
workspace='your-workspace-id'
)
success = monitor.run_performance_check()
if not success:
print("❌ Performance regression detected!")
exit(1)
else:
print("✅ Performance tests passed!")
Error Handling and Best Practices
Rate Limiting
// Handle rate limiting with exponential backoff
const ReAPI = require('@reapi/sdk');
class ReAPIClientWithRetry {
constructor(apiKey, workspace) {
this.client = new ReAPI({ apiKey, workspace });
this.maxRetries = 3;
}
async makeRequest(operation, maxRetries = this.maxRetries) {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
if (error.status === 429 && attempt < maxRetries) {
// Rate limited - wait with exponential backoff
const delay = Math.pow(2, attempt) * 1000; // 2s, 4s, 8s
console.log(`Rate limited. Retrying in ${delay}ms...`);
await new Promise(resolve => setTimeout(resolve, delay));
continue;
}
throw error;
}
}
}
async runTests(runner, environment, options = {}) {
return this.makeRequest(() =>
this.client.runs.create({ runner, environment, ...options })
);
}
}
Error Recovery
# Comprehensive error handling; reuses the wait_for_completion helper shown in the Python SDK example above
import time
from reapi import ReAPIClient
class RobustReAPIClient:
def __init__(self, api_key, workspace):
self.client = ReAPIClient(api_key=api_key, workspace=workspace)
def run_tests_with_retry(self, runner, environment, max_retries=3):
"""Run tests with automatic retry on failure"""
for attempt in range(max_retries):
try:
run = self.client.runs.create(
runner=runner,
environment=environment,
timeout=1800
)
results = self.wait_for_completion(run['runId'])
# If tests failed due to infrastructure issues, retry
if self.should_retry(results):
print(f"Retrying due to infrastructure issues (attempt {attempt + 1})")
continue
return results
except Exception as error:
print(f"Attempt {attempt + 1} failed: {error}")
if attempt == max_retries - 1:
raise Exception(f"All {max_retries} attempts failed")
# Wait before retrying
time.sleep(30 * (attempt + 1))
raise Exception("Maximum retries exceeded")
def should_retry(self, results):
"""Determine if test failure warrants a retry"""
if results['result'] == 'passed':
return False
# Retry if failure rate is very high (likely infrastructure issue)
failure_rate = results['summary']['failed'] / results['summary']['total']
return failure_rate > 0.8
Best Practices
API Usage
- Implement proper error handling and retry logic
- Use appropriate timeout values for different operations (see the client reuse and timeout sketch after this list)
- Implement rate limiting and backoff strategies
- Cache authentication tokens when possible
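As a sketch of the timeout and client-reuse points above, the snippet below creates the Python SDK client once and picks a per-runner timeout. The RUN_TIMEOUTS values, the REAPI_WORKSPACE environment variable, and the get_client/start_run helpers are illustrative assumptions; the ReAPIClient and runs.create calls mirror the SDK examples earlier on this page.
# Reuse one client and choose timeouts per runner type (illustrative values)
import os
from functools import lru_cache

from reapi import ReAPIClient

RUN_TIMEOUTS = {
    'smoke-tests': 600,        # quick feedback runs
    'regression-suite': 3600,  # long-running suites
    'performance-tests': 7200,
}

@lru_cache(maxsize=1)
def get_client():
    """Create the client once and reuse it instead of re-creating it per call."""
    return ReAPIClient(
        api_key=os.environ['REAPI_API_KEY'],
        workspace=os.environ['REAPI_WORKSPACE'],  # assumed env var; earlier examples hard-code the workspace id
    )

def start_run(runner, environment):
    client = get_client()
    return client.runs.create(
        runner=runner,
        environment=environment,
        timeout=RUN_TIMEOUTS.get(runner, 1800),  # fall back to the 30-minute default used above
    )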
Security
- Store API keys securely using environment variables
- Use least-privilege API keys for different use cases
- Implement proper logging without exposing sensitive data (see the masking sketch after this list)
- Regularly rotate API keys and monitor usage
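A minimal masking sketch for the logging point above; mask_secret and log_request are illustrative helpers, not part of the ReAPI SDK, that keep the full API key out of log output.
# Log requests without writing full credentials to the log stream
import logging

logger = logging.getLogger('reapi.integration')

def mask_secret(value, visible=4):
    """Show only the last few characters of a secret so logs stay useful but safe."""
    if not value:
        return '<unset>'
    return '*' * max(len(value) - visible, 0) + value[-visible:]

def log_request(url, api_key):
    logger.info("Calling %s with key %s", url, mask_secret(api_key))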
Performance
- Use pagination for large result sets (see the paging sketch after this list)
- Implement caching for frequently accessed data
- Use bulk operations when available
- Monitor API usage and optimize bottlenecks
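A paging sketch for the first point above. Only limit appears in the list examples earlier on this page, so the offset parameter here is an assumption; adapt it to whatever paging scheme (offset, page token, or cursor) your ReAPI version actually exposes.
# Walk run history in pages instead of requesting one huge result set
def iter_all_runs(client, runner, environment, page_size=50):
    offset = 0
    while True:
        page = client.runs.list(
            runner=runner,
            environment=environment,
            limit=page_size,
            offset=offset,  # assumed parameter; see note above
        )
        if not page:
            break
        yield from page
        if len(page) < page_size:
            break
        offset += page_size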
Integration Design
- Design integrations to be resilient to API changes
- Implement proper monitoring and alerting
- Use structured logging for debugging (see the sketch after this list)
- Document API usage patterns and dependencies
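A small structured-logging sketch using Python's standard logging module; the JsonFormatter class and field names are illustrative. Run metadata passed through the extra argument travels with the message, so run IDs and statuses stay machine-searchable.
# Emit one JSON object per log line so run IDs and statuses stay queryable
import json
import logging

class JsonFormatter(logging.Formatter):
    def format(self, record):
        payload = {
            'level': record.levelname,
            'logger': record.name,
            'message': record.getMessage(),
        }
        # Copy structured fields passed via logger.info(..., extra={...})
        for key in ('run_id', 'runner', 'environment', 'status'):
            if hasattr(record, key):
                payload[key] = getattr(record, key)
        return json.dumps(payload)

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
logger = logging.getLogger('reapi.integration')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info("run finished", extra={'run_id': 'run_abc123def456', 'status': 'passed'})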
More programmatic API examples and advanced integration patterns coming soon…