Performance test reporting — Testing Fundamentals: Build an Automation Script
import os import time import pytest REPORT_PATH = 'performance_report.html' # Simulate running load test and generating report # In real scenario, this would trigger Locust or other tool def run_load_test(): # Simulate test duration time.sleep(5) # wait 5 seconds to simulate test # Simulate report generation with open(REPORT_PATH, 'w') as f: f.write('<html><body>') f.write('<h1>Performance Test Report</h1>') f.write('<p>Average Response Time: 200ms</p>') f.write('<p>Throughput: 50 requests/sec</p>') f.write('<p>Error Rate: 0.5%</p>') f.write('<p>Test Duration: 5 minutes</p>') f.write('</body></html>') def test_performance_report_generation(): # Run the load test simulation run_load_test() # Wait for report file to exist (explicit wait) timeout = 10 poll_interval = 1 waited = 0 while not os.path.exists(REPORT_PATH) and waited < timeout: time.sleep(poll_interval) waited += poll_interval # Assert report file exists assert os.path.exists(REPORT_PATH), 'Report file was not generated' # Read report content with open(REPORT_PATH, 'r') as f: content = f.read() # Assert key metrics present assert 'Average Response Time' in content, 'Average Response Time metric missing' assert 'Throughput' in content, 'Throughput metric missing' assert 'Error Rate' in content, 'Error Rate metric missing' assert 'Test Duration' in content, 'Test Duration metric missing' # Extract error rate value import re match = re.search(r'Error Rate: ([0-9.]+)%', content) assert match is not None, 'Error Rate value not found' error_rate = float(match.group(1)) # Assert error rate below 1% assert error_rate < 1.0, f'Error rate too high: {error_rate}%' # Cleanup os.remove(REPORT_PATH)
This test script simulates running a performance load test and generating an HTML report.
The run_load_test() function waits 5 seconds to mimic test duration and creates a simple HTML report file with key metrics.
The test test_performance_report_generation() waits explicitly for the report file to appear, then reads its content.
It asserts the presence of average response time, throughput, error rate, and test duration in the report.
Using a regular expression, it extracts the error rate value and asserts it is below 1%.
Finally, it cleans up by deleting the report file.
This approach uses explicit waits, file checks, content parsing, and pytest assertions to verify performance test reporting.
Now add data-driven testing with 3 different load levels: 50, 100, and 200 virtual users