import unittest
import xml.etree.ElementTree as ET
import os
import time
import xmlrunner
class SampleTests(unittest.TestCase):
    """Minimal test suite: one passing and one failing case.

    Exists solely to exercise XML report generation and validation
    in the functions below.
    """

    def test_pass(self):
        """Always succeeds."""
        self.assertTrue(True)

    def test_fail(self):
        """Always fails, so the generated report contains exactly one failure."""
        self.assertTrue(False)
def run_tests_and_generate_report(report_path):
    """Run the tests defined in this module and write an XML report.

    Args:
        report_path: Destination path for the XML report file.

    NOTE(review): unittest.main() parses sys.argv, so extra command-line
    arguments given to this script are interpreted as test selectors —
    confirm that is acceptable for the pipeline invoking it.
    """
    with open(report_path, 'wb') as report_stream:
        runner = xmlrunner.XMLTestRunner(output=report_stream)
        unittest.main(testRunner=runner, exit=False)
def wait_for_file(file_path, timeout=10):
    """Poll until *file_path* exists or *timeout* seconds elapse.

    Args:
        file_path: Path to watch for.
        timeout: Maximum number of seconds to keep polling (default 10).

    Returns:
        True if the file appeared within the timeout, False otherwise.
    """
    # time.monotonic() is immune to system clock adjustments (NTP, DST),
    # unlike time.time(), so the timeout can be neither stretched nor
    # cut short by a clock jump.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(file_path):
            return True
        time.sleep(0.5)
    return False
def validate_report(report_path):
    """Validate the structure and contents of the generated XML test report.

    Checks the suite name, the set of test-case names, per-test pass/fail
    status, and the summary counts carried on the root element.

    Args:
        report_path: Path to the XML report file.

    Raises:
        AssertionError: If any expectation about the report is violated.
        FileNotFoundError / xml.etree.ElementTree.ParseError: If the file
            is missing or not well-formed XML.
    """
    assert os.path.exists(report_path), f"Report file {report_path} does not exist"
    tree = ET.parse(report_path)
    root = tree.getroot()

    # Check test suite name.
    suite_name = root.attrib.get('name')
    assert suite_name == 'SampleTests', f"Expected suite name 'SampleTests', got '{suite_name}'"

    # Collect test cases.
    testcases = root.findall('testcase')
    testcase_names = {tc.attrib['name'] for tc in testcases}
    expected_tests = {'test_pass', 'test_fail'}
    assert testcase_names == expected_tests, f"Test cases mismatch. Expected {expected_tests}, got {testcase_names}"

    # Check pass/fail status. BUG FIX: xml.etree elements have no
    # getparent() (that is an lxml-only extension), so instead of walking
    # from <failure> back up to its parent, look for a <failure> child on
    # each <testcase> directly.
    failed_tests = {tc.attrib['name'] for tc in testcases if tc.find('failure') is not None}
    assert 'test_fail' in failed_tests, "Failed test 'test_fail' not found in report"
    assert 'test_pass' not in failed_tests, "Passed test 'test_pass' incorrectly marked as failed"

    # Check summary counts.
    tests = int(root.attrib.get('tests', 0))
    failures_count = int(root.attrib.get('failures', 0))
    assert tests == 2, f"Expected 2 tests, got {tests}"
    assert failures_count == 1, f"Expected 1 failure, got {failures_count}"
if __name__ == '__main__':
report_file = 'test-reports/sample-tests.xml'
os.makedirs(os.path.dirname(report_file), exist_ok=True)
run_tests_and_generate_report(report_file)
if wait_for_file(report_file):
validate_report(report_file)
print('Test report validation passed.')
else:
raise FileNotFoundError(f'Test report {report_file} was not generated in time.')This script defines two simple tests: one that passes and one that fails.
The run_tests_and_generate_report function runs these tests and generates an XML report using xmlrunner.
The wait_for_file function waits up to 10 seconds for the report file to appear, simulating waiting for pipeline artifact generation.
The validate_report function parses the XML report and checks:
- The test suite name matches the test class name.
- All expected test cases are listed.
- The pass/fail status is correct for each test.
- The summary counts for total tests and failures are accurate.
This approach ensures the test report generated in a pipeline is complete and accurate.