tests/run-tests.py: Add list of passed/skipped tests to _result.json.

The output `_result.json` file generated by `run-tests.py` currently
contains a list of failed tests.  This commit adds to the output a list of
passed and skipped tests, and so now provides full information about which
tests were run and what their results were.

Signed-off-by: Damien George <damien@micropython.org>
Author: Damien George
Date:   2025-05-13 17:16:11 +10:00
Commit: 7a55cb6b36 (parent e39243c382)

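For context, the results file written at the end of a run now records all three outcomes. A rough sketch of its shape, using made-up test entries (the real "args" value is whatever vars(args) contains for the given invocation):

# Illustrative only: the keys match the json.dump call in the diff below;
# the file paths listed here are hypothetical examples.
example_result = {
    "args": {},  # vars(args): the parsed command-line options, omitted here
    "passed_tests": ["basics/int_small.py", "basics/string_format.py"],
    "skipped_tests": ["basics/async_await.py"],
    "failed_tests": [],
}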

@@ -605,7 +605,7 @@ class PyboardNodeRunner:
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
     test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
-    passed_count = ThreadSafeCounter()
+    passed_tests = ThreadSafeCounter([])
     failed_tests = ThreadSafeCounter([])
     skipped_tests = ThreadSafeCounter([])
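The passed counter becomes a list accumulator, matching how failed_tests and skipped_tests are already collected, so the script can later report which tests passed rather than only how many. A minimal sketch of the kind of thread-safe accumulator this relies on (an illustration of the idea, not the exact ThreadSafeCounter class in run-tests.py):

import threading

class ThreadSafeAccumulator:
    # Holds either an integer count or a list, guarded by a lock so that
    # multiple worker threads can update it concurrently.
    def __init__(self, start=0):
        self._value = start
        self._lock = threading.Lock()

    def add(self, to_add):
        with self._lock:
            self._value += to_add

    def increment(self):
        self.add(1)

    def append(self, arg):
        # When constructed with [], += concatenates lists, so this appends one item.
        self.add([arg])

    @property
    def value(self):
        return self._value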
@@ -896,7 +896,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         if skip_it:
             print("skip ", test_file)
-            skipped_tests.append(test_name)
+            skipped_tests.append((test_name, test_file))
             return
 
         # Run the test on the MicroPython target.
@@ -911,7 +911,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             # start-up code (eg boot.py) when preparing to run the next test.
             pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            skipped_tests.append(test_name)
+            skipped_tests.append((test_name, test_file))
             return
 
         # Look at the output of the test to see if unittest was used.
@@ -994,7 +994,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            passed_count.increment()
+            passed_tests.append((test_name, test_file))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
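With the hunks above, every recorded result is a (test_name, test_file) pair: element 0 is the short name used in the console summary further down, element 1 is the file path that ends up in the results file. A tiny illustration with a hypothetical entry:

# Hypothetical entry, showing how the pair is consumed later in this diff.
entry = ("int_small", "basics/int_small.py")  # (test_name, test_file)
summary_name = entry[0]  # joined into the "tests skipped/failed: ..." summary lines
json_path = entry[1]     # written into the results file by json.dump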
@@ -1035,17 +1035,30 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             print(line)
         sys.exit(1)
 
+    passed_tests = sorted(passed_tests.value)
+    skipped_tests = sorted(skipped_tests.value)
+    failed_tests = sorted(failed_tests.value)
+
     print(
         "{} tests performed ({} individual testcases)".format(
             test_count.value, testcase_count.value
         )
     )
-    print("{} tests passed".format(passed_count.value))
+    print("{} tests passed".format(len(passed_tests)))
 
-    skipped_tests = sorted(skipped_tests.value)
     if len(skipped_tests) > 0:
-        print("{} tests skipped: {}".format(len(skipped_tests), " ".join(skipped_tests)))
-    failed_tests = sorted(failed_tests.value)
+        print(
+            "{} tests skipped: {}".format(
+                len(skipped_tests), " ".join(test[0] for test in skipped_tests)
+            )
+        )
+    if len(failed_tests) > 0:
+        print(
+            "{} tests failed: {}".format(
+                len(failed_tests), " ".join(test[0] for test in failed_tests)
+            )
+        )
 
     # Serialize regex added by append_filter.
     def to_json(obj):
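With made-up counts and test names purely for illustration, the summary printed by the code above would read along these lines:

    530 tests performed (1520 individual testcases)
    527 tests passed
    2 tests skipped: async_await native_misc
    3 tests failed: float_parse int_big_cmp string_format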
@@ -1055,21 +1068,18 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
     with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
         json.dump(
-            {"args": vars(args), "failed_tests": [test[1] for test in failed_tests]},
+            {
+                "args": vars(args),
+                "passed_tests": [test[1] for test in passed_tests],
+                "skipped_tests": [test[1] for test in skipped_tests],
+                "failed_tests": [test[1] for test in failed_tests],
+            },
             f,
             default=to_json,
         )
 
-    if len(failed_tests) > 0:
-        print(
-            "{} tests failed: {}".format(
-                len(failed_tests), " ".join(test[0] for test in failed_tests)
-            )
-        )
-        return False
-
-    # all tests succeeded
-    return True
+    # Return True only if all tests succeeded.
+    return len(failed_tests) == 0
 
 
 class append_filter(argparse.Action):
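Because the results file now lists every outcome, a follow-up tool can reconstruct the whole run report from it alone. A small sketch of such a consumer, assuming the file has been written into the chosen result_dir as in the code above (the path used here is only an example):

import json

# Load the results file written by run_tests(); the actual file name comes
# from RESULTS_FILE in run-tests.py, and the path below is hypothetical.
with open("results/_result.json") as f:
    results = json.load(f)

print("passed: ", len(results["passed_tests"]))
print("skipped:", len(results["skipped_tests"]))
print("failed: ", len(results["failed_tests"]))

# Each list holds test file paths (the second element of the pairs collected above).
for test_file in results["failed_tests"]:
    print("rerun candidate:", test_file)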