tests/run-tests.py: Change _results.json to have a combined result list.

The `_results.json` output of `run-tests.py` was recently changed in
7a55cb6b36 to add a list of passed and
skipped tests.

That approach turned out not to be general enough, because we want to add
another type of result, namely tests that are skipped because they are too
large.

Instead of having separate lists in `_results.json` for each kind of result
(pass, fail, skip, skip too large, etc), this commit changes the output
format of `_results.json` so that it stores a single list of 3-tuples, one
for each test that was run:

    [(test_name, result, reason), ...]

That's more general and allows a reason to be recorded for skipped and
failed tests.  At the moment this reason is just an empty string, but it can
be improved in the future.

Signed-off-by: Damien George <damien@micropython.org>
commit 4dff9cbf1a
parent c0111e63b3
Author: Damien George
Date:   2025-05-28 10:21:46 +10:00
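
For illustration only (this is not part of the commit): a minimal sketch, in
Python, of how a downstream consumer might read the new combined list.  The
result-directory path is an assumption; the `"results"` key and the
`[test, result, reason]` entry shape come from the diff below.

    import json
    import os
    from collections import Counter

    # Assumed location of the results file; adjust to the --result-dir in use.
    results_path = os.path.join("results", "_results.json")

    with open(results_path, "r") as f:
        results = json.load(f)["results"]

    # Each entry is [test, result, reason]; the reason is currently always "".
    counts = Counter(result for _test, result, _reason in results)
    print("pass:", counts["pass"], "fail:", counts["fail"], "skip:", counts["skip"])

    # Re-derive the per-category lists that the old format stored separately.
    failed_tests = [test for test, result, _reason in results if result == "fail"]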


@@ -605,9 +605,7 @@ class PyboardNodeRunner:
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
     test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
-    passed_tests = ThreadSafeCounter([])
-    failed_tests = ThreadSafeCounter([])
-    skipped_tests = ThreadSafeCounter([])
+    test_results = ThreadSafeCounter([])
 
     skip_tests = set()
     skip_native = False
@@ -896,7 +894,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         if skip_it:
             print("skip ", test_file)
-            skipped_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "skip", ""))
             return
 
         # Run the test on the MicroPython target.
@@ -911,7 +909,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
                 # start-up code (eg boot.py) when preparing to run the next test.
                 pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            skipped_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "skip", ""))
             return
 
         # Look at the output of the test to see if unittest was used.
@@ -994,7 +992,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            passed_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "pass", ""))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -1006,7 +1004,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
                 rm_f(filename_expected)  # in case left over from previous failed run
             with open(filename_mupy, "wb") as f:
                 f.write(output_mupy)
-            failed_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "fail", ""))
 
         test_count.increment()
@@ -1035,9 +1033,10 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             print(line)
         sys.exit(1)
 
-    passed_tests = sorted(passed_tests.value)
-    skipped_tests = sorted(skipped_tests.value)
-    failed_tests = sorted(failed_tests.value)
+    test_results = test_results.value
+    passed_tests = list(r for r in test_results if r[2] == "pass")
+    skipped_tests = list(r for r in test_results if r[2] == "skip")
+    failed_tests = list(r for r in test_results if r[2] == "fail")
 
     print(
         "{} tests performed ({} individual testcases)".format(
@@ -1069,9 +1068,11 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
     with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
         json.dump(
             {
+                # The arguments passed on the command-line.
                 "args": vars(args),
-                "passed_tests": [test[1] for test in passed_tests],
-                "skipped_tests": [test[1] for test in skipped_tests],
+                # A list of all results of the form [(test, result, reason), ...].
+                "results": list(test[1:] for test in test_results),
+                # A list of failed tests. This is deprecated, use the "results" above instead.
                 "failed_tests": [test[1] for test in failed_tests],
             },
             f,
@@ -1248,7 +1249,7 @@ the last matching regex is used:
         results_file = os.path.join(args.result_dir, RESULTS_FILE)
         if os.path.exists(results_file):
             with open(results_file, "r") as f:
-                tests = json.load(f)["failed_tests"]
+                tests = list(test[0] for test in json.load(f)["results"] if test[1] == "fail")
         else:
             tests = []
     elif len(args.files) == 0:
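
With the combined list in place, the motivating case (tests skipped because
they are too large) can be recorded without another schema change.  The
sketch below is hypothetical and not part of this commit: the reason string
and the example entries are made up, purely to show how a consumer could
distinguish such a result.

    # Hypothetical: a future change could append such tests with a distinct
    # reason (or result string), e.g.
    #     test_results.append((test_name, test_file, "skip", "too large"))
    # A consumer can then group on both fields.  The entries below are made up.
    results = [
        ["basics/int_small.py", "pass", ""],
        ["extmod/vfs_fat_large.py", "skip", "too large"],
        ["micropython/heapalloc.py", "fail", ""],
    ]
    skipped_too_large = [
        test for test, result, reason in results if result == "skip" and reason == "too large"
    ]
    print(skipped_too_large)  # -> ['extmod/vfs_fat_large.py']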