tests/run-tests.py: Remove --write-exp and --list-tests options.

Removing the now-unused (see previous commit for details) `--write-exp` and
`--list-tests` options helps to simplify the rather complex logic in
`run-tests.py`.

Signed-off-by: Damien George <damien@micropython.org>
This commit is contained in:
Damien George
2024-09-04 17:13:55 +10:00
parent 067ef81cd0
commit 1be38e8077
3 changed files with 4 additions and 198 deletions

View File

@@ -1,94 +0,0 @@
#
# This is minimal MicroPython variant of run-tests.py script, which uses
# .exp files as generated by run-tests.py --write-exp. It is useful to run
# testsuite on systems which have neither CPython3 nor unix shell.
# This script is intended to be run by the same interpreter executable
# which is to be tested, so should use minimal language functionality.
#
import sys
import os
# Test suite directories (relative to the tests/ directory) that will be
# scanned for *.py test files.  NOTE: the original text had " misc" with a
# stray leading space; it only worked because shell word-splitting collapses
# the extra whitespace — fixed here to the plain directory name.
tests = ["basics", "micropython", "float", "import", "io", "misc", "unicode", "extmod", "unix"]

# Name of the MicroPython executable under test; Windows builds carry .exe.
if sys.platform == "win32":
    MICROPYTHON = "micropython.exe"
else:
    MICROPYTHON = "micropython"
def should_skip(test):
    """Return True if the test file named *test* must be skipped.

    Tests whose names start with "native" or "viper" require the native
    emitter, which is not available on all targets, so they are skipped.

    The original fell off the end of the function and implicitly returned
    None for non-skipped tests; an explicit ``return False`` makes the
    boolean contract clear (callers only use the result in a truth test,
    so behavior for them is unchanged).
    """
    if test.startswith("native"):
        return True
    if test.startswith("viper"):
        return True
    return False
# Counters accumulated over all suites:
#   test_count   - tests actually executed (excludes skips)
#   passed_count - executed tests whose output matched the .exp file
#   skip_count   - tests skipped for any reason (no .exp, SKIP output, ...)
test_count = 0
passed_count = 0
skip_count = 0

for suite in tests:
    # print("Running in: %s" % suite)
    # Build tests.lst, one test filename per line, using whatever directory
    # listing tool the host OS provides (this script deliberately avoids
    # os.listdir-style niceties to keep language requirements minimal).
    if sys.platform == "win32":
        # dir /b prints only contained filenames, one on a line
        # http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/dir.mspx
        r = os.system("dir /b %s/*.py >tests.lst" % suite)
    else:
        r = os.system("ls %s/*.py | xargs -n1 basename >tests.lst" % suite)
    assert r == 0
    with open("tests.lst") as f:
        testcases = f.readlines()
        # Strip the trailing newline from each listed filename.
        testcases = [l[:-1] for l in testcases]
    assert testcases, "No tests found in dir '%s', which is implausible" % suite
    # print(testcases)
    for t in testcases:
        # native_check.py is a feature-probe helper, not a real test.
        if t == "native_check.py":
            continue
        qtest = "%s/%s" % (suite, t)
        if should_skip(t):
            print("skip " + qtest)
            skip_count += 1
            continue
        # Load the expected output; a missing .exp file means the test
        # cannot be checked here and is counted as skipped below.
        exp = None
        try:
            f = open(qtest + ".exp")
            exp = f.read()
            f.close()
        except OSError:
            pass
        if exp is not None:
            # print("run " + qtest)
            # Run the interpreter-under-test, capturing stdout in .tst.out;
            # a nonzero exit status is recorded as the sentinel "CRASH".
            r = os.system(MICROPYTHON + " %s >.tst.out" % qtest)
            if r == 0:
                f = open(".tst.out")
                out = f.read()
                f.close()
            else:
                out = "CRASH"
            # A test that prints exactly "SKIP" asks to be skipped at runtime.
            if out == "SKIP\n":
                print("skip " + qtest)
                skip_count += 1
            else:
                if out == exp:
                    print("pass " + qtest)
                    passed_count += 1
                else:
                    print("FAIL " + qtest)
                # Only tests that actually ran (pass or FAIL) count here.
                test_count += 1
        else:
            # No .exp file available for this test.
            skip_count += 1

# Final summary; failed count is derived rather than tracked separately.
print("%s tests performed" % test_count)
print("%s tests passed" % passed_count)
if test_count != passed_count:
    print("%s tests failed" % (test_count - passed_count))
if skip_count:
    print("%s tests skipped" % skip_count)

View File

@@ -1,73 +0,0 @@
#!/bin/sh
#
# This is plain shell variant of run-tests.py script, which uses .exp files
# as generated by run-tests.py --write-exp. It is useful to run testsuite
# on embedded systems which don't have CPython3.
#
# Counters/accumulators for the final summary.
RM="rm -f"
MP_PY=micropython
numtests=0
numtestcases=0
numpassed=0
numskipped=0
numfailed=0
nameskipped=
namefailed=
# With no arguments, run the default suites; otherwise run the given files.
if [ $# -eq 0 ]
then
tests="basics/*.py micropython/*.py float/*.py import/*.py io/*.py misc/*.py unicode/*.py extmod/*.py unix/*.py"
else
tests="$@"
fi
for infile in $tests
do
basename=`basename $infile .py`
outfile=${basename}.py.out
expfile=$infile.exp
# Run the interpreter-under-test, capturing its stdout.
$MP_PY $infile > $outfile
# Each line of the .exp file counts as one individual testcase.
numtestcases=$(expr $numtestcases + $(cat $expfile | wc -l))
if grep -q "SKIP\|SyntaxError: invalid micropython decorator" $outfile
then
# we don't count tests that explicitly ask to be skipped
# we don't count tests that fail due to unsupported decorator
echo "skip $infile"
$RM $outfile
numskipped=$(expr $numskipped + 1)
nameskipped="$nameskipped $basename"
else
# Compare actual output against expected; --brief suppresses the diff body.
diff --brief $expfile $outfile > /dev/null
if [ $? -eq 0 ]
then
echo "pass $infile"
# Output files are kept only for failing tests, to aid debugging.
$RM $outfile
numpassed=$(expr $numpassed + 1)
else
echo "FAIL $infile"
numfailed=$(expr $numfailed + 1)
namefailed="$namefailed $basename"
fi
fi
numtests=$(expr $numtests + 1)
done
# Summary; exit status reflects whether any test failed.
echo "$numtests tests performed ($numtestcases individual testcases)"
echo "$numpassed tests passed"
if [ $numskipped != 0 ]
then
echo "$numskipped tests skipped -$nameskipped"
fi
if [ $numfailed != 0 ]
then
echo "$numfailed tests failed -$namefailed"
exit 1
else
exit 0
fi

View File

@@ -465,9 +465,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
upy_float_precision = 32 upy_float_precision = 32
# If we're asked to --list-tests, we can't assume that there's a if True:
# connection to target, so we can't run feature checks usefully.
if not (args.list_tests or args.write_exp):
# Even if we run completely different tests in a different directory, # Even if we run completely different tests in a different directory,
# we need to access feature_checks from the same directory as the # we need to access feature_checks from the same directory as the
# run-tests.py script itself so use base_path. # run-tests.py script itself so use base_path.
@@ -797,11 +795,6 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
skip_it |= skip_io_module and is_io_module skip_it |= skip_io_module and is_io_module
skip_it |= skip_fstring and is_fstring skip_it |= skip_fstring and is_fstring
if args.list_tests:
if not skip_it:
print(test_file)
return
if skip_it: if skip_it:
print("skip ", test_file) print("skip ", test_file)
skipped_tests.append(test_name) skipped_tests.append(test_name)
@@ -821,18 +814,12 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
cwd=os.path.dirname(test_file), cwd=os.path.dirname(test_file),
stderr=subprocess.STDOUT, stderr=subprocess.STDOUT,
) )
if args.write_exp:
with open(test_file_expected, "wb") as f:
f.write(output_expected)
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
output_expected = b"CPYTHON3 CRASH" output_expected = b"CPYTHON3 CRASH"
# canonical form for all host platforms is to use \n for end-of-line # canonical form for all host platforms is to use \n for end-of-line
output_expected = output_expected.replace(b"\r\n", b"\n") output_expected = output_expected.replace(b"\r\n", b"\n")
if args.write_exp:
return
# run MicroPython # run MicroPython
output_mupy = run_micropython(pyb, args, test_file, test_file_abspath) output_mupy = run_micropython(pyb, args, test_file, test_file_abspath)
@@ -861,7 +848,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
test_count.increment() test_count.increment()
if pyb or args.list_tests: if pyb:
num_threads = 1 num_threads = 1
if num_threads > 1: if num_threads > 1:
@@ -871,10 +858,6 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
for test in tests: for test in tests:
run_one_test(test) run_one_test(test)
# Leave RESULTS_FILE untouched here for future runs.
if args.list_tests:
return True
print( print(
"{} tests performed ({} individual testcases)".format( "{} tests performed ({} individual testcases)".format(
test_count.value, testcase_count.value test_count.value, testcase_count.value
@@ -983,14 +966,6 @@ the last matching regex is used:
dest="filters", dest="filters",
help="include test by regex on path/name.py", help="include test by regex on path/name.py",
) )
cmd_parser.add_argument(
"--write-exp",
action="store_true",
help="use CPython to generate .exp files to run tests w/o CPython",
)
cmd_parser.add_argument(
"--list-tests", action="store_true", help="list tests instead of running them"
)
cmd_parser.add_argument( cmd_parser.add_argument(
"--emit", default="bytecode", help="MicroPython emitter to use (bytecode or native)" "--emit", default="bytecode", help="MicroPython emitter to use (bytecode or native)"
) )
@@ -1062,9 +1037,7 @@ the last matching regex is used:
"rp2", "rp2",
"zephyr", "zephyr",
) )
if args.list_tests: if args.target in LOCAL_TARGETS:
pyb = None
elif args.target in LOCAL_TARGETS:
pyb = None pyb = None
if args.target == "webassembly": if args.target == "webassembly":
pyb = PyboardNodeRunner() pyb = PyboardNodeRunner()
@@ -1080,7 +1053,7 @@ the last matching regex is used:
raise ValueError("target must be one of %s" % ", ".join(LOCAL_TARGETS + EXTERNAL_TARGETS)) raise ValueError("target must be one of %s" % ", ".join(LOCAL_TARGETS + EXTERNAL_TARGETS))
# Automatically detect the native architecture for mpy-cross if not given. # Automatically detect the native architecture for mpy-cross if not given.
if not (args.list_tests or args.write_exp) and not args.mpy_cross_flags: if not args.mpy_cross_flags:
output = run_feature_check(pyb, args, "target_info.py") output = run_feature_check(pyb, args, "target_info.py")
arch = str(output, "ascii").strip() arch = str(output, "ascii").strip()
if arch != "None": if arch != "None":