diff options
author | Heejin Ahn <aheejin@gmail.com> | 2019-11-20 16:37:09 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-11-20 16:37:09 -0800 |
commit | 21888c253f775047bcfac28e8110abdcad9d6bcb (patch) | |
tree | 18b7cf5d6b76a554401d7edb3601fe7be9ae8739 /check.py | |
parent | f17f8f927c96c2e6ec25b4c2dbb8b9d70f7af9b7 (diff) | |
download | binaryen-21888c253f775047bcfac28e8110abdcad9d6bcb.tar.gz binaryen-21888c253f775047bcfac28e8110abdcad9d6bcb.tar.bz2 binaryen-21888c253f775047bcfac28e8110abdcad9d6bcb.zip |
Simplify test scripts (NFC) (#2457)
This makes test scripts simpler by reducing loop depths and extracting
repeating code into methods or variables.
- `get_tests` returns a list of tests with the specified extensions. The
returned entries are full paths rather than just file names.
- Reduces loop depths by using early exits and `get_tests`.
Diffstat (limited to 'check.py')
-rwxr-xr-x | check.py | 485 |
1 file changed, 230 insertions, 255 deletions
@@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import glob import os import shutil import subprocess @@ -27,9 +28,9 @@ from scripts.test.shared import ( WASM_CTOR_EVAL, WASM_OPT, WASM_SHELL, WASM_METADCE, WASM_DIS, WASM_REDUCE, binary_format_check, delete_from_orbit, fail, fail_with_error, fail_if_not_identical, fail_if_not_contained, has_vanilla_emcc, - has_vanilla_llvm, minify_check, options, tests, requested, warnings, + has_vanilla_llvm, minify_check, options, requested, warnings, has_shell_timeout, fail_if_not_identical_to_file, with_pass_debug, - validate_binary + validate_binary, get_test_dir, get_tests ) # For shared.num_failures. Cannot import directly because modifications made in @@ -101,175 +102,156 @@ def run_wasm_opt_tests(): print('\n[ checking wasm-opt passes... ]\n') - for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'passes'))): - if t.endswith(('.wast', '.wasm')): - print('..', t) - binary = '.wasm' in t - base = os.path.basename(t).replace('.wast', '').replace('.wasm', '') - passname = base - if passname.isdigit(): - passname = open(os.path.join(options.binaryen_test, 'passes', passname + '.passes')).read().strip() - opts = [('--' + p if not p.startswith('O') else '-' + p) for p in passname.split('_')] - t = os.path.join(options.binaryen_test, 'passes', t) - actual = '' - for module, asserts in split_wast(t): - assert len(asserts) == 0 - write_wast('split.wast', module) - cmd = WASM_OPT + opts + ['split.wast', '--print'] - curr = run_command(cmd) - actual += curr - # also check debug mode output is valid - debugged = run_command(cmd + ['--debug'], stderr=subprocess.PIPE) - fail_if_not_contained(actual, debugged) - - # also check pass-debug mode - def check(): - pass_debug = run_command(cmd) - fail_if_not_identical(curr, pass_debug) - with_pass_debug(check) - - expected_file = os.path.join(options.binaryen_test, 'passes', - base + ('.bin' if binary else '') + 
'.txt') - fail_if_not_identical_to_file(actual, expected_file) - - if 'emit-js-wrapper' in t: - with open('a.js') as actual: - fail_if_not_identical_to_file(actual.read(), t + '.js') - if 'emit-spec-wrapper' in t: - with open('a.wat') as actual: - fail_if_not_identical_to_file(actual.read(), t + '.wat') + for t in get_tests(get_test_dir('passes'), ['.wast', '.wasm']): + print('..', os.path.basename(t)) + binary = '.wasm' in t + base = os.path.basename(t).replace('.wast', '').replace('.wasm', '') + passname = base + if passname.isdigit(): + passname = open(os.path.join(get_test_dir('passes'), passname + '.passes')).read().strip() + opts = [('--' + p if not p.startswith('O') else '-' + p) for p in passname.split('_')] + actual = '' + for module, asserts in split_wast(t): + assert len(asserts) == 0 + write_wast('split.wast', module) + cmd = WASM_OPT + opts + ['split.wast', '--print'] + curr = run_command(cmd) + actual += curr + # also check debug mode output is valid + debugged = run_command(cmd + ['--debug'], stderr=subprocess.PIPE) + fail_if_not_contained(actual, debugged) + + # also check pass-debug mode + def check(): + pass_debug = run_command(cmd) + fail_if_not_identical(curr, pass_debug) + with_pass_debug(check) + + expected_file = os.path.join(get_test_dir('passes'), base + ('.bin' if binary else '') + '.txt') + fail_if_not_identical_to_file(actual, expected_file) + + if 'emit-js-wrapper' in t: + with open('a.js') as actual: + fail_if_not_identical_to_file(actual.read(), t + '.js') + if 'emit-spec-wrapper' in t: + with open('a.wat') as actual: + fail_if_not_identical_to_file(actual.read(), t + '.wat') print('\n[ checking wasm-opt parsing & printing... 
]\n') - for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'print'))): - if t.endswith('.wast'): - print('..', t) - wasm = os.path.basename(t).replace('.wast', '') - cmd = WASM_OPT + [os.path.join(options.binaryen_test, 'print', t), '--print', '-all'] - print(' ', ' '.join(cmd)) - actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate() - expected_file = os.path.join(options.binaryen_test, 'print', wasm + '.txt') - fail_if_not_identical_to_file(actual, expected_file) - cmd = WASM_OPT + [os.path.join(options.binaryen_test, 'print', t), '--print-minified', '-all'] - print(' ', ' '.join(cmd)) - actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate() - fail_if_not_identical(actual.strip(), open(os.path.join(options.binaryen_test, 'print', wasm + '.minified.txt')).read().strip()) + for t in get_tests(get_test_dir('print'), ['.wast']): + print('..', os.path.basename(t)) + wasm = os.path.basename(t).replace('.wast', '') + cmd = WASM_OPT + [t, '--print', '-all'] + print(' ', ' '.join(cmd)) + actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate() + expected_file = os.path.join(get_test_dir('print'), wasm + '.txt') + fail_if_not_identical_to_file(actual, expected_file) + cmd = WASM_OPT + [os.path.join(get_test_dir('print'), t), '--print-minified', '-all'] + print(' ', ' '.join(cmd)) + actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate() + fail_if_not_identical(actual.strip(), open(os.path.join(get_test_dir('print'), wasm + '.minified.txt')).read().strip()) print('\n[ checking wasm-opt testcases... 
]\n') - for t in tests: - if t.endswith('.wast') and not t.startswith('spec'): - print('..', t) - t = os.path.join(options.binaryen_test, t) - f = t + '.from-wast' - cmd = WASM_OPT + [t, '--print', '-all'] - actual = run_command(cmd) - actual = actual.replace('printing before:\n', '') + for t in get_tests(options.binaryen_test, ['.wast']): + print('..', os.path.basename(t)) + f = t + '.from-wast' + cmd = WASM_OPT + [t, '--print', '-all'] + actual = run_command(cmd) + actual = actual.replace('printing before:\n', '') - fail_if_not_identical_to_file(actual, f) + fail_if_not_identical_to_file(actual, f) - binary_format_check(t, wasm_as_args=['-g']) # test with debuginfo - binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo') # test without debuginfo + binary_format_check(t, wasm_as_args=['-g']) # test with debuginfo + binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo') # test without debuginfo - minify_check(t) + minify_check(t) print('\n[ checking wasm-opt debugInfo read-write... 
]\n') - test_dir = os.path.join(options.binaryen_root, 'test') - for t in os.listdir(test_dir): - if t.endswith('.fromasm') and 'debugInfo' in t: - print('..', t) - t = os.path.join(test_dir, t) - f = t + '.read-written' - run_command(WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g']) - run_command(WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g']) - actual = run_command(WASM_DIS + ['b.wasm', '--source-map=b.map']) - fail_if_not_identical_to_file(actual, f) + for t in get_tests(options.binaryen_test, ['.fromasm']): + if 'debugInfo' not in t: + continue + print('..', os.path.basename(t)) + f = t + '.read-written' + run_command(WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g']) + run_command(WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g']) + actual = run_command(WASM_DIS + ['b.wasm', '--source-map=b.map']) + fail_if_not_identical_to_file(actual, f) def run_wasm_dis_tests(): print('\n[ checking wasm-dis on provided binaries... 
]\n') - for t in tests: - if t.endswith('.wasm') and not t.startswith('spec'): - print('..', t) - t = os.path.join(options.binaryen_test, t) - cmd = WASM_DIS + [t] - if os.path.isfile(t + '.map'): - cmd += ['--source-map', t + '.map'] + for t in get_tests(options.binaryen_test, ['.wasm']): + print('..', os.path.basename(t)) + cmd = WASM_DIS + [t] + if os.path.isfile(t + '.map'): + cmd += ['--source-map', t + '.map'] - actual = run_command(cmd) - fail_if_not_identical_to_file(actual, t + '.fromBinary') + actual = run_command(cmd) + fail_if_not_identical_to_file(actual, t + '.fromBinary') - # also verify there are no validation errors - def check(): - cmd = WASM_OPT + [t, '-all'] - run_command(cmd) + # also verify there are no validation errors + def check(): + cmd = WASM_OPT + [t, '-all'] + run_command(cmd) - with_pass_debug(check) + with_pass_debug(check) - validate_binary(t) + validate_binary(t) def run_crash_tests(): print("\n[ checking we don't crash on tricky inputs... ]\n") - test_dir = os.path.join(options.binaryen_test, 'crash') - for t in os.listdir(test_dir): - if t.endswith(('.wast', '.wasm')): - print('..', t) - t = os.path.join(test_dir, t) - cmd = WASM_OPT + [t] - # expect a parse error to be reported - run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1) + for t in get_tests(get_test_dir('crash'), ['.wast', '.wasm']): + print('..', os.path.basename(t)) + cmd = WASM_OPT + [t] + # expect a parse error to be reported + run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1) def run_dylink_tests(): print("\n[ we emit dylink sections properly... 
]\n") - for t in os.listdir(options.binaryen_test): - if t.startswith('dylib') and t.endswith('.wasm'): - print('..', t) - t = os.path.join(options.binaryen_test, t) - cmd = WASM_OPT + [t, '-o', 'a.wasm'] - run_command(cmd) - with open('a.wasm', 'rb') as output: - index = output.read().find(b'dylink') - print(' ', index) - assert index == 11, 'dylink section must be first, right after the magic number etc.' + dylink_tests = glob.glob(os.path.join(options.binaryen_test, 'dylib*.wasm')) + for t in sorted(dylink_tests): + print('..', os.path.basename(t)) + cmd = WASM_OPT + [t, '-o', 'a.wasm'] + run_command(cmd) + with open('a.wasm', 'rb') as output: + index = output.read().find(b'dylink') + print(' ', index) + assert index == 11, 'dylink section must be first, right after the magic number etc.' def run_ctor_eval_tests(): print('\n[ checking wasm-ctor-eval... ]\n') - test_dir = os.path.join(options.binaryen_test, 'ctor-eval') - for t in os.listdir(test_dir): - if t.endswith(('.wast', '.wasm')): - print('..', t) - t = os.path.join(test_dir, t) - ctors = open(t + '.ctors').read().strip() - cmd = WASM_CTOR_EVAL + [t, '-o', 'a.wast', '-S', '--ctors', ctors] - run_command(cmd) - actual = open('a.wast').read() - out = t + '.out' - fail_if_not_identical_to_file(actual, out) + for t in get_tests(get_test_dir('ctor-eval'), ['.wast', '.wasm']): + print('..', os.path.basename(t)) + ctors = open(t + '.ctors').read().strip() + cmd = WASM_CTOR_EVAL + [t, '-o', 'a.wast', '-S', '--ctors', ctors] + run_command(cmd) + actual = open('a.wast').read() + out = t + '.out' + fail_if_not_identical_to_file(actual, out) def run_wasm_metadce_tests(): print('\n[ checking wasm-metadce ]\n') - test_dir = os.path.join(options.binaryen_test, 'metadce') - for t in os.listdir(test_dir): - if t.endswith(('.wast', '.wasm')): - print('..', t) - t = os.path.join(test_dir, t) - graph = t + '.graph.txt' - cmd = WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wast', '-S', '-all'] - stdout = 
run_command(cmd) - expected = t + '.dced' - with open('a.wast') as seen: - fail_if_not_identical_to_file(seen.read(), expected) - fail_if_not_identical_to_file(stdout, expected + '.stdout') + for t in get_tests(get_test_dir('metadce'), ['.wast', '.wasm']): + print('..', os.path.basename(t)) + graph = t + '.graph.txt' + cmd = WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wast', '-S', '-all'] + stdout = run_command(cmd) + expected = t + '.dced' + with open('a.wast') as seen: + fail_if_not_identical_to_file(seen.read(), expected) + fail_if_not_identical_to_file(stdout, expected + '.stdout') def run_wasm_reduce_tests(): @@ -280,18 +262,15 @@ def run_wasm_reduce_tests(): print('\n[ checking wasm-reduce testcases]\n') # fixed testcases - test_dir = os.path.join(options.binaryen_test, 'reduce') - for t in os.listdir(test_dir): - if t.endswith('.wast'): - print('..', t) - t = os.path.join(test_dir, t) - # convert to wasm - run_command(WASM_AS + [t, '-o', 'a.wasm']) - run_command(WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4']) - expected = t + '.txt' - run_command(WASM_DIS + ['c.wasm', '-o', 'a.wast']) - with open('a.wast') as seen: - fail_if_not_identical_to_file(seen.read(), expected) + for t in get_tests(get_test_dir('reduce'), ['.wast']): + print('..', os.path.basename(t)) + # convert to wasm + run_command(WASM_AS + [t, '-o', 'a.wasm']) + run_command(WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4']) + expected = t + '.txt' + run_command(WASM_DIS + ['c.wasm', '-o', 'a.wast']) + with open('a.wast') as seen: + fail_if_not_identical_to_file(seen.read(), expected) # run on a nontrivial fuzz testcase, for general coverage # this is very slow in ThreadSanitizer, so avoid it there @@ -314,127 +293,125 @@ def run_spec_tests(): # FIXME we support old and new memory formats, for now, until 0xc, and so can't pass this 
old-style test. BLACKLIST = ['binary.wast'] # FIXME to update the spec to 0xd, we need to implement (register "name") for import.wast - spec_tests = [os.path.join('spec', t) for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'spec'))) if t not in BLACKLIST] + spec_tests = get_tests(get_test_dir('spec'), ['.wast']) + spec_tests = [t for t in spec_tests if os.path.basename(t) not in BLACKLIST] else: spec_tests = options.spec_tests[:] - for t in spec_tests: - if t.startswith('spec') and t.endswith('.wast'): - print('..', t) - wast = os.path.join(options.binaryen_test, t) + for wast in spec_tests: + print('..', os.path.basename(wast)) - # skip checks for some tests - if os.path.basename(wast) in ['linking.wast', 'nop.wast', 'stack.wast', 'typecheck.wast', 'unwind.wast']: # FIXME - continue + # skip checks for some tests + if os.path.basename(wast) in ['linking.wast', 'nop.wast', 'stack.wast', 'typecheck.wast', 'unwind.wast']: # FIXME + continue + + def run_spec_test(wast): + cmd = WASM_SHELL + [wast] + # we must skip the stack machine portions of spec tests or apply other extra args + extra = {} + cmd = cmd + (extra.get(os.path.basename(wast)) or []) + return run_command(cmd, stderr=subprocess.PIPE) + + def run_opt_test(wast): + # check optimization validation + cmd = WASM_OPT + [wast, '-O', '-all'] + run_command(cmd) - def run_spec_test(wast): - cmd = WASM_SHELL + [wast] - # we must skip the stack machine portions of spec tests or apply other extra args - extra = { - } - cmd = cmd + (extra.get(os.path.basename(wast)) or []) - return run_command(cmd, stderr=subprocess.PIPE) - - def run_opt_test(wast): - # check optimization validation - cmd = WASM_OPT + [wast, '-O', '-all'] - run_command(cmd) - - def check_expected(actual, expected): - if expected and os.path.exists(expected): - expected = open(expected).read() - - # fix it up, our pretty (i32.const 83) must become compared to a homely 83 : i32 - def fix_expected(x): - x = x.strip() - if not x: - return x - 
v, t = x.split(' : ') - if v.endswith('.'): - v = v[:-1] # remove trailing '.' - return '(' + t + '.const ' + v + ')' - - def fix_actual(x): - if '[trap ' in x: - return '' + def check_expected(actual, expected): + if expected and os.path.exists(expected): + expected = open(expected).read() + + # fix it up, our pretty (i32.const 83) must become compared to a homely 83 : i32 + def fix_expected(x): + x = x.strip() + if not x: return x + v, t = x.split(' : ') + if v.endswith('.'): + v = v[:-1] # remove trailing '.' + return '(' + t + '.const ' + v + ')' + + def fix_actual(x): + if '[trap ' in x: + return '' + return x + + expected = '\n'.join(map(fix_expected, expected.split('\n'))) + actual = '\n'.join(map(fix_actual, actual.split('\n'))) + print(' (using expected output)') + actual = actual.strip() + expected = expected.strip() + if actual != expected: + fail(actual, expected) + + expected = os.path.join(get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log') + + # some spec tests should fail (actual process failure, not just assert_invalid) + try: + actual = run_spec_test(wast) + except Exception as e: + if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast): + print('<< test failed as expected >>') + continue # don't try all the binary format stuff TODO + else: + fail_with_error(str(e)) - expected = '\n'.join(map(fix_expected, expected.split('\n'))) - actual = '\n'.join(map(fix_actual, actual.split('\n'))) - print(' (using expected output)') - actual = actual.strip() - expected = expected.strip() - if actual != expected: - fail(actual, expected) - - expected = os.path.join(options.binaryen_test, 'spec', 'expected-output', os.path.basename(wast) + '.log') - - # some spec tests should fail (actual process failure, not just assert_invalid) - try: - actual = run_spec_test(wast) - except Exception as e: - if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' 
in t: - print('<< test failed as expected >>') - continue # don't try all the binary format stuff TODO - else: - fail_with_error(str(e)) - - check_expected(actual, expected) - - # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature - if os.path.basename(wast) in ['exports.wast']: # FIXME - continue + check_expected(actual, expected) - # we must ignore some binary format splits - splits_to_skip = { - 'func.wast': [2], - 'return.wast': [2] - } - - # check binary format. here we can verify execution of the final - # result, no need for an output verification - # some wast files cannot be split: - # * comments.wast: contains characters that are not valid utf-8, - # so our string splitting code fails there - if os.path.basename(wast) not in ['comments.wast']: - split_num = 0 - actual = '' - for module, asserts in split_wast(wast): - skip = splits_to_skip.get(os.path.basename(wast)) or [] - if split_num in skip: - print(' skipping split module', split_num - 1) - split_num += 1 - continue - print(' testing split module', split_num) + # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature + if os.path.basename(wast) in ['exports.wast']: # FIXME + continue + + # we must ignore some binary format splits + splits_to_skip = { + 'func.wast': [2], + 'return.wast': [2] + } + + # check binary format. 
here we can verify execution of the final + # result, no need for an output verification + # some wast files cannot be split: + # * comments.wast: contains characters that are not valid utf-8, + # so our string splitting code fails there + if os.path.basename(wast) not in ['comments.wast']: + split_num = 0 + actual = '' + for module, asserts in split_wast(wast): + skip = splits_to_skip.get(os.path.basename(wast)) or [] + if split_num in skip: + print(' skipping split module', split_num - 1) split_num += 1 - write_wast('split.wast', module, asserts) - run_spec_test('split.wast') # before binary stuff - just check it's still ok split out - run_opt_test('split.wast') # also that our optimizer doesn't break on it - result_wast = binary_format_check('split.wast', verify_final_result=False, original_wast=wast) - # add the asserts, and verify that the test still passes - open(result_wast, 'a').write('\n' + '\n'.join(asserts)) - actual += run_spec_test(result_wast) - # compare all the outputs to the expected output - check_expected(actual, os.path.join(options.binaryen_test, 'spec', 'expected-output', os.path.basename(wast) + '.log')) - else: - # handle unsplittable wast files - run_spec_test(wast) + continue + print(' testing split module', split_num) + split_num += 1 + write_wast('split.wast', module, asserts) + run_spec_test('split.wast') # before binary stuff - just check it's still ok split out + run_opt_test('split.wast') # also that our optimizer doesn't break on it + result_wast = binary_format_check('split.wast', verify_final_result=False, original_wast=wast) + # add the asserts, and verify that the test still passes + open(result_wast, 'a').write('\n' + '\n'.join(asserts)) + actual += run_spec_test(result_wast) + # compare all the outputs to the expected output + check_expected(actual, os.path.join(get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')) + else: + # handle unsplittable wast files + run_spec_test(wast) def run_validator_tests(): 
print('\n[ running validation tests... ]\n') # Ensure the tests validate by default - cmd = WASM_AS + [os.path.join(options.binaryen_test, 'validator', 'invalid_export.wast')] + cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_export.wast')] run_command(cmd) - cmd = WASM_AS + [os.path.join(options.binaryen_test, 'validator', 'invalid_import.wast')] + cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_import.wast')] run_command(cmd) - cmd = WASM_AS + ['--validate=web', os.path.join(options.binaryen_test, 'validator', 'invalid_export.wast')] + cmd = WASM_AS + ['--validate=web', os.path.join(get_test_dir('validator'), 'invalid_export.wast')] run_command(cmd, expected_status=1) - cmd = WASM_AS + ['--validate=web', os.path.join(options.binaryen_test, 'validator', 'invalid_import.wast')] + cmd = WASM_AS + ['--validate=web', os.path.join(get_test_dir('validator'), 'invalid_import.wast')] run_command(cmd, expected_status=1) - cmd = WASM_AS + ['--validate=none', os.path.join(options.binaryen_test, 'validator', 'invalid_return.wast')] + cmd = WASM_AS + ['--validate=none', os.path.join(get_test_dir('validator'), 'invalid_return.wast')] run_command(cmd) - cmd = WASM_AS + [os.path.join(options.binaryen_test, 'validator', 'invalid_number.wast')] + cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_number.wast')] run_command(cmd, expected_status=1) @@ -459,16 +436,14 @@ def run_vanilla_tests(): print('____' + ' '.join(command)) subprocess.check_call(command) - for c in sorted(os.listdir(os.path.join(options.binaryen_test, 'wasm_backend'))): - if not c.endswith('cpp'): - continue - print('..', c) + for c in get_tests(get_test_dir('wasm_backend'), '.cpp'): + print('..', os.path.basename(c)) base = c.replace('.cpp', '').replace('.c', '') - expected = open(os.path.join(options.binaryen_test, 'wasm_backend', base + '.txt')).read() + expected = open(os.path.join(base + '.txt')).read() for opts in [[], ['-O1'], ['-O2']]: # only my code is a 
hack we used early in wasm backend dev, which somehow worked, but only with -O1 only = [] if opts != ['-O1'] or '_only' not in base else ['-s', 'ONLY_MY_CODE=1'] - command = [VANILLA_EMCC, '-o', 'a.wasm.js', os.path.join(options.binaryen_test, 'wasm_backend', c)] + opts + only + command = [VANILLA_EMCC, '-o', 'a.wasm.js', c] + opts + only print('....' + ' '.join(command)) if os.path.exists('a.wasm.js'): os.unlink('a.wasm.js') @@ -492,12 +467,12 @@ def run_gcc_tests(): fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!') return - for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'example'))): + for t in sorted(os.listdir(get_test_dir('example'))): output_file = 'example' cmd = ['-I' + os.path.join(options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file] if t.endswith('.txt'): # check if there is a trace in the file, if so, we should build it - out = subprocess.check_output([os.path.join(options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(options.binaryen_test, 'example', t)]) + out = subprocess.check_output([os.path.join(options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(get_test_dir('example'), t)]) if len(out) == 0: print(' (no trace in ', t, ')') continue @@ -505,10 +480,10 @@ def run_gcc_tests(): src = 'trace.cpp' with open(src, 'wb') as o: o.write(out) - expected = os.path.join(options.binaryen_test, 'example', t + '.txt') + expected = os.path.join(get_test_dir('example'), t + '.txt') else: - src = os.path.join(options.binaryen_test, 'example', t) - expected = os.path.join(options.binaryen_test, 'example', '.'.join(t.split('.')[:-1]) + '.txt') + src = os.path.join(get_test_dir('example'), t) + expected = os.path.join(get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt') if src.endswith(('.c', '.cpp')): # build the C file separately libpath = os.path.join(os.path.dirname(options.binaryen_bin), 'lib') |