Diffstat (limited to 'check.py')
-rwxr-xr-x  check.py  287
1 file changed, 138 insertions, 149 deletions
@@ -22,35 +22,24 @@
 import sys
 import unittest
 from collections import OrderedDict
-from scripts.test.support import run_command, split_wast, write_wast
-from scripts.test.shared import (
-    BIN_DIR, NATIVECC, NATIVEXX, NODEJS, WASM_AS,
-    WASM_CTOR_EVAL, WASM_OPT, WASM_SHELL, WASM_METADCE, WASM_DIS, WASM_REDUCE,
-    binary_format_check, delete_from_orbit, fail, fail_with_error,
-    fail_if_not_identical, fail_if_not_contained, has_vanilla_emcc,
-    has_vanilla_llvm, minify_check, options, requested, warnings,
-    has_shell_timeout, fail_if_not_identical_to_file, with_pass_debug,
-    validate_binary, get_test_dir, get_tests
-)
-
-# For shared.num_failures. Cannot import directly because modifications made in
-# shared.py would not affect the version imported here.
-from scripts.test import shared
 from scripts.test import asm2wasm
+from scripts.test import binaryenjs
 from scripts.test import lld
+from scripts.test import shared
+from scripts.test import support
 from scripts.test import wasm2js
-from scripts.test import binaryenjs

-if options.interpreter:
-    print('[ using wasm interpreter at "%s" ]' % options.interpreter)
-    assert os.path.exists(options.interpreter), 'interpreter not found'
+
+if shared.options.interpreter:
+    print('[ using wasm interpreter at "%s" ]' % shared.options.interpreter)
+    assert os.path.exists(shared.options.interpreter), 'interpreter not found'


 def run_help_tests():
     print('[ checking --help is useful... ]\n')
     not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
-    bin_files = [os.path.join(options.binaryen_bin, f) for f in os.listdir(options.binaryen_bin)]
+    bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
     executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
     executables = sorted(executables)
     assert len(executables)
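The hunk above is the heart of the change: the removed comment noted that names pulled in with `from scripts.test.shared import ...` are bound once, at import time, so a counter such as shared.num_failures, which shared.py rebinds later, is never seen through the from-import. The commit therefore switches every call site to module-qualified access. A minimal, self-contained sketch of the pitfall; the stand-in module below is hypothetical, not the real scripts.test.shared:

    import types

    # stand-in module; in check.py this role is played by scripts.test.shared
    shared_stub = types.ModuleType('shared_stub')
    shared_stub.num_failures = 0

    num_failures = shared_stub.num_failures  # what 'from shared import num_failures' binds
    shared_stub.num_failures += 1            # e.g. a failing check bumps the counter

    print(num_failures)              # 0 -- the from-import snapshot is stale
    print(shared_stub.num_failures)  # 1 -- attribute access sees the live value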
@@ -83,144 +72,144 @@ def run_wasm_opt_tests():
     print('\n[ checking wasm-opt -o notation... ]\n')
     for extra_args in [[], ['--no-validation']]:
-        wast = os.path.join(options.binaryen_test, 'hello_world.wast')
-        delete_from_orbit('a.wast')
+        wast = os.path.join(shared.options.binaryen_test, 'hello_world.wast')
+        shared.delete_from_orbit('a.wast')
         out = 'a.wast'
-        cmd = WASM_OPT + [wast, '-o', out, '-S'] + extra_args
-        run_command(cmd)
-        fail_if_not_identical_to_file(open(out).read(), wast)
+        cmd = shared.WASM_OPT + [wast, '-o', out, '-S'] + extra_args
+        support.run_command(cmd)
+        shared.fail_if_not_identical_to_file(open(out).read(), wast)

     print('\n[ checking wasm-opt binary reading/writing... ]\n')
-    shutil.copyfile(os.path.join(options.binaryen_test, 'hello_world.wast'), 'a.wast')
-    delete_from_orbit('a.wasm')
-    delete_from_orbit('b.wast')
-    run_command(WASM_OPT + ['a.wast', '-o', 'a.wasm'])
+    shutil.copyfile(os.path.join(shared.options.binaryen_test, 'hello_world.wast'), 'a.wast')
+    shared.delete_from_orbit('a.wasm')
+    shared.delete_from_orbit('b.wast')
+    support.run_command(shared.WASM_OPT + ['a.wast', '-o', 'a.wasm'])
     assert open('a.wasm', 'rb').read()[0] == 0, 'we emit binary by default'
-    run_command(WASM_OPT + ['a.wasm', '-o', 'b.wast', '-S'])
+    support.run_command(shared.WASM_OPT + ['a.wasm', '-o', 'b.wast', '-S'])
     assert open('b.wast', 'rb').read()[0] != 0, 'we emit text with -S'

     print('\n[ checking wasm-opt passes... ]\n')
-    for t in get_tests(get_test_dir('passes'), ['.wast', '.wasm']):
+    for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
         print('..', os.path.basename(t))
         binary = '.wasm' in t
         base = os.path.basename(t).replace('.wast', '').replace('.wasm', '')
         passname = base
         if passname.isdigit():
-            passname = open(os.path.join(get_test_dir('passes'), passname + '.passes')).read().strip()
+            passname = open(os.path.join(shared.get_test_dir('passes'), passname + '.passes')).read().strip()
         opts = [('--' + p if not p.startswith('O') else '-' + p) for p in passname.split('_')]
         actual = ''
-        for module, asserts in split_wast(t):
+        for module, asserts in support.split_wast(t):
             assert len(asserts) == 0
-            write_wast('split.wast', module)
-            cmd = WASM_OPT + opts + ['split.wast', '--print']
-            curr = run_command(cmd)
+            support.write_wast('split.wast', module)
+            cmd = shared.WASM_OPT + opts + ['split.wast', '--print']
+            curr = support.run_command(cmd)
             actual += curr
             # also check debug mode output is valid
-            debugged = run_command(cmd + ['--debug'], stderr=subprocess.PIPE)
-            fail_if_not_contained(actual, debugged)
+            debugged = support.run_command(cmd + ['--debug'], stderr=subprocess.PIPE)
+            shared.fail_if_not_contained(actual, debugged)

             # also check pass-debug mode
             def check():
-                pass_debug = run_command(cmd)
-                fail_if_not_identical(curr, pass_debug)
-            with_pass_debug(check)
+                pass_debug = support.run_command(cmd)
+                shared.fail_if_not_identical(curr, pass_debug)
+            shared.with_pass_debug(check)

-        expected_file = os.path.join(get_test_dir('passes'), base + ('.bin' if binary else '') + '.txt')
-        fail_if_not_identical_to_file(actual, expected_file)
+        expected_file = os.path.join(shared.get_test_dir('passes'), base + ('.bin' if binary else '') + '.txt')
+        shared.fail_if_not_identical_to_file(actual, expected_file)

         if 'emit-js-wrapper' in t:
             with open('a.js') as actual:
-                fail_if_not_identical_to_file(actual.read(), t + '.js')
+                shared.fail_if_not_identical_to_file(actual.read(), t + '.js')
         if 'emit-spec-wrapper' in t:
             with open('a.wat') as actual:
-                fail_if_not_identical_to_file(actual.read(), t + '.wat')
+                shared.fail_if_not_identical_to_file(actual.read(), t + '.wat')
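The passes loop above derives wasm-opt flags from each test's base filename; a small illustration of that one-liner (the filename is hypothetical):

    # Illustration of the flag derivation in the passes loop above.
    # Underscore-separated pass names become --flags; names starting with 'O'
    # (optimization levels such as Os or O3) get a single dash.
    passname = 'coalesce-locals_Os'  # hypothetical base filename
    opts = [('--' + p if not p.startswith('O') else '-' + p)
            for p in passname.split('_')]
    print(opts)  # ['--coalesce-locals', '-Os']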
     print('\n[ checking wasm-opt parsing & printing... ]\n')
-    for t in get_tests(get_test_dir('print'), ['.wast']):
+    for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
         print('..', os.path.basename(t))
         wasm = os.path.basename(t).replace('.wast', '')
-        cmd = WASM_OPT + [t, '--print', '-all']
+        cmd = shared.WASM_OPT + [t, '--print', '-all']
         print(' ', ' '.join(cmd))
         actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
-        expected_file = os.path.join(get_test_dir('print'), wasm + '.txt')
-        fail_if_not_identical_to_file(actual, expected_file)
-        cmd = WASM_OPT + [os.path.join(get_test_dir('print'), t), '--print-minified', '-all']
+        expected_file = os.path.join(shared.get_test_dir('print'), wasm + '.txt')
+        shared.fail_if_not_identical_to_file(actual, expected_file)
+        cmd = shared.WASM_OPT + [os.path.join(shared.get_test_dir('print'), t), '--print-minified', '-all']
         print(' ', ' '.join(cmd))
         actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
-        fail_if_not_identical(actual.strip(), open(os.path.join(get_test_dir('print'), wasm + '.minified.txt')).read().strip())
+        shared.fail_if_not_identical(actual.strip(), open(os.path.join(shared.get_test_dir('print'), wasm + '.minified.txt')).read().strip())

     print('\n[ checking wasm-opt testcases... ]\n')
-    for t in get_tests(options.binaryen_test, ['.wast']):
+    for t in shared.get_tests(shared.options.binaryen_test, ['.wast']):
         print('..', os.path.basename(t))
         f = t + '.from-wast'
-        cmd = WASM_OPT + [t, '--print', '-all']
-        actual = run_command(cmd)
+        cmd = shared.WASM_OPT + [t, '--print', '-all']
+        actual = support.run_command(cmd)
         actual = actual.replace('printing before:\n', '')

-        fail_if_not_identical_to_file(actual, f)
+        shared.fail_if_not_identical_to_file(actual, f)

-        binary_format_check(t, wasm_as_args=['-g'])  # test with debuginfo
-        binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo')  # test without debuginfo
+        shared.binary_format_check(t, wasm_as_args=['-g'])  # test with debuginfo
+        shared.binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo')  # test without debuginfo

-        minify_check(t)
+        shared.minify_check(t)

     print('\n[ checking wasm-opt debugInfo read-write... ]\n')
-    for t in get_tests(options.binaryen_test, ['.fromasm']):
+    for t in shared.get_tests(shared.options.binaryen_test, ['.fromasm']):
         if 'debugInfo' not in t:
             continue
         print('..', os.path.basename(t))
         f = t + '.read-written'
-        run_command(WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g'])
-        run_command(WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g'])
-        actual = run_command(WASM_DIS + ['b.wasm', '--source-map=b.map'])
-        fail_if_not_identical_to_file(actual, f)
+        support.run_command(shared.WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g'])
+        support.run_command(shared.WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g'])
+        actual = support.run_command(shared.WASM_DIS + ['b.wasm', '--source-map=b.map'])
+        shared.fail_if_not_identical_to_file(actual, f)
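The debugInfo check above round-trips a source map through all three tools. A standalone sketch of the same pipeline; the flags are the ones used in the test, while the file names here are illustrative:

    # Sketch of the source-map round-trip tested above: assemble with a map,
    # optimize while rewriting the map, then disassemble against the new map.
    import subprocess

    subprocess.check_call(['wasm-as', 'in.wast', '--source-map=a.map', '-o', 'a.wasm', '-g'])
    subprocess.check_call(['wasm-opt', 'a.wasm', '--input-source-map=a.map',
                           '-o', 'b.wasm', '--output-source-map=b.map', '-g'])
    text = subprocess.check_output(['wasm-dis', 'b.wasm', '--source-map=b.map'],
                                   universal_newlines=True)
    # 'text' should match the checked-in '.read-written' expectation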


 def run_wasm_dis_tests():
     print('\n[ checking wasm-dis on provided binaries... ]\n')
-    for t in get_tests(options.binaryen_test, ['.wasm']):
+    for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
         print('..', os.path.basename(t))
-        cmd = WASM_DIS + [t]
+        cmd = shared.WASM_DIS + [t]
         if os.path.isfile(t + '.map'):
             cmd += ['--source-map', t + '.map']
-        actual = run_command(cmd)
-        fail_if_not_identical_to_file(actual, t + '.fromBinary')
+        actual = support.run_command(cmd)
+        shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')

         # also verify there are no validation errors
         def check():
-            cmd = WASM_OPT + [t, '-all']
-            run_command(cmd)
+            cmd = shared.WASM_OPT + [t, '-all']
+            support.run_command(cmd)

-        with_pass_debug(check)
+        shared.with_pass_debug(check)

-        validate_binary(t)
+        shared.validate_binary(t)


 def run_crash_tests():
     print("\n[ checking we don't crash on tricky inputs... ]\n")
-    for t in get_tests(get_test_dir('crash'), ['.wast', '.wasm']):
+    for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
         print('..', os.path.basename(t))
-        cmd = WASM_OPT + [t]
+        cmd = shared.WASM_OPT + [t]
         # expect a parse error to be reported
-        run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
+        support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
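The crash test above exercises the richest run_command signature in this file. A speculative sketch of the helper's contract, inferred only from its call sites here; the real implementation in scripts/test/support.py may differ:

    # Speculative sketch of support.run_command, based on the keyword
    # arguments used in check.py (expected_status, expected_err,
    # err_contains, stderr); not the actual implementation.
    import subprocess

    def run_command(cmd, expected_status=0, stderr=subprocess.PIPE,
                    expected_err=None, err_contains=False):
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr,
                                universal_newlines=True)
        out, err = proc.communicate()
        assert proc.returncode == expected_status, 'unexpected exit from %s' % cmd
        if expected_err is not None:
            ok = expected_err in err if err_contains else expected_err == err
            assert ok, 'unexpected stderr: %s' % err
        return out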
 def run_dylink_tests():
     print("\n[ we emit dylink sections properly... ]\n")
-    dylink_tests = glob.glob(os.path.join(options.binaryen_test, 'dylib*.wasm'))
+    dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
     for t in sorted(dylink_tests):
         print('..', os.path.basename(t))
-        cmd = WASM_OPT + [t, '-o', 'a.wasm']
-        run_command(cmd)
+        cmd = shared.WASM_OPT + [t, '-o', 'a.wasm']
+        support.run_command(cmd)
         with open('a.wasm', 'rb') as output:
             index = output.read().find(b'dylink')
             print(' ', index)
@@ -230,56 +219,56 @@ def run_dylink_tests():


 def run_ctor_eval_tests():
     print('\n[ checking wasm-ctor-eval... ]\n')
-    for t in get_tests(get_test_dir('ctor-eval'), ['.wast', '.wasm']):
+    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
         print('..', os.path.basename(t))
         ctors = open(t + '.ctors').read().strip()
-        cmd = WASM_CTOR_EVAL + [t, '-o', 'a.wast', '-S', '--ctors', ctors]
-        run_command(cmd)
+        cmd = shared.WASM_CTOR_EVAL + [t, '-o', 'a.wast', '-S', '--ctors', ctors]
+        support.run_command(cmd)
         actual = open('a.wast').read()
         out = t + '.out'
-        fail_if_not_identical_to_file(actual, out)
+        shared.fail_if_not_identical_to_file(actual, out)


 def run_wasm_metadce_tests():
     print('\n[ checking wasm-metadce ]\n')
-    for t in get_tests(get_test_dir('metadce'), ['.wast', '.wasm']):
+    for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
         print('..', os.path.basename(t))
         graph = t + '.graph.txt'
-        cmd = WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wast', '-S', '-all']
-        stdout = run_command(cmd)
+        cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wast', '-S', '-all']
+        stdout = support.run_command(cmd)
         expected = t + '.dced'
         with open('a.wast') as seen:
-            fail_if_not_identical_to_file(seen.read(), expected)
-        fail_if_not_identical_to_file(stdout, expected + '.stdout')
+            shared.fail_if_not_identical_to_file(seen.read(), expected)
+        shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')


 def run_wasm_reduce_tests():
-    if not has_shell_timeout():
+    if not shared.has_shell_timeout():
         print('\n[ skipping wasm-reduce testcases ]\n')
         return

     print('\n[ checking wasm-reduce testcases ]\n')
     # fixed testcases
-    for t in get_tests(get_test_dir('reduce'), ['.wast']):
+    for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
         print('..', os.path.basename(t))
         # convert to wasm
-        run_command(WASM_AS + [t, '-o', 'a.wasm'])
-        run_command(WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
+        support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm'])
+        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
         expected = t + '.txt'
-        run_command(WASM_DIS + ['c.wasm', '-o', 'a.wast'])
+        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wast'])
         with open('a.wast') as seen:
-            fail_if_not_identical_to_file(seen.read(), expected)
+            shared.fail_if_not_identical_to_file(seen.read(), expected)

     # run on a nontrivial fuzz testcase, for general coverage
     # this is very slow in ThreadSanitizer, so avoid it there
     if 'fsanitize=thread' not in str(os.environ):
         print('\n[ checking wasm-reduce fuzz testcase ]\n')
-        run_command(WASM_OPT + [os.path.join(options.binaryen_test, 'unreachable-import_wasm-only.asm.js'), '-ttf', '-Os', '-o', 'a.wasm', '-all'])
+        support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'unreachable-import_wasm-only.asm.js'), '-ttf', '-Os', '-o', 'a.wasm', '-all'])
         before = os.stat('a.wasm').st_size
-        run_command(WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
+        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
         after = os.stat('c.wasm').st_size
         # 0.65 is a custom threshold to check if we have shrunk the output
         # sufficiently
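A sketch of the wasm-reduce invocation used above, with my reading of the flags spelled out in comments; treat the flag semantics as an assumption rather than Binaryen documentation:

    # --command  script run on each candidate; its exit status defines an
    #            "interesting" (still-failing) testcase
    # -t         the test file the command reads; each candidate reduction
    #            is written there (b.wasm)
    # -w         the working file holding the smallest interesting wasm
    #            found so far (c.wasm)
    import subprocess

    subprocess.check_call([
        'wasm-reduce', 'a.wasm',
        '--command=wasm-opt b.wasm --fuzz-exec -all',
        '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4',
    ])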
@@ -289,14 +278,14 @@ def run_wasm_reduce_tests():


 def run_spec_tests():
     print('\n[ checking wasm-shell spec testcases... ]\n')

-    if not options.spec_tests:
+    if not shared.options.spec_tests:
         # FIXME we support old and new memory formats, for now, until 0xc, and so can't pass this old-style test.
         BLACKLIST = ['binary.wast']
         # FIXME to update the spec to 0xd, we need to implement (register "name") for import.wast
-        spec_tests = get_tests(get_test_dir('spec'), ['.wast'])
+        spec_tests = shared.get_tests(shared.get_test_dir('spec'), ['.wast'])
         spec_tests = [t for t in spec_tests if os.path.basename(t) not in BLACKLIST]
     else:
-        spec_tests = options.spec_tests[:]
+        spec_tests = shared.options.spec_tests[:]

     for wast in spec_tests:
         print('..', os.path.basename(wast))
@@ -306,16 +295,16 @@
             continue

         def run_spec_test(wast):
-            cmd = WASM_SHELL + [wast]
+            cmd = shared.WASM_SHELL + [wast]
             # we must skip the stack machine portions of spec tests or apply other extra args
             extra = {}
             cmd = cmd + (extra.get(os.path.basename(wast)) or [])
-            return run_command(cmd, stderr=subprocess.PIPE)
+            return support.run_command(cmd, stderr=subprocess.PIPE)

         def run_opt_test(wast):
             # check optimization validation
-            cmd = WASM_OPT + [wast, '-O', '-all']
-            run_command(cmd)
+            cmd = shared.WASM_OPT + [wast, '-O', '-all']
+            support.run_command(cmd)

         def check_expected(actual, expected):
             if expected and os.path.exists(expected):
@@ -342,9 +331,9 @@
                 actual = actual.strip()
                 expected = expected.strip()
                 if actual != expected:
-                    fail(actual, expected)
+                    shared.fail(actual, expected)

-        expected = os.path.join(get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')
+        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')

         # some spec tests should fail (actual process failure, not just assert_invalid)
         try:
@@ -354,7 +343,7 @@
                 print('<< test failed as expected >>')
                 continue  # don't try all the binary format stuff TODO
             else:
-                fail_with_error(str(e))
+                shared.fail_with_error(str(e))

         check_expected(actual, expected)
@@ -376,7 +365,7 @@ if os.path.basename(wast) not in ['comments.wast']:
             split_num = 0
             actual = ''
-            for module, asserts in split_wast(wast):
+            for module, asserts in support.split_wast(wast):
                 skip = splits_to_skip.get(os.path.basename(wast)) or []
                 if split_num in skip:
                     print(' skipping split module', split_num - 1)
                     split_num += 1
@@ -384,15 +373,15 @@
                     continue
                 print(' testing split module', split_num)
                 split_num += 1
-                write_wast('split.wast', module, asserts)
+                support.write_wast('split.wast', module, asserts)
                 run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                 run_opt_test('split.wast')  # also that our optimizer doesn't break on it
-                result_wast = binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
+                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                 # add the asserts, and verify that the test still passes
                 open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                 actual += run_spec_test(result_wast)
             # compare all the outputs to the expected output
-            check_expected(actual, os.path.join(get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
+            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
         else:
             # handle unsplittable wast files
             run_spec_test(wast)
@@ -401,42 +390,42 @@


 def run_validator_tests():
     print('\n[ running validation tests... ]\n')
     # Ensure the tests validate by default
-    cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_export.wast')]
-    run_command(cmd)
-    cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_import.wast')]
-    run_command(cmd)
-    cmd = WASM_AS + ['--validate=web', os.path.join(get_test_dir('validator'), 'invalid_export.wast')]
-    run_command(cmd, expected_status=1)
-    cmd = WASM_AS + ['--validate=web', os.path.join(get_test_dir('validator'), 'invalid_import.wast')]
-    run_command(cmd, expected_status=1)
-    cmd = WASM_AS + ['--validate=none', os.path.join(get_test_dir('validator'), 'invalid_return.wast')]
-    run_command(cmd)
-    cmd = WASM_AS + [os.path.join(get_test_dir('validator'), 'invalid_number.wast')]
-    run_command(cmd, expected_status=1)
+    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
+    support.run_command(cmd)
+    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
+    support.run_command(cmd)
+    cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
+    support.run_command(cmd, expected_status=1)
+    cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
+    support.run_command(cmd, expected_status=1)
+    cmd = shared.WASM_AS + ['--validate=none', os.path.join(shared.get_test_dir('validator'), 'invalid_return.wast')]
+    support.run_command(cmd)
+    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_number.wast')]
+    support.run_command(cmd, expected_status=1)


 def run_vanilla_tests():
-    if not (has_vanilla_emcc and has_vanilla_llvm and 0):
+    if not (shared.has_vanilla_emcc and shared.has_vanilla_llvm and 0):
         print('\n[ skipping emcc WASM_BACKEND testcases...]\n')
         return

     print('\n[ checking emcc WASM_BACKEND testcases...]\n')

     try:
-        if has_vanilla_llvm:
-            os.environ['LLVM'] = BIN_DIR  # use the vanilla LLVM
+        if shared.has_vanilla_llvm:
+            os.environ['LLVM'] = shared.BIN_DIR  # use the vanilla LLVM
         else:
             # if we did not set vanilla llvm, then we must set this env var to make emcc use the wasm backend.
             # (if we are using vanilla llvm, things should just work)
             print('(not using vanilla llvm, so setting env var to tell emcc to use wasm backend)')
             os.environ['EMCC_WASM_BACKEND'] = '1'
-        VANILLA_EMCC = os.path.join(options.binaryen_test, 'emscripten', 'emcc')
+        VANILLA_EMCC = os.path.join(shared.options.binaryen_test, 'emscripten', 'emcc')
         # run emcc to make sure it sets itself up properly, if it was never run before
         command = [VANILLA_EMCC, '-v']
         print('____' + ' '.join(command))
         subprocess.check_call(command)

-        for c in get_tests(get_test_dir('wasm_backend'), '.cpp'):
+        for c in shared.get_tests(shared.get_test_dir('wasm_backend'), '.cpp'):
             print('..', os.path.basename(c))
             base = c.replace('.cpp', '').replace('.c', '')
             expected = open(os.path.join(base + '.txt')).read()
@@ -448,14 +437,14 @@
             if os.path.exists('a.wasm.js'):
                 os.unlink('a.wasm.js')
             subprocess.check_call(command)
-            if NODEJS:
+            if shared.NODEJS:
                 print(' (check in node)')
-                cmd = [NODEJS, 'a.wasm.js']
-                out = run_command(cmd)
+                cmd = [shared.NODEJS, 'a.wasm.js']
+                out = support.run_command(cmd)
                 if out.strip() != expected.strip():
-                    fail(out, expected)
+                    shared.fail(out, expected)
     finally:
-        if has_vanilla_llvm:
+        if shared.has_vanilla_llvm:
             del os.environ['LLVM']
         else:
             del os.environ['EMCC_WASM_BACKEND']
@@ -463,16 +452,16 @@


 def run_gcc_tests():
     print('\n[ checking native gcc testcases...]\n')
-    if not NATIVECC or not NATIVEXX:
-        fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
+    if not shared.NATIVECC or not shared.NATIVEXX:
+        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
         return

-    for t in sorted(os.listdir(get_test_dir('example'))):
+    for t in sorted(os.listdir(shared.get_test_dir('example'))):
         output_file = 'example'
-        cmd = ['-I' + os.path.join(options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
+        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
         if t.endswith('.txt'):
             # check if there is a trace in the file, if so, we should build it
-            out = subprocess.check_output([os.path.join(options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(get_test_dir('example'), t)])
+            out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
             if len(out) == 0:
                 print(' (no trace in ', t, ')')
                 continue
@@ -480,15 +469,15 @@
             src = 'trace.cpp'
             with open(src, 'wb') as o:
                 o.write(out)
-            expected = os.path.join(get_test_dir('example'), t + '.txt')
+            expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
         else:
-            src = os.path.join(get_test_dir('example'), t)
-            expected = os.path.join(get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
+            src = os.path.join(shared.get_test_dir('example'), t)
+            expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
         if src.endswith(('.c', '.cpp')):
             # build the C file separately
-            libpath = os.path.join(os.path.dirname(options.binaryen_bin), 'lib')
-            extra = [NATIVECC, src, '-c', '-o', 'example.o',
-                     '-I' + os.path.join(options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
+            libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
+            extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
+                     '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
             if src.endswith('.cpp'):
                 extra += ['-std=c++11']
             if os.environ.get('COMPILER_FLAGS'):
@@ -504,7 +493,7 @@
             if os.environ.get('COMPILER_FLAGS'):
                 for f in os.environ.get('COMPILER_FLAGS').split(' '):
                     cmd.append(f)
-            cmd = [NATIVEXX, '-std=c++11'] + cmd
+            cmd = [shared.NATIVEXX, '-std=c++11'] + cmd
             print('link: ', ' '.join(cmd))
             subprocess.check_call(cmd)
             print('run...', output_file)
@@ -514,17 +503,17 @@
             # Also removes debug directory produced on Mac OS
             shutil.rmtree(output_file + '.dSYM')

-            fail_if_not_identical_to_file(actual, expected)
+            shared.fail_if_not_identical_to_file(actual, expected)


 def run_unittest():
     print('\n[ checking unit tests...]\n')
     # equivalent to `python -m unittest discover -s ./test -v`
-    suite = unittest.defaultTestLoader.discover(os.path.dirname(options.binaryen_test))
-    result = unittest.TextTestRunner(verbosity=2, failfast=options.abort_on_first_failure).run(suite)
+    suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
+    result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
     shared.num_failures += len(result.errors) + len(result.failures)
-    if options.abort_on_first_failure and shared.num_failures:
+    if shared.options.abort_on_first_failure and shared.num_failures:
         raise Exception("unittest failed")
@@ -552,20 +541,20 @@ TEST_SUITES = OrderedDict([

 # Run all the tests
 def main():
-    if options.list_suites:
+    if shared.options.list_suites:
         for suite in TEST_SUITES.keys():
             print(suite)
         return 0

-    for test in requested or TEST_SUITES.keys():
+    for test in shared.requested or TEST_SUITES.keys():
         TEST_SUITES[test]()

     # Check/display the results
     if shared.num_failures == 0:
         print('\n[ success! ]')
-    if warnings:
-        print('\n' + '\n'.join(warnings))
+    if shared.warnings:
+        print('\n' + '\n'.join(shared.warnings))

     if shared.num_failures > 0:
         print('\n[ ' + str(shared.num_failures) + ' failures! ]')
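For orientation, a hedged sketch of how the main() logic above is presumably driven from the command line. The suite names come from the TEST_SUITES OrderedDict, whose entries are elided from this hunk, so the names shown here are hypothetical:

    # Hypothetical invocations, based only on the main() logic above:
    #   python check.py --list-suites   # prints each TEST_SUITES key
    #   python check.py wasm-opt spec   # shared.requested limits the run
    #   python check.py                 # empty 'requested' runs every suite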