def run_gcc_tests():
    """Compile and run the native C/C++ example testcases with the host
    compilers (shared.NATIVECC / shared.NATIVEXX), comparing each program's
    stdout against its recorded .txt expectation.

    Also builds C-API trace files extracted from .txt testcases via
    clean_c_api_trace.py. Fails the whole run if no native compiler is found.
    """
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        # every testcase is compiled+linked into a local executable 'example'
        output_file = 'example'
        # common link flags; the compile/link commands are prepended later
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
        if t.endswith('.txt'):
            # check if there is a trace in the file, if so, we should build it
            out = subprocess.check_output(
                [os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'),
                 os.path.join(shared.get_test_dir('example'), t)])
            if len(out) == 0:
                print(' (no trace in ', t, ')')
                continue
            print(' (will check trace in ', t, ')')
            src = 'trace.cpp'
            with open(src, 'wb') as o:
                o.write(out)
            # a trace testcase's expected output lives next to it as <t>.txt
            expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
        else:
            src = os.path.join(shared.get_test_dir('example'), t)
            # strip the source extension to find the expected-output file
            expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
        if src.endswith(('.c', '.cpp')):
            # build the C file separately
            libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
            extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
                     '-I' + os.path.join(shared.options.binaryen_root, 'src'),
                     '-g', '-L' + libpath, '-pthread']
            if src.endswith('.cpp'):
                extra += ['-std=c++11']
            if os.environ.get('COMPILER_FLAGS'):
                for f in os.environ.get('COMPILER_FLAGS').split(' '):
                    extra.append(f)
            print('build: ', ' '.join(extra))
            subprocess.check_call(extra)
            # Link against the binaryen C library DSO, using an executable-relative rpath
            cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        else:
            # nothing buildable for this entry
            continue
        print(' ', t, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++11'] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        if sys.platform == 'darwin':
            # Also removes debug directory produced on Mac OS
            shutil.rmtree(output_file + '.dSYM')
        shared.fail_if_not_identical_to_file(actual, expected)
def run_gcc_tests():
    """Compile and run the native C/C++ example testcases with the host
    compilers, comparing each program's stdout against its .txt expectation.

    Skips entirely on Windows or when no native compiler is in PATH.
    """
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error(
            'Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    # windows + gcc will need some work
    if shared.skip_if_on_windows('gcc'):
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        output_file = 'example'
        # FIX: the include path must point at the binaryen 'src' directory
        # (it was os.path.join(binaryen_root, 't'), which does not exist;
        # every other build command in this file, including the compile step
        # below, uses 'src')
        cmd = [
            '-I' + os.path.join(shared.options.binaryen_root, 'src'),
            '-g', '-pthread', '-o', output_file
        ]
        if not t.endswith(('.c', '.cpp')):
            continue
        src = os.path.join(shared.get_test_dir('example'), t)
        # expected output lives next to the source, with a .txt extension
        expected = os.path.join(shared.get_test_dir('example'),
                                '.'.join(t.split('.')[:-1]) + '.txt')
        # build the C file separately
        libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
        extra = [
            shared.NATIVECC, src, '-c', '-o', 'example.o',
            '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g',
            '-L' + libpath, '-pthread'
        ]
        if src.endswith('.cpp'):
            extra += ['-std=c++' + str(shared.cxx_standard)]
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                extra.append(f)
        print('build: ', ' '.join(extra))
        subprocess.check_call(extra)
        # Link against the binaryen C library DSO, using an executable-relative rpath
        cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        print(' ', t, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++' + str(shared.cxx_standard)] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        shared.fail_if_not_identical_to_file(actual, expected)
def run_wasm_reduce_tests():
    """Exercise wasm-reduce: reduce fixed testcases and compare the result to
    recorded expectations, then reduce a generated fuzz testcase and assert a
    minimum shrink ratio.

    Requires a shell `timeout` capability (wasm-reduce runs commands under a
    timeout); skipped when unavailable.
    """
    if not shared.has_shell_timeout():
        print('\n[ skipping wasm-reduce testcases]\n')
        return
    print('\n[ checking wasm-reduce testcases]\n')
    # fixed testcases
    for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
        print('..', os.path.basename(t))
        # convert to wasm
        support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm', '-all'])
        # reduce a.wasm; wasm-reduce re-runs the --command per candidate,
        # writing the working copy to b.wasm and the best result to c.wasm
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
        expected = t + '.txt'
        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
    # run on a nontrivial fuzz testcase, for general coverage
    # this is very slow in ThreadSanitizer, so avoid it there
    if 'fsanitize=thread' not in str(os.environ):
        print('\n[ checking wasm-reduce fuzz testcase ]\n')
        # TODO: re-enable multivalue once it is better optimized
        support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
        before = os.stat('a.wasm').st_size
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
        after = os.stat('c.wasm').st_size
        # This number is a custom threshold to check if we have shrunk the
        # output sufficiently
        assert after < 0.85 * before, [before, after]
def update_binaryen_js_tests():
    """Regenerate the expected outputs of the binaryen.js testcases.

    Each test script is concatenated with the binaryen.js library build (plus
    node glue when running under node), executed in an available JS VM, and
    the VM output is written back to the test's sibling .txt file.

    FIX: all file reads/writes now use context managers; the original leaked
    two file handles per test (bare open(...).read()) and closed 'a.js'
    manually.
    """
    if not (shared.MOZJS or shared.NODEJS):
        print('no vm to run binaryen.js tests')
        return
    if not os.path.exists(shared.BINARYEN_JS):
        print('no binaryen.js build to test')
        return
    print('\n[ checking binaryen.js testcases... ]\n')
    node_has_wasm = shared.NODEJS and support.node_has_webassembly(shared.NODEJS)
    for s in shared.get_tests(shared.get_test_dir('binaryen.js'), ['.js']):
        basename = os.path.basename(s)
        print(basename)
        # assemble the script to run: library + optional node glue + test body
        with open(shared.BINARYEN_JS) as lib:
            lib_src = lib.read()
        with open(s) as test:
            test_src = test.read()
        with open('a.js', 'w') as f:
            f.write(lib_src)
            if shared.NODEJS:
                f.write(support.node_test_glue())
            f.write(test_src)
        if shared.MOZJS or node_has_wasm or 'WebAssembly.' not in test_src:
            cmd = [shared.MOZJS or shared.NODEJS, 'a.js']
            if 'fatal' not in basename:
                out = support.run_command(cmd, stderr=subprocess.STDOUT)
            else:
                # expect an error - the specific error code will depend on the vm
                out = support.run_command(cmd, stderr=subprocess.STDOUT,
                                          expected_status=None)
            with open(s + '.txt', 'w') as o:
                o.write(out)
        else:
            print('Skipping ' + basename + ' because WebAssembly might not be supported')
def run_crash_tests():
    """Feed known-tricky inputs to wasm-opt and verify it reports a parse
    error (exit status 1) instead of crashing."""
    print("\n[ checking we don't crash on tricky inputs... ]\n")
    crash_dir = shared.get_test_dir('crash')
    for testcase in shared.get_tests(crash_dir, ['.wast', '.wasm']):
        print('..', os.path.basename(testcase))
        # expect a parse error to be reported
        support.run_command(shared.WASM_OPT + [testcase],
                            expected_err='parse exception:',
                            err_contains=True,
                            expected_status=1)
def update_reduce_tests():
    """Regenerate the expected outputs of the wasm-reduce testcases."""
    print('\n[ checking wasm-reduce ]\n')
    for testcase in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
        print('..', os.path.basename(testcase))
        # convert to wasm
        support.run_command(shared.WASM_AS + [testcase, '-o', 'a.wasm'])
        # reduce a.wasm, checking candidates via wasm-opt's fuzz-exec;
        # the best result lands in c.wasm
        reduce_cmd = shared.WASM_REDUCE + [
            'a.wasm',
            '--command=%s b.wasm --fuzz-exec' % shared.WASM_OPT[0],
            '-t', 'b.wasm',
            '-w', 'c.wasm',
        ]
        print(support.run_command(reduce_cmd))
        # disassemble the reduced module straight into the expected file
        expected = testcase + '.txt'
        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', expected])
def run_ctor_eval_tests():
    """Run wasm-ctor-eval over each ctor-eval testcase and compare the
    evaluated module (written to a.wat) against the recorded .out file.

    FIX: file reads now use context managers; the original leaked a handle
    per test via bare open(...).read().
    """
    print('\n[ checking wasm-ctor-eval... ]\n')
    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        # the list of ctors to evaluate lives in a sibling .ctors file
        with open(t + '.ctors') as f:
            ctors = f.read().strip()
        cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S',
                                       '--ctors', ctors]
        support.run_command(cmd)
        with open('a.wat') as f:
            actual = f.read()
        out = t + '.out'
        shared.fail_if_not_identical_to_file(actual, out)
def update_lit_tests():
    """Regenerate the CHECK lines of the lit testcases by invoking the
    update_lit_checks.py helper script over test/lit."""
    print('\n[ updating lit testcases... ]\n')
    script = os.path.join(shared.options.binaryen_root, 'scripts',
                          'update_lit_checks.py')
    lit_dir = shared.get_test_dir('lit')
    # the updater accepts glob patterns; cover both .wast and .wat tests
    update_cmd = [sys.executable, script,
                  '--binaryen-bin=' + shared.options.binaryen_bin]
    update_cmd.append(os.path.join(lit_dir, '**', '*.wast'))
    update_cmd.append(os.path.join(lit_dir, '**', '*.wat'))
    subprocess.check_output(update_cmd)
def update_ctor_eval_tests():
    """Regenerate the expected outputs (.out) of the wasm-ctor-eval testcases.

    FIX: file reads now use context managers; the original leaked a handle
    per test via bare open(...).read().
    """
    print('\n[ checking wasm-ctor-eval... ]\n')
    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        # the list of ctors to evaluate lives in a sibling .ctors file
        with open(t + '.ctors') as f:
            ctors = f.read().strip()
        cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wast', '-S',
                                       '--ctors', ctors]
        support.run_command(cmd)
        with open('a.wast') as f:
            actual = f.read()
        out = t + '.out'
        with open(out, 'w') as o:
            o.write(actual)
def check_for_stale_files(): # TODO(sbc): Generalize and apply other test suites all_tests = [] for t in tests + spec_tests + wasm2js_tests: all_tests.append(os.path.basename(os.path.splitext(t)[0])) all_files = os.listdir(shared.get_test_dir('wasm2js')) for f in all_files: prefix = f.split('.')[0] if prefix not in all_tests: shared.fail_with_error('orphan test output: %s' % f)
def update_example_tests():
    """Rebuild and run the native C/C++ example testcases, writing each
    program's stdout back to its expected-output .txt file.

    FIX: '-std=c++14' is now appended to the compile command BEFORE the
    command is printed, so the 'build: ' log line matches the command that is
    actually executed (previously the flag was added after the print).
    """
    print('\n[ checking example testcases... ]\n')
    for t in shared.get_tests(shared.get_test_dir('example')):
        basename = os.path.basename(t)
        output_file = os.path.join(shared.options.binaryen_bin, 'example')
        libdir = os.path.join(shared.BINARYEN_INSTALL_DIR, 'lib')
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'),
               '-g', '-pthread', '-o', output_file]
        if t.endswith('.txt'):
            # check if there is a trace in the file, if so, we should build it
            out = subprocess.Popen([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), t],
                                   stdout=subprocess.PIPE).communicate()[0]
            if len(out) == 0:
                print(' (no trace in ', basename, ')')
                continue
            print(' (will check trace in ', basename, ')')
            src = 'trace.cpp'
            with open(src, 'wb') as o:
                o.write(out)
            expected = t + '.txt'
        else:
            src = t
            expected = os.path.splitext(t)[0] + '.txt'
        if not src.endswith(('.c', '.cpp')):
            continue
        # build the C file separately
        extra = [os.environ.get('CC') or 'gcc', src, '-c', '-o', 'example.o',
                 '-I' + os.path.join(shared.options.binaryen_root, 'src'),
                 '-g', '-L' + libdir, '-pthread']
        if src.endswith('.cpp'):
            extra += ['-std=c++14']
        print('build: ', ' '.join(extra))
        print(os.getcwd())
        subprocess.check_call(extra)
        # Link against the binaryen C library DSO, using rpath
        cmd = ['example.o', '-L' + libdir, '-lbinaryen',
               '-Wl,-rpath,' + os.path.abspath(libdir)] + cmd
        print(' ', basename, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [os.environ.get('CXX') or 'g++', '-std=c++14'] + cmd
        try:
            print('link: ', ' '.join(cmd))
            subprocess.check_call(cmd)
            print('run...', output_file)
            proc = subprocess.Popen([output_file], stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            actual, err = proc.communicate()
            assert proc.returncode == 0, [proc.returncode, actual, err]
            with open(expected, 'wb') as o:
                o.write(actual)
        finally:
            # always clean up the built executable, even on failure
            os.remove(output_file)
            if sys.platform == 'darwin':
                # Also removes debug directory produced on Mac OS
                shutil.rmtree(output_file + '.dSYM')
def update_example_tests():
    """Rebuild and run the native C/C++ example testcases, writing each
    program's stdout back to its expected-output .txt file.

    FIX: the C++ standard flag is now appended to the compile command BEFORE
    the command is printed, so the 'build: ' log line matches the command
    that is actually executed (previously the flag was added after the print).
    """
    print('\n[ checking example testcases... ]\n')
    for src in shared.get_tests(shared.get_test_dir('example')):
        basename = os.path.basename(src)
        output_file = os.path.join(shared.options.binaryen_bin, 'example')
        libdir = os.path.join(shared.BINARYEN_INSTALL_DIR, 'lib')
        cmd = [
            '-I' + os.path.join(shared.options.binaryen_root, 'src'),
            '-g', '-pthread', '-o', output_file
        ]
        if not src.endswith(('.c', '.cpp')):
            continue
        expected = os.path.splitext(src)[0] + '.txt'
        # windows + gcc will need some work
        if shared.skip_if_on_windows('gcc'):
            return
        # build the C file separately
        extra = [
            os.environ.get('CC') or 'gcc', src, '-c', '-o', 'example.o',
            '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g',
            '-L' + libdir, '-pthread'
        ]
        if src.endswith('.cpp'):
            extra += ['-std=c++' + str(shared.cxx_standard)]
        print('build: ', ' '.join(extra))
        print(os.getcwd())
        subprocess.check_call(extra)
        # Link against the binaryen C library DSO, using rpath
        cmd = [
            'example.o', '-L' + libdir, '-lbinaryen',
            '-Wl,-rpath,' + os.path.abspath(libdir)
        ] + cmd
        print(' ', basename, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [
            os.environ.get('CXX') or 'g++',
            '-std=c++' + str(shared.cxx_standard)
        ] + cmd
        try:
            print('link: ', ' '.join(cmd))
            subprocess.check_call(cmd)
            print('run...', output_file)
            proc = subprocess.Popen([output_file], stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            actual, err = proc.communicate()
            assert proc.returncode == 0, [proc.returncode, actual, err]
            with open(expected, 'wb') as o:
                o.write(actual)
        finally:
            # always clean up the built executable, even on failure
            os.remove(output_file)
def update_spec_tests():
    """Re-run wasm-shell on the spec tests and overwrite the recorded logs."""
    print('\n[ updating wasm-shell spec testcases... ]\n')
    expected_dir = os.path.join(shared.get_test_dir('spec'), 'expected-output')
    for test in shared.options.spec_tests:
        base = os.path.basename(test)
        print('..', base)
        cmd = shared.WASM_SHELL + [test]
        expected = os.path.join(expected_dir, base + '.log')
        # only refresh logs that already exist; tests without a recorded log
        # are not output-checked
        if not os.path.isfile(expected):
            continue
        stdout = support.run_command(cmd, stderr=subprocess.PIPE)
        with open(expected, 'w') as log:
            log.write(stdout)
def run_wasm_metadce_tests():
    """Run wasm-metadce on each testcase and compare both the emitted module
    and the tool's stdout against the recorded expectations."""
    print('\n[ checking wasm-metadce ]\n')
    for testcase in shared.get_tests(shared.get_test_dir('metadce'),
                                     ['.wast', '.wasm']):
        print('..', os.path.basename(testcase))
        graph_file = testcase + '.graph.txt'
        stdout = support.run_command(
            shared.WASM_METADCE + [testcase, '--graph-file=' + graph_file,
                                   '-o', 'a.wat', '-S', '-all'])
        expected = testcase + '.dced'
        # the DCE'd module must match the .dced file ...
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
        # ... and the tool's stdout must match the .dced.stdout file
        shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_validator_tests():
    """Exercise wasm-as validation modes against known-invalid inputs.

    Each case lists the extra wasm-as flags, the testcase in test/validator,
    and the expected process exit status (0 = must assemble cleanly,
    1 = must be rejected).
    """
    print('\n[ running validation tests... ]\n')
    validator_dir = shared.get_test_dir('validator')
    cases = [
        # Ensure the tests validate by default
        ([], 'invalid_export.wast', 0),
        ([], 'invalid_import.wast', 0),
        # web validation rejects these
        (['--validate=web'], 'invalid_export.wast', 1),
        (['--validate=web'], 'invalid_import.wast', 1),
        # with validation off, an invalid return is accepted
        (['--validate=none'], 'invalid_return.wast', 0),
        # malformed numbers are rejected regardless
        ([], 'invalid_number.wast', 1),
    ]
    for flags, testcase, status in cases:
        cmd = shared.WASM_AS + flags + [os.path.join(validator_dir, testcase)]
        if status:
            support.run_command(cmd, expected_status=status)
        else:
            support.run_command(cmd)
def update_metadce_tests():
    """Regenerate the expected outputs (.dced and .dced.stdout) of the
    wasm-metadce testcases.

    FIX: the read of the emitted module now uses a context manager; the
    original leaked a handle per test via bare open('a.wast').read().
    """
    print('\n[ checking wasm-metadce... ]\n')
    for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        graph = t + '.graph.txt'
        cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wast',
                                     '-S', '-all']
        stdout = support.run_command(cmd)
        with open('a.wast') as f:
            actual = f.read()
        out = t + '.dced'
        with open(out, 'w') as o:
            o.write(actual)
        with open(out + '.stdout', 'w') as o:
            o.write(stdout)
def run_vanilla_tests():
    """Build and run the emcc WASM_BACKEND testcases with a vanilla-LLVM emcc,
    comparing node output against recorded .txt expectations.

    NOTE(review): the trailing `and 0` in the guard below unconditionally
    disables this suite — presumably a deliberate kill-switch; confirm before
    removing it.
    """
    if not (shared.has_vanilla_emcc and shared.has_vanilla_llvm and 0):
        print('\n[ skipping emcc WASM_BACKEND testcases...]\n')
        return
    print('\n[ checking emcc WASM_BACKEND testcases...]\n')
    try:
        # configure emcc via environment variables; the finally block below
        # undoes exactly the branch taken here
        if shared.has_vanilla_llvm:
            os.environ['LLVM'] = shared.BIN_DIR  # use the vanilla LLVM
        else:
            # if we did not set vanilla llvm, then we must set this env var to make emcc use the wasm backend.
            # (if we are using vanilla llvm, things should just work)
            print('(not using vanilla llvm, so setting env var to tell emcc to use wasm backend)')
            os.environ['EMCC_WASM_BACKEND'] = '1'
        VANILLA_EMCC = os.path.join(shared.options.binaryen_test, 'emscripten', 'emcc')
        # run emcc to make sure it sets itself up properly, if it was never run before
        command = [VANILLA_EMCC, '-v']
        print('____' + ' '.join(command))
        subprocess.check_call(command)
        for c in shared.get_tests(shared.get_test_dir('wasm_backend'), '.cpp'):
            print('..', os.path.basename(c))
            base = c.replace('.cpp', '').replace('.c', '')
            expected = open(os.path.join(base + '.txt')).read()
            for opts in [[], ['-O1'], ['-O2']]:
                # only my code is a hack we used early in wasm backend dev, which somehow worked, but only with -O1
                only = [] if opts != ['-O1'] or '_only' not in base else ['-s', 'ONLY_MY_CODE=1']
                command = [VANILLA_EMCC, '-o', 'a.wasm.js', c] + opts + only
                print('....' + ' '.join(command))
                # remove any stale output before compiling
                if os.path.exists('a.wasm.js'):
                    os.unlink('a.wasm.js')
                subprocess.check_call(command)
                if shared.NODEJS:
                    print(' (check in node)')
                    cmd = [shared.NODEJS, 'a.wasm.js']
                    out = support.run_command(cmd)
                    if out.strip() != expected.strip():
                        shared.fail(out, expected)
    finally:
        # undo whichever environment change was made above
        if shared.has_vanilla_llvm:
            del os.environ['LLVM']
        else:
            del os.environ['EMCC_WASM_BACKEND']
def update_spec_tests():
    """Re-run wasm-shell on the spec tests and overwrite the recorded logs,
    first stripping interpreter-only trap logging."""
    print('\n[ updating wasm-shell spec testcases... ]\n')
    expected_dir = os.path.join(shared.get_test_dir('spec'), 'expected-output')
    for test in shared.options.spec_tests:
        base = os.path.basename(test)
        print('..', base)
        cmd = shared.WASM_SHELL + [test]
        expected = os.path.join(expected_dir, base + '.log')
        # only refresh logs that already exist
        if not os.path.isfile(expected):
            continue
        stdout = support.run_command(cmd, stderr=subprocess.PIPE)
        # filter out binaryen interpreter logging that the spec suite
        # doesn't expect
        kept = [line for line in stdout.splitlines()
                if not line.startswith('[trap')]
        with open(expected, 'w') as log:
            log.write('\n'.join(kept) + '\n')
def test_asserts_output():
    """Check wasm2js --allow-asserts output, with and without --pedantic,
    against the recorded .asserts.js / .traps.js expectation files."""
    for wasm in assert_tests:
        print('..', wasm)
        base = os.path.basename(wasm)
        asserts = base.replace('.wast.asserts', '.asserts.js')
        traps = base.replace('.wast.asserts', '.traps.js')
        asserts_expected_file = os.path.join(shared.options.binaryen_test,
                                             asserts)
        traps_expected_file = os.path.join(shared.options.binaryen_test, traps)
        wasm = os.path.join(shared.get_test_dir('wasm2js'), wasm)
        cmd = shared.WASM2JS + [wasm, '--allow-asserts', '-all',
                                '--disable-exception-handling']
        # plain --allow-asserts run
        out = support.run_command(cmd)
        shared.fail_if_not_identical_to_file(out, asserts_expected_file)
        # same run with pedantic trap handling
        cmd += ['--pedantic']
        out = support.run_command(cmd)
        shared.fail_if_not_identical_to_file(out, traps_expected_file)
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from scripts.test import shared from scripts.test import support tests = shared.get_tests(shared.options.binaryen_test) spec_tests = shared.get_tests(shared.get_test_dir('spec'), ['.wast']) spec_tests = [t for t in spec_tests if '.fail' not in t] wasm2js_tests = shared.get_tests(shared.get_test_dir('wasm2js'), ['.wast']) assert_tests = ['wasm2js.wast.asserts'] # These tests exercise functionality not supported by wasm2js wasm2js_blacklist = ['empty_imported_table.wast'] def test_wasm2js_output(): for opt in (0, 1): for t in tests + spec_tests + wasm2js_tests: basename = os.path.basename(t) if basename in wasm2js_blacklist: continue asm = basename.replace('.wast', '.2asm.js')
def run_spec_tests():
    """Run the wasm spec test suite through wasm-shell, comparing against the
    recorded expected-output logs, then re-check each splittable test through
    the binary round-trip and the optimizer.
    """
    print('\n[ checking wasm-shell spec testcases... ]\n')
    if not shared.options.spec_tests:
        # FIXME we support old and new memory formats, for now, until 0xc, and so can't pass this old-style test.
        BLACKLIST = ['binary.wast']
        # FIXME to update the spec to 0xd, we need to implement (register "name") for import.wast
        spec_tests = shared.get_tests(shared.get_test_dir('spec'), ['.wast'])
        spec_tests = [t for t in spec_tests if os.path.basename(t) not in BLACKLIST]
    else:
        # copy so the loop cannot mutate the caller's option list
        spec_tests = shared.options.spec_tests[:]
    for wast in spec_tests:
        print('..', os.path.basename(wast))
        # skip checks for some tests
        if os.path.basename(wast) in ['linking.wast', 'nop.wast', 'stack.wast', 'typecheck.wast', 'unwind.wast']:  # FIXME
            continue

        def run_spec_test(wast):
            # run one wast through wasm-shell, returning its combined output
            cmd = shared.WASM_SHELL + [wast]
            # we must skip the stack machine portions of spec tests or apply other extra args
            extra = {}
            cmd = cmd + (extra.get(os.path.basename(wast)) or [])
            return support.run_command(cmd, stderr=subprocess.PIPE)

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            # compare shell output to the recorded log, if one exists
            if expected and os.path.exists(expected):
                expected = open(expected).read()

                # fix it up, our pretty (i32.const 83) must become compared to a homely 83 : i32
                def fix_expected(x):
                    x = x.strip()
                    if not x:
                        return x
                    v, t = x.split(' : ')
                    if v.endswith('.'):
                        v = v[:-1]  # remove trailing '.'
                    return '(' + t + '.const ' + v + ')'

                def fix_actual(x):
                    # drop interpreter trap logging lines entirely
                    if '[trap ' in x:
                        return ''
                    return x

                expected = '\n'.join(map(fix_expected, expected.split('\n')))
                actual = '\n'.join(map(fix_actual, actual.split('\n')))
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')

        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))

        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if os.path.basename(wast) in ['exports.wast']:  # FIXME
            continue

        # we must ignore some binary format splits
        splits_to_skip = {'func.wast': [2], 'return.wast': [2]}

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        #   * comments.wast: contains characters that are not valid utf-8,
        #     so our string splitting code fails there
        if os.path.basename(wast) not in ['comments.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                skip = splits_to_skip.get(os.path.basename(wast)) or []
                if split_num in skip:
                    print(' skipping split module', split_num - 1)
                    split_num += 1
                    continue
                print(' testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def run_spec_tests():
    """Run the configured wasm spec tests through wasm-shell, comparing
    against recorded expected-output logs, then re-check each splittable test
    through the binary round-trip and the optimizer.
    """
    print('\n[ checking wasm-shell spec testcases... ]\n')
    for wast in shared.options.spec_tests:
        print('..', os.path.basename(wast))

        def run_spec_test(wast):
            # run one wast through wasm-shell, returning its output
            cmd = shared.WASM_SHELL + [wast]
            return support.run_command(cmd, stderr=subprocess.PIPE)

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            # compare shell output to the recorded log, if one exists
            if expected and os.path.exists(expected):
                expected = open(expected).read()
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')

        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))

        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if 'exports.wast' in os.path.basename(wast):  # FIXME
            continue

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        #   * comments.wast: contains characters that are not valid utf-8,
        #     so our string splitting code fails there
        if os.path.basename(wast) not in ['comments.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                print(' testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def update_wasm_opt_tests():
    """Regenerate the expected outputs of the wasm-opt testcases: -o notation,
    print/print-minified output, pass outputs, .from-wast files, and the
    debugInfo read-write round-trip files.

    FIX: all bare open(...).read() / open(...).write(...) chains are replaced
    with context managers; the original leaked several file handles per run.
    """
    print('\n[ checking wasm-opt -o notation... ]\n')
    wast = os.path.join(shared.options.binaryen_test, 'hello_world.wast')
    cmd = shared.WASM_OPT + [wast, '-o', 'a.wast', '-S']
    support.run_command(cmd)
    # round-trip the file back over itself
    with open('a.wast') as i:
        roundtripped = i.read()
    with open(wast, 'w') as o:
        o.write(roundtripped)
    print('\n[ checking wasm-opt parsing & printing... ]\n')
    for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
        print('..', os.path.basename(t))
        wasm = t.replace('.wast', '')
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        print(' ', ' '.join(cmd))
        actual = subprocess.check_output(cmd)
        print(cmd, actual)
        with open(wasm + '.txt', 'wb') as o:
            o.write(actual)
        cmd = shared.WASM_OPT + [t, '--print-minified', '-all']
        print(' ', ' '.join(cmd))
        actual = subprocess.check_output(cmd)
        with open(wasm + '.minified.txt', 'wb') as o:
            o.write(actual)
    print('\n[ checking wasm-opt passes... ]\n')
    for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        binary = t.endswith('.wasm')
        base = os.path.basename(t).replace('.wast', '').replace('.wasm', '')
        passname = base
        # numeric testcase names store their pass list in a .passes file
        if passname.isdigit():
            with open(os.path.join(shared.options.binaryen_test, 'passes',
                                   passname + '.passes')) as f:
                passname = f.read().strip()
        # '_'-separated pass names become --flags; O* levels become -O flags
        opts = [('--' + p if not p.startswith('O') else '-' + p)
                for p in passname.split('_')]
        actual = ''
        for module, asserts in support.split_wast(t):
            assert len(asserts) == 0
            support.write_wast('split.wast', module)
            cmd = shared.WASM_OPT + opts + ['split.wast', '--print']
            actual += support.run_command(cmd)
        with open(os.path.join(shared.options.binaryen_test, 'passes',
                               base + ('.bin' if binary else '') + '.txt'),
                  'w') as o:
            o.write(actual)
        if 'emit-js-wrapper' in t:
            with open('a.js') as i:
                with open(t + '.js', 'w') as o:
                    o.write(i.read())
        if 'emit-spec-wrapper' in t:
            with open('a.wat') as i:
                with open(t + '.wat', 'w') as o:
                    o.write(i.read())
    print('\n[ checking wasm-opt testcases... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.wast']):
        print('..', os.path.basename(t))
        f = t + '.from-wast'
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        actual = support.run_command(cmd)
        actual = actual.replace('printing before:\n', '')
        with open(f, 'w') as o:
            o.write(actual)
    print('\n[ checking wasm-opt debugInfo read-write... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.fromasm']):
        if 'debugInfo' not in t:
            continue
        print('..', os.path.basename(t))
        f = t + '.read-written'
        support.run_command(shared.WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g'])
        support.run_command(shared.WASM_OPT + [
            'a.wasm', '--input-source-map=a.map', '-o', 'b.wasm',
            '--output-source-map=b.map', '-g'
        ])
        actual = support.run_command(shared.WASM_DIS + ['b.wasm', '--source-map=b.map'])
        with open(f, 'w') as o:
            o.write(actual)
def run_wasm_opt_tests():
    """Run the wasm-opt checks: -o notation, binary/text emission defaults,
    per-pass outputs (including --debug and pass-debug consistency),
    print/print-minified output, .from-wast comparisons, binary round-trips,
    and the debugInfo source-map read-write round-trip.
    """
    print('\n[ checking wasm-opt -o notation... ]\n')
    for extra_args in [[], ['--no-validation']]:
        wast = os.path.join(shared.options.binaryen_test, 'hello_world.wat')
        shared.delete_from_orbit('a.wat')
        out = 'a.wat'
        cmd = shared.WASM_OPT + [wast, '-o', out, '-S'] + extra_args
        support.run_command(cmd)
        # with no passes, -S output must equal the input text
        shared.fail_if_not_identical_to_file(open(out).read(), wast)
    print('\n[ checking wasm-opt binary reading/writing... ]\n')
    shutil.copyfile(os.path.join(shared.options.binaryen_test, 'hello_world.wat'), 'a.wat')
    shared.delete_from_orbit('a.wasm')
    shared.delete_from_orbit('b.wast')
    support.run_command(shared.WASM_OPT + ['a.wat', '-o', 'a.wasm'])
    # a wasm binary begins with a zero byte ('\0asm'); text never does
    assert open('a.wasm', 'rb').read()[0] == 0, 'we emit binary by default'
    support.run_command(shared.WASM_OPT + ['a.wasm', '-o', 'b.wast', '-S'])
    assert open('b.wast', 'rb').read()[0] != 0, 'we emit text with -S'
    print('\n[ checking wasm-opt passes... ]\n')
    for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        binary = '.wasm' in t
        base = os.path.basename(t).replace('.wast', '').replace('.wasm', '')
        passname = base
        # a sibling .passes file overrides the pass list encoded in the name
        passes_file = os.path.join(shared.get_test_dir('passes'), passname + '.passes')
        if os.path.exists(passes_file):
            passname = open(passes_file).read().strip()
        # '_'-separated pass names become --flags; O* levels and 'g' become short flags
        opts = [('--' + p if not p.startswith('O') and p != 'g' else '-' + p) for p in passname.split('_')]
        actual = ''
        for module, asserts in support.split_wast(t):
            assert len(asserts) == 0
            support.write_wast('split.wast', module)
            cmd = shared.WASM_OPT + opts + ['split.wast', '--print']
            curr = support.run_command(cmd)
            actual += curr
            # also check debug mode output is valid
            debugged = support.run_command(cmd + ['--debug'], stderr=subprocess.PIPE)
            shared.fail_if_not_contained(actual, debugged)

            # also check pass-debug mode
            def check():
                # the closure re-runs the same command under pass-debug and
                # requires byte-identical output
                pass_debug = support.run_command(cmd)
                shared.fail_if_not_identical(curr, pass_debug)
            shared.with_pass_debug(check)
        expected_file = os.path.join(shared.get_test_dir('passes'), base + ('.bin' if binary else '') + '.txt')
        shared.fail_if_not_identical_to_file(actual, expected_file)
        if 'emit-js-wrapper' in t:
            with open('a.js') as actual:
                shared.fail_if_not_identical_to_file(actual.read(), t + '.js')
        if 'emit-spec-wrapper' in t:
            with open('a.wat') as actual:
                shared.fail_if_not_identical_to_file(actual.read(), t + '.wat')
    print('\n[ checking wasm-opt parsing & printing... ]\n')
    for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
        print('..', os.path.basename(t))
        wasm = os.path.basename(t).replace('.wast', '')
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        print(' ', ' '.join(cmd))
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
        expected_file = os.path.join(shared.get_test_dir('print'), wasm + '.txt')
        shared.fail_if_not_identical_to_file(actual, expected_file)
        cmd = shared.WASM_OPT + [os.path.join(shared.get_test_dir('print'), t), '--print-minified', '-all']
        print(' ', ' '.join(cmd))
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
        shared.fail_if_not_identical(actual.strip(), open(os.path.join(shared.get_test_dir('print'), wasm + '.minified.txt')).read().strip())
    print('\n[ checking wasm-opt testcases... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.wast']):
        print('..', os.path.basename(t))
        f = t + '.from-wast'
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        actual = support.run_command(cmd)
        actual = actual.replace('printing before:\n', '')
        shared.fail_if_not_identical_to_file(actual, f)
        # FIXME Remove this condition after nullref is implemented in V8
        if 'reference-types.wast' not in t:
            shared.binary_format_check(t, wasm_as_args=['-g'])  # test with debuginfo
            shared.binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo')  # test without debuginfo
            shared.minify_check(t)
    print('\n[ checking wasm-opt debugInfo read-write... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.fromasm']):
        if 'debugInfo' not in t:
            continue
        print('..', os.path.basename(t))
        f = t + '.read-written'
        support.run_command(shared.WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g'])
        support.run_command(shared.WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g'])
        actual = support.run_command(shared.WASM_DIS + ['b.wasm', '--source-map=b.map'])
        shared.fail_if_not_identical_to_file(actual, f)
def update_wasm2js_tests():
    """Regenerate the expected wasm2js outputs.

    First rewrites the .2asm.js (and .2asm.js.opt) expectations for every
    eligible .wast test, then rewrites the .asserts.js / .traps.js
    expectations for the assertion tests.
    """
    print('\n[ checking wasm2js ]\n')
    # One pass without optimization, one with -O.
    for opt in (0, 1):
        for wasm in tests + spec_tests + wasm2js_tests:
            base = os.path.basename(wasm)
            if not wasm.endswith('.wast') or base in wasm2js_blacklist:
                continue
            expected_file = os.path.join(shared.get_test_dir('wasm2js'),
                                         base.replace('.wast', '.2asm.js'))
            if opt:
                expected_file += '.opt'
            # we run wasm2js on tests and spec tests only if the output
            # exists - only some work so far. the tests in extra are in
            # the test/wasm2js dir and so are specific to wasm2js, and
            # we run all of those.
            if wasm not in wasm2js_tests and not os.path.exists(expected_file):
                continue
            print('..', wasm)
            path = os.path.join(shared.options.binaryen_test, wasm)
            chunks = []
            # Convert each module of the wast separately and concatenate.
            for module, asserts in support.split_wast(path):
                support.write_wast('split.wast', module, asserts)
                command = shared.WASM2JS + ['split.wast', '-all']
                if opt:
                    command += ['-O']
                if 'emscripten' in wasm:
                    command += ['--emscripten']
                chunks.append(support.run_command(command))
            with open(expected_file, 'w') as handle:
                handle.write(''.join(chunks))
    # Regenerate the assertion and trap expectations.
    for wasm in assert_tests:
        print('..', wasm)
        stem = os.path.basename(wasm)
        asserts_expected_file = os.path.join(
            shared.options.binaryen_test,
            stem.replace('.wast.asserts', '.asserts.js'))
        traps_expected_file = os.path.join(
            shared.options.binaryen_test,
            stem.replace('.wast.asserts', '.traps.js'))
        command = shared.WASM2JS + [
            os.path.join(shared.get_test_dir('wasm2js'), wasm),
            '--allow-asserts', '-all'
        ]
        with open(asserts_expected_file, 'w') as handle:
            handle.write(support.run_command(command))
        # --pedantic additionally emits trapping behavior checks.
        with open(traps_expected_file, 'w') as handle:
            handle.write(support.run_command(command + ['--pedantic']))
def run_spec_tests():
    """Run the upstream spec testsuite through wasm-shell.

    For each spec wast: run it and compare against the recorded expected
    output (when one exists), then split it into individual modules and
    verify that each module survives a binary round-trip and optimization
    while still passing its asserts.
    """
    print('\n[ checking wasm-shell spec testcases... ]\n')

    for wast in shared.options.spec_tests:
        base = os.path.basename(wast)
        print('..', base)
        # windows has some failures that need to be investigated
        if base == 'names.wast' and shared.skip_if_on_windows('spec: ' + base):
            continue
        # FIXME Reenable this after updating interpreter for EH
        if base == 'exception-handling.wast':
            continue

        def run_spec_test(wast):
            # Run the interpreter and return its output with binaryen's own
            # trap logging stripped, since the spec suite doesn't expect it.
            cmd = shared.WASM_SHELL + [wast]
            output = support.run_command(cmd, stderr=subprocess.PIPE)
            filtered = [
                line for line in output.splitlines()
                if not line.startswith('[trap')
            ]
            return '\n'.join(filtered) + '\n'

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            # Compare to the expected-output file, if one exists for this test.
            if expected and os.path.exists(expected):
                # use a context manager so the handle is closed promptly
                with open(expected) as f:
                    expected = f.read()
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'),
                                'expected-output', base + '.log')

        # some spec tests should fail (actual process failure, not just
        # assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or
                    'parse exception' in str(e)) and '.fail.' in base:
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))

        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name,
        # as that's a wast-only feature
        if 'exports.wast' in base:  # FIXME
            continue

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        #     * comments.wast: contains characters that are not valid utf-8,
        #       so our string splitting code fails there
        # FIXME Remove reference type tests from this list after nullref is
        # implemented in V8
        if base not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast',
                        'ref_func.wast', 'old_select.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                print(' testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                # before binary stuff - just check it's still ok split out
                run_spec_test('split.wast')
                # also that our optimizer doesn't break on it
                run_opt_test('split.wast')
                result_wast = shared.binary_format_check(
                    'split.wast', verify_final_result=False,
                    original_wast=wast)
                # add the asserts, and verify that the test still passes
                # (use a context manager so the append is flushed and the
                # handle closed before we re-run the file)
                with open(result_wast, 'a') as f:
                    f.write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(
                actual,
                os.path.join(shared.get_test_dir('spec'),
                             'expected-output', base + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def test_wasm2js_output():
    """Check wasm2js output against the recorded expectations.

    For each test that has a .2asm.js (or .2asm.js.opt) expected file:
    convert each module with wasm2js, compare the concatenated output to the
    expectation, and - when a JS interpreter is available - run the emitted
    modules (plain and with asserts) under node to verify they execute
    cleanly.
    """
    for opt in (0, 1):
        for t in tests + spec_tests + wasm2js_tests:
            basename = os.path.basename(t)
            if basename in wasm2js_blacklist:
                continue
            asm = basename.replace('.wast', '.2asm.js')
            expected_file = os.path.join(shared.get_test_dir('wasm2js'), asm)
            if opt:
                expected_file += '.opt'
            # only tests with a recorded expectation are runnable so far
            if not os.path.exists(expected_file):
                continue
            print('..', os.path.basename(t))
            all_out = []
            for module, asserts in support.split_wast(t):
                support.write_wast('split.wast', module, asserts)
                cmd = shared.WASM2JS + ['split.wast', '-all']
                if opt:
                    cmd += ['-O']
                if 'emscripten' in t:
                    cmd += ['--emscripten']
                out = support.run_command(cmd)
                all_out.append(out)
                if not shared.NODEJS and not shared.MOZJS:
                    print('No JS interpreters. Skipping spec tests.')
                    continue
                # write via a context manager so the file is flushed and
                # closed before node reads it below
                with open('a.2asm.mjs', 'w') as f:
                    f.write(out)
                cmd += ['--allow-asserts']
                out = support.run_command(cmd)
                # also verify it passes pass-debug verifications
                shared.with_pass_debug(lambda: support.run_command(cmd))
                with open('a.2asm.asserts.mjs', 'w') as f:
                    f.write(out)
                # verify asm.js is valid js, note that we're using
                # --experimental-modules to enable ESM syntax and we're also
                # passing a custom loader to handle the `spectest` and `env`
                # modules in our tests.
                if shared.NODEJS:
                    loader = os.path.join(shared.options.binaryen_root,
                                          'scripts', 'test',
                                          'node-esm-loader.mjs')
                    node = [shared.NODEJS, '--experimental-modules',
                            '--loader', loader]
                    cmd = node[:]
                    cmd.append('a.2asm.mjs')
                    out = support.run_command(cmd)
                    shared.fail_if_not_identical(out, '')
                    cmd = node[:]
                    cmd.append('a.2asm.asserts.mjs')
                    out = support.run_command(cmd, expected_err='',
                                              err_ignore='ExperimentalWarning')
                    shared.fail_if_not_identical(out, '')
            shared.fail_if_not_identical_to_file(''.join(all_out),
                                                 expected_file)