def run_gcc_tests():
    """Compile and run the native C/C++ API examples against the binaryen
    shared library, comparing each program's stdout to its checked-in
    .txt expectation file.

    Skips entirely when no native compiler is available, or on Windows
    (gcc support there still needs some work).
    """
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error(
            'Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    # windows + gcc will need some work
    if shared.skip_if_on_windows('gcc'):
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        output_file = 'example'
        # Link-stage flags. The include root is 'src' under binaryen_root —
        # the original passed a stray 't' literal here, pointing -I at a
        # nonexistent directory (the compile stage below uses 'src').
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'),
               '-g', '-pthread', '-o', output_file]
        if not t.endswith(('.c', '.cpp')):
            continue
        src = os.path.join(shared.get_test_dir('example'), t)
        expected = os.path.join(shared.get_test_dir('example'),
                                '.'.join(t.split('.')[:-1]) + '.txt')
        # build the C file separately
        libpath = shared.options.binaryen_lib
        extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
                 '-I' + os.path.join(shared.options.binaryen_root, 'src'),
                 '-g', '-L' + libpath, '-pthread']
        if src.endswith('.cpp'):
            extra += ['-std=c++' + str(shared.cxx_standard)]
        # Extra flags from the environment apply to both compile and link.
        env_flags = os.environ.get('COMPILER_FLAGS')
        if env_flags:
            extra += env_flags.split(' ')
        print('build: ', ' '.join(extra))
        subprocess.check_call(extra)
        # Link against the binaryen C library DSO, using an
        # executable-relative rpath
        cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + \
              ['-Wl,-rpath,' + libpath]
        print(' ', t, src, expected)
        if env_flags:
            cmd += env_flags.split(' ')
        cmd = [shared.NATIVEXX, '-std=c++' + str(shared.cxx_standard)] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output(
            [os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        shared.fail_if_not_identical_to_file(actual, expected)
def run_gcc_tests():
    """Compile and run the native C/C++ API examples (and generated C API
    traces extracted from .txt files) against the binaryen shared library,
    comparing each program's stdout to its expectation file."""
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        output_file = 'example'
        # Link-stage flags, completed further below once the object file exists.
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
        if t.endswith('.txt'):
            # check if there is a trace in the file, if so, we should build it
            out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
            if len(out) == 0:
                print(' (no trace in ', t, ')')
                continue
            print(' (will check trace in ', t, ')')
            # The cleaned trace is written out as a local C++ source to build.
            src = 'trace.cpp'
            with open(src, 'wb') as o:
                o.write(out)
            # NOTE(review): expectation path appends '.txt' to an already-.txt
            # name (foo.txt -> foo.txt.txt); matches the other versions of this
            # function, so presumably intentional — confirm against test/example.
            expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
        else:
            src = os.path.join(shared.get_test_dir('example'), t)
            expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
        if src.endswith(('.c', '.cpp')):
            # build the C file separately
            libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
            extra = [shared.NATIVECC, src, '-c', '-o', 'example.o', '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
            if src.endswith('.cpp'):
                extra += ['-std=c++14']
            # COMPILER_FLAGS from the environment are appended to the compile line.
            if os.environ.get('COMPILER_FLAGS'):
                for f in os.environ.get('COMPILER_FLAGS').split(' '):
                    extra.append(f)
            print('build: ', ' '.join(extra))
            subprocess.check_call(extra)
            # Link against the binaryen C library DSO, using an executable-relative rpath
            cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        else:
            # Not a buildable source file — nothing to do.
            continue
        print(' ', t, src, expected)
        # COMPILER_FLAGS also apply to the link line.
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++14'] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        # Run the freshly linked example and compare its stdout to the
        # expectation file; the binary is removed afterwards either way.
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        shared.fail_if_not_identical_to_file(actual, expected)
def check_for_stale_files():
    """Fail if the wasm2js output directory holds files for no known test."""
    if shared.options.test_name_filter:
        # A filter means only a subset ran; stale detection would misfire.
        return
    # TODO(sbc): Generalize and apply other test suites
    known = {os.path.basename(os.path.splitext(t)[0])
             for t in tests + spec_tests + wasm2js_tests}
    for entry in os.listdir(shared.get_test_dir('wasm2js')):
        if entry.split('.')[0] not in known:
            shared.fail_with_error('orphan test output: %s' % entry)
def run_spec_tests():
    """Run each spec wast through wasm-shell, compare to expected-output
    logs, then split the file into modules, round-trip each through the
    binary format, and re-check the combined output."""
    print('\n[ checking wasm-shell spec testcases... ]\n')
    for wast in shared.options.spec_tests:
        base = os.path.basename(wast)
        print('..', base)
        # windows has some failures that need to be investigated
        if base == 'names.wast' and shared.skip_if_on_windows('spec: ' + base):
            continue
        # FIXME Reenable this after updating interpreter for EH
        if base == 'exception-handling.wast':
            continue

        def run_spec_test(wast):
            # Execute wasm-shell on one wast file and return its output.
            cmd = shared.WASM_SHELL + [wast]
            output = support.run_command(cmd, stderr=subprocess.PIPE)
            # filter out binaryen interpreter logging that the spec suite
            # doesn't expect
            filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
            return '\n'.join(filtered) + '\n'

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            # Compare against the recorded log, if one exists for this test.
            if expected and os.path.exists(expected):
                expected = open(expected).read()
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log')
        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in base:
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))
        check_expected(actual, expected)
        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if 'exports.wast' in base:  # FIXME
            continue
        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        # * comments.wast: contains characters that are not valid utf-8,
        #   so our string splitting code fails there
        # FIXME Remove reference type tests from this list after nullref is
        # implemented in V8
        if base not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                print(' testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def run_gcc_torture_tests():
    # Python 2 variant: build and run the native C/C++ API examples and
    # recorded C API traces, checking each program's output against its
    # .txt expectation file.
    print '\n[ checking native gcc testcases...]\n'
    if not NATIVECC or not NATIVEXX:
        fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
    else:
        for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'example'))):
            output_file = os.path.join(options.binaryen_bin, 'example')
            # Link-stage flags, completed below once the object file exists.
            cmd = ['-I' + os.path.join(options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
            if t.endswith('.txt'):
                # check if there is a trace in the file, if so, we should build it
                out = subprocess.Popen([os.path.join('scripts', 'clean_c_api_trace.py'), os.path.join(options.binaryen_test, 'example', t)], stdout=subprocess.PIPE).communicate()[0]
                if len(out) == 0:
                    print ' (no trace in ', t, ')'
                    continue
                print ' (will check trace in ', t, ')'
                # Write the cleaned trace out as a local C++ source to build.
                src = 'trace.cpp'
                with open(src, 'w') as o:
                    o.write(out)
                expected = os.path.join(options.binaryen_test, 'example', t + '.txt')
            else:
                src = os.path.join(options.binaryen_test, 'example', t)
                expected = os.path.join(options.binaryen_test, 'example', '.'.join(t.split('.')[:-1]) + '.txt')
            if src.endswith(('.c', '.cpp')):
                # build the C file separately
                extra = [NATIVECC, src, '-c', '-o', 'example.o', '-I' + os.path.join(options.binaryen_root, 'src'), '-g', '-L' + os.path.join(options.binaryen_bin, '..', 'lib'), '-pthread']
                if src.endswith('.cpp'):
                    extra += ['-std=c++11']
                print 'build: ', ' '.join(extra)
                subprocess.check_call(extra)
                # Link against the binaryen C library DSO, using an executable-relative rpath
                cmd = ['example.o', '-L' + os.path.join(options.binaryen_bin, '..', 'lib'), '-lbinaryen'] + cmd + ['-Wl,-rpath=$ORIGIN/../lib']
            else:
                # Not a buildable source file — skip it.
                continue
            print ' ', t, src, expected
            # COMPILER_FLAGS from the environment are appended to the link line.
            if os.environ.get('COMPILER_FLAGS'):
                for f in os.environ.get('COMPILER_FLAGS').split(' '):
                    cmd.append(f)
            cmd = [NATIVEXX, '-std=c++11'] + cmd
            try:
                print 'link: ', ' '.join(cmd)
                subprocess.check_call(cmd)
                print 'run...', output_file
                # Run the example and capture its output for comparison.
                proc = subprocess.Popen([output_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                actual, err = proc.communicate()
                assert proc.returncode == 0, [proc.returncode, actual, err]
            finally:
                # Always clean up the built binary, even on failure.
                os.remove(output_file)
                if sys.platform == 'darwin':
                    # Also removes debug directory produced on Mac OS
                    shutil.rmtree(output_file + '.dSYM')
            fail_if_not_identical_to_file(actual, expected)
def run_spec_tests():
    # Python 2 variant: run spec wast files through wasm-shell, compare to
    # expected-output logs (after normalizing their "83 : i32" value format),
    # then round-trip split modules through the binary format and re-check.
    print '\n[ checking wasm-shell spec testcases... ]\n'
    if len(requested) == 0:
        BLACKLIST = ['memory.wast', 'binary.wast']
        # FIXME we support old and new memory formats, for now, until 0xc, and so can't pass this old-style test.
        # FIXME to update the spec to 0xd, we need to implement (register "name") for import.wast
        spec_tests = [os.path.join('spec', t) for t in sorted(os.listdir(os.path.join(options.binaryen_test, 'spec'))) if t not in BLACKLIST]
    else:
        spec_tests = requested[:]
    for t in spec_tests:
        if t.startswith('spec') and t.endswith('.wast'):
            print '..', t
            wast = os.path.join(options.binaryen_test, t)
            # skip checks for some tests
            if os.path.basename(wast) in ['linking.wast', 'nop.wast', 'stack.wast', 'typecheck.wast', 'unwind.wast']:  # FIXME
                continue

            def run_spec_test(wast):
                cmd = WASM_SHELL + [wast]
                # we must skip the stack machine portions of spec tests or apply other extra args
                extra = {}
                cmd = cmd + (extra.get(os.path.basename(wast)) or [])
                return run_command(cmd, stderr=subprocess.PIPE)

            def run_opt_test(wast):
                # check optimization validation
                cmd = WASM_OPT + [wast, '-O']
                run_command(cmd)

            def check_expected(actual, expected):
                if expected and os.path.exists(expected):
                    expected = open(expected).read()

                    # fix it up, our pretty (i32.const 83) must become compared to a homely 83 : i32
                    def fix(x):
                        x = x.strip()
                        if not x:
                            return x
                        v, t = x.split(' : ')
                        if v.endswith('.'):
                            v = v[:-1]  # remove trailing '.'
                        return '(' + t + '.const ' + v + ')'
                    expected = '\n'.join(map(fix, expected.split('\n')))
                    print ' (using expected output)'
                    actual = actual.strip()
                    expected = expected.strip()
                    if actual != expected:
                        fail(actual, expected)

            expected = os.path.join(options.binaryen_test, 'spec', 'expected-output', os.path.basename(wast) + '.log')
            # some spec tests should fail (actual process failure, not just assert_invalid)
            try:
                actual = run_spec_test(wast)
            except Exception, e:
                if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in t:
                    print '<< test failed as expected >>'
                    continue  # don't try all the binary format stuff TODO
                else:
                    fail_with_error(str(e))
            check_expected(actual, expected)
            # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
            if os.path.basename(wast) in ['exports.wast']:  # FIXME
                continue
            # we must ignore some binary format splits
            splits_to_skip = {'func.wast': [2], 'return.wast': [2]}
            # check binary format. here we can verify execution of the final result, no need for an output verification
            split_num = 0
            if os.path.basename(wast) not in []:  # avoid some tests with things still being sorted out in the spec
                actual = ''
                for module, asserts in split_wast(wast):
                    # Skip splits known to be problematic for this file.
                    skip = splits_to_skip.get(os.path.basename(wast)) or []
                    if split_num in skip:
                        print ' skipping split module', split_num - 1
                        split_num += 1
                        continue
                    print ' testing split module', split_num
                    split_num += 1
                    with open('split.wast', 'w') as o:
                        o.write(module + '\n' + '\n'.join(asserts))
                    run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                    run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                    result_wast = binary_format_check('split.wast', verify_final_result=False)
                    # add the asserts, and verify that the test still passes
                    open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                    actual += run_spec_test(result_wast)
                # compare all the outputs to the expected output
                check_expected(actual, os.path.join(options.binaryen_test, 'spec', 'expected-output', os.path.basename(wast) + '.log'))
cmd += ['--no-legalize-javascript-ffi'] if precise and opts: # test mem init importing open('a.mem', 'wb').write(asm) cmd += ['--mem-init=a.mem'] if asm[0] == 'e': cmd += ['--mem-base=1024'] if 'i64' in asm or 'wasm-only' in asm or 'noffi' in asm: cmd += ['--wasm-only'] wasm = os.path.join(options.binaryen_test, wasm) print '..', asm, wasm actual = run_command(cmd) # verify output if not os.path.exists(wasm): fail_with_error('output .wast file %s does not exist' % wasm) expected = open(wasm, 'rb').read() if actual != expected: fail(actual, expected) binary_format_check(wasm, verify_final_result=False) # verify in wasm if options.interpreter: # remove imports, spec interpreter doesn't know what to do with them subprocess.check_call(WASM_OPT + ['--remove-imports', wasm], stdout=open('ztemp.wast', 'w'), stderr=subprocess.PIPE) proc = subprocess.Popen( [options.interpreter, 'ztemp.wast'],
out, err = proc.communicate() if proc.returncode != 0: try: # to parse the error reported = err.split(':')[1] start, end = reported.split('-') start_line, start_col = map(int, start.split('.')) lines = open('ztemp.wast').read().split('\n') print print '=' * 80 print lines[start_line - 1] print(' ' * (start_col - 1)) + '^' print(' ' * (start_col - 2)) + '/_\\' print '=' * 80 print err except Exception, e: fail_with_error('wasm interpreter error: ' + err) # failed to pretty-print fail_with_error('wasm interpreter error') # verify debug info if 'debugInfo' in asm: jsmap = 'a.wasm.map' cmd += [ '--source-map', jsmap, '--source-map-url', 'http://example.org/' + jsmap, '-o', 'a.wasm' ] run_command(cmd) if not os.path.isfile(jsmap): fail_with_error('Debug info map not created: %s' % jsmap) with open(wasm + '.map', 'rb') as expected: with open(jsmap, 'rb') as actual:
def run_spec_tests():
    """Run spec wast files through wasm-shell, compare to expected-output
    logs (normalizing both sides first), then round-trip split modules
    through the binary format and re-check the combined output."""
    print('\n[ checking wasm-shell spec testcases... ]\n')
    if not shared.options.spec_tests:
        # FIXME we support old and new memory formats, for now, until 0xc, and so can't pass this old-style test.
        BLACKLIST = ['binary.wast']
        # FIXME to update the spec to 0xd, we need to implement (register "name") for import.wast
        spec_tests = shared.get_tests(shared.get_test_dir('spec'), ['.wast'])
        spec_tests = [t for t in spec_tests if os.path.basename(t) not in BLACKLIST]
    else:
        spec_tests = shared.options.spec_tests[:]
    for wast in spec_tests:
        print('..', os.path.basename(wast))
        # skip checks for some tests
        if os.path.basename(wast) in ['linking.wast', 'nop.wast', 'stack.wast', 'typecheck.wast', 'unwind.wast']:  # FIXME
            continue

        def run_spec_test(wast):
            cmd = shared.WASM_SHELL + [wast]
            # we must skip the stack machine portions of spec tests or apply other extra args
            extra = {}
            cmd = cmd + (extra.get(os.path.basename(wast)) or [])
            return support.run_command(cmd, stderr=subprocess.PIPE)

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            if expected and os.path.exists(expected):
                expected = open(expected).read()

                # fix it up, our pretty (i32.const 83) must become compared to a homely 83 : i32
                def fix_expected(x):
                    x = x.strip()
                    if not x:
                        return x
                    v, t = x.split(' : ')
                    if v.endswith('.'):
                        v = v[:-1]  # remove trailing '.'
                    return '(' + t + '.const ' + v + ')'

                def fix_actual(x):
                    # Drop binaryen interpreter trap logging from our output.
                    if '[trap ' in x:
                        return ''
                    return x
                expected = '\n'.join(map(fix_expected, expected.split('\n')))
                actual = '\n'.join(map(fix_actual, actual.split('\n')))
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')
        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))
        check_expected(actual, expected)
        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if os.path.basename(wast) in ['exports.wast']:  # FIXME
            continue
        # we must ignore some binary format splits
        splits_to_skip = {'func.wast': [2], 'return.wast': [2]}
        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        # * comments.wast: contains characters that are not valid utf-8,
        #   so our string splitting code fails there
        if os.path.basename(wast) not in ['comments.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                # Skip splits known to be problematic for this file.
                skip = splits_to_skip.get(os.path.basename(wast)) or []
                if split_num in skip:
                    print(' skipping split module', split_num - 1)
                    split_num += 1
                    continue
                print(' testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                open(result_wast, 'a').write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def test_asm2wasm():
    """Run asm2wasm over every .asm.js test in all trap-mode/optimization
    combinations, compare output to checked-in .fromasm* files, round-trip
    the binary format, and (optionally) verify with the spec interpreter
    and check source-map emission."""
    print('[ checking asm2wasm testcases... ]\n')
    for asm in shared.get_tests(shared.options.binaryen_test, ['.asm.js']):
        basename = os.path.basename(asm)
        # precise: 0 = allow traps, 1 = default trap mode, 2 = clamp;
        # opts: with and without -O. Each combination has its own golden file.
        for precise in [0, 1, 2]:
            for opts in [1, 0]:
                cmd = shared.ASM2WASM + [asm]
                if 'threads' in asm:
                    cmd += ['--enable-threads']
                wasm = asm.replace('.asm.js', '.fromasm')
                if not precise:
                    cmd += ['--trap-mode=allow', '--ignore-implicit-traps']
                    wasm += '.imprecise'
                elif precise == 2:
                    cmd += ['--trap-mode=clamp']
                    wasm += '.clamp'
                if not opts:
                    wasm += '.no-opts'
                    if precise:
                        cmd += ['-O0']  # test that -O0 does nothing
                else:
                    cmd += ['-O']
                if 'debugInfo' in basename:
                    cmd += ['-g']
                if 'noffi' in basename:
                    cmd += ['--no-legalize-javascript-ffi']
                if precise and opts:
                    # test mem init importing
                    open('a.mem', 'w').write(basename)
                    cmd += ['--mem-init=a.mem']
                    if basename[0] == 'e':
                        cmd += ['--mem-base=1024']
                if '4GB' in basename:
                    cmd += ['--mem-max=4294967296']
                if 'i64' in basename or 'wasm-only' in basename or 'noffi' in basename:
                    cmd += ['--wasm-only']
                print('..', basename, os.path.basename(wasm))

                def do_asm2wasm_test():
                    # One asm2wasm invocation plus output + binary round-trip checks.
                    actual = support.run_command(cmd)
                    # verify output
                    if not os.path.exists(wasm):
                        shared.fail_with_error('output .wast file %s does not exist' % wasm)
                    shared.fail_if_not_identical_to_file(actual, wasm)
                    shared.binary_format_check(wasm, verify_final_result=False)
                # test both normally and with pass debug (so each inter-pass state
                # is validated)
                old_pass_debug = os.environ.get('BINARYEN_PASS_DEBUG')
                try:
                    os.environ['BINARYEN_PASS_DEBUG'] = '1'
                    print("With BINARYEN_PASS_DEBUG=1:")
                    do_asm2wasm_test()
                    del os.environ['BINARYEN_PASS_DEBUG']
                    print("With BINARYEN_PASS_DEBUG disabled:")
                    do_asm2wasm_test()
                finally:
                    # Restore the caller's environment no matter what happened.
                    if old_pass_debug is not None:
                        os.environ['BINARYEN_PASS_DEBUG'] = old_pass_debug
                    else:
                        if 'BINARYEN_PASS_DEBUG' in os.environ:
                            del os.environ['BINARYEN_PASS_DEBUG']
                # verify in wasm
                if shared.options.interpreter:
                    # remove imports, spec interpreter doesn't know what to do with them
                    subprocess.check_call(shared.WASM_OPT + ['--remove-imports', wasm], stdout=open('ztemp.wast', 'w'), stderr=subprocess.PIPE)
                    proc = subprocess.Popen([shared.options.interpreter, 'ztemp.wast'], stderr=subprocess.PIPE)
                    out, err = proc.communicate()
                    if proc.returncode != 0:
                        try:  # to parse the error
                            # Interpreter errors look like "file:LINE.COL-LINE.COL";
                            # print a caret under the offending column.
                            reported = err.split(':')[1]
                            start, end = reported.split('-')
                            start_line, start_col = map(int, start.split('.'))
                            lines = open('ztemp.wast').read().split('\n')
                            print()
                            print('=' * 80)
                            print(lines[start_line - 1])
                            print((' ' * (start_col - 1)) + '^')
                            print((' ' * (start_col - 2)) + '/_\\')
                            print('=' * 80)
                            print(err)
                        except Exception:
                            # failed to pretty-print
                            shared.fail_with_error('wasm interpreter error: ' + err)
                        shared.fail_with_error('wasm interpreter error')
                # verify debug info
                if 'debugInfo' in asm:
                    jsmap = 'a.wasm.map'
                    cmd += ['--source-map', jsmap, '--source-map-url', 'http://example.org/' + jsmap, '-o', 'a.wasm']
                    support.run_command(cmd)
                    if not os.path.isfile(jsmap):
                        shared.fail_with_error('Debug info map not created: %s' % jsmap)
                    with open(jsmap, 'rb') as actual:
                        shared.fail_if_not_identical_to_file(actual.read(), wasm + '.map')
                    with open('a.wasm', 'rb') as binary:
                        # Look for the custom "sourceMappingURL" section:
                        # name-length byte (16) followed by the section name.
                        url_section_name = bytes([16]) + bytes('sourceMappingURL', 'utf-8')
                        url = 'http://example.org/' + jsmap
                        assert len(url) < 256, 'name too long'
                        url_section_contents = bytes([len(url)]) + bytes(url, 'utf-8')
                        print(url_section_name)
                        binary_contents = binary.read()
                        if url_section_name not in binary_contents:
                            shared.fail_with_error('source map url section not found in binary')
                        url_section_index = binary_contents.index(url_section_name)
                        if url_section_contents not in binary_contents[url_section_index:]:
                            shared.fail_with_error('source map url not found in url section')
def run_asm2wasm_tests():
    # Python 2 variant: run asm2wasm over every .asm.js test in all
    # trap-mode/optimization combinations, compare output to the checked-in
    # .fromasm* files, round-trip the binary format, and (optionally) verify
    # with the spec interpreter and check source-map emission.
    print '[ checking asm2wasm testcases... ]\n'
    for asm in tests:
        if asm.endswith('.asm.js'):
            # precise: 0 = allow potential traps, 1 = default, 2 = clamped;
            # opts: with and without -O. Each combination has its own golden file.
            for precise in [0, 1, 2]:
                for opts in [1, 0]:
                    cmd = ASM2WASM + [os.path.join(options.binaryen_test, asm)]
                    wasm = asm.replace('.asm.js', '.fromasm')
                    if not precise:
                        cmd += ['--emit-potential-traps', '--ignore-implicit-traps']
                        wasm += '.imprecise'
                    elif precise == 2:
                        cmd += ['--emit-clamped-potential-traps']
                        wasm += '.clamp'
                    if not opts:
                        wasm += '.no-opts'
                        if precise:
                            cmd += ['-O0']  # test that -O0 does nothing
                    else:
                        cmd += ['-O']
                    if 'debugInfo' in asm:
                        cmd += ['-g']
                    if 'noffi' in asm:
                        cmd += ['--no-legalize-javascript-ffi']
                    if precise and opts:
                        # test mem init importing
                        open('a.mem', 'wb').write(asm)
                        cmd += ['--mem-init=a.mem']
                        if asm[0] == 'e':
                            cmd += ['--mem-base=1024']
                    if 'i64' in asm or 'wasm-only' in asm or 'noffi' in asm:
                        cmd += ['--wasm-only']
                    wasm = os.path.join(options.binaryen_test, wasm)
                    print '..', asm, wasm

                    def do_asm2wasm_test():
                        # One asm2wasm invocation plus output + binary checks.
                        actual = run_command(cmd)
                        # verify output
                        if not os.path.exists(wasm):
                            fail_with_error('output .wast file %s does not exist' % wasm)
                        expected = open(wasm, 'rb').read()
                        if actual != expected:
                            fail(actual, expected)
                        binary_format_check(wasm, verify_final_result=False)
                    # test both normally and with pass debug (so each inter-pass state is validated)
                    old_pass_debug = os.environ.get('BINARYEN_PASS_DEBUG')
                    try:
                        os.environ['BINARYEN_PASS_DEBUG'] = '1'
                        do_asm2wasm_test()
                        del os.environ['BINARYEN_PASS_DEBUG']
                        do_asm2wasm_test()
                    finally:
                        # Restore the caller's environment no matter what happened.
                        if old_pass_debug is not None:
                            os.environ['BINARYEN_PASS_DEBUG'] = old_pass_debug
                        else:
                            if 'BINARYEN_PASS_DEBUG' in os.environ:
                                del os.environ['BINARYEN_PASS_DEBUG']
                    # verify in wasm
                    if options.interpreter:
                        # remove imports, spec interpreter doesn't know what to do with them
                        subprocess.check_call(WASM_OPT + ['--remove-imports', wasm], stdout=open('ztemp.wast', 'w'), stderr=subprocess.PIPE)
                        proc = subprocess.Popen([options.interpreter, 'ztemp.wast'], stderr=subprocess.PIPE)
                        out, err = proc.communicate()
                        if proc.returncode != 0:
                            try:  # to parse the error
                                # Errors look like "file:LINE.COL-LINE.COL";
                                # print a caret under the offending column.
                                reported = err.split(':')[1]
                                start, end = reported.split('-')
                                start_line, start_col = map(int, start.split('.'))
                                lines = open('ztemp.wast').read().split('\n')
                                print
                                print '=' * 80
                                print lines[start_line - 1]
                                print(' ' * (start_col - 1)) + '^'
                                print(' ' * (start_col - 2)) + '/_\\'
                                print '=' * 80
                                print err
                            except Exception, e:
                                fail_with_error('wasm interpreter error: ' + err)  # failed to pretty-print
                            fail_with_error('wasm interpreter error')
                    # verify debug info
                    if 'debugInfo' in asm:
                        jsmap = 'a.wasm.map'
                        cmd += ['--source-map', jsmap, '--source-map-url', 'http://example.org/' + jsmap, '-o', 'a.wasm']
                        run_command(cmd)
                        if not os.path.isfile(jsmap):
                            fail_with_error('Debug info map not created: %s' % jsmap)
                        with open(wasm + '.map', 'rb') as expected:
                            with open(jsmap, 'rb') as actual:
                                fail_if_not_identical(actual.read(), expected.read())
                        with open('a.wasm', 'rb') as binary:
                            # Look for the custom "sourceMappingURL" section:
                            # name-length byte (16) followed by the section name.
                            url_section_name = bytearray([16]) + bytearray('sourceMappingURL')
                            payload = 'http://example.org/' + jsmap
                            assert len(payload) < 256, 'name too long'
                            url_section_contents = bytearray([len(payload)]) + bytearray(payload)
                            print url_section_name
                            binary_contents = bytearray(binary.read())
                            if url_section_name not in binary_contents:
                                fail_with_error('source map url section not found in binary')
                            if url_section_contents not in binary_contents[binary_contents.index(url_section_name):]:
                                fail_with_error('source map url not found in url section')
def run_spec_tests():
    """Run each wast in shared.options.spec_tests through wasm-shell,
    compare against its expected-output log when one exists, then split
    the file into modules, round-trip each through the binary format,
    and re-check the combined output."""
    print('\n[ checking wasm-shell spec testcases... ]\n')
    for wast in shared.options.spec_tests:
        base_name = os.path.basename(wast)
        print('..', base_name)

        def shell_run(path):
            # Execute wasm-shell on one wast file and capture its output.
            return support.run_command(shared.WASM_SHELL + [path],
                                       stderr=subprocess.PIPE)

        def opt_run(path):
            # check optimization validation
            support.run_command(shared.WASM_OPT + [path, '-O', '-all'])

        def compare_with_expected(observed, expected_path):
            # No-op when there is no recorded log for this test.
            if not expected_path or not os.path.exists(expected_path):
                return
            recorded = open(expected_path).read()
            print(' (using expected output)')
            if observed.strip() != recorded.strip():
                shared.fail(observed.strip(), recorded.strip())

        log_path = os.path.join(shared.get_test_dir('spec'),
                                'expected-output', base_name + '.log')
        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            observed = shell_run(wast)
        except Exception as err:
            expected_failure = '.fail.' in base_name and (
                'wasm-validator error' in str(err) or
                'parse exception' in str(err))
            if expected_failure:
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            shared.fail_with_error(str(err))
        compare_with_expected(observed, log_path)
        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if 'exports.wast' in base_name:  # FIXME
            continue
        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        # * comments.wast: contains characters that are not valid utf-8,
        #   so our string splitting code fails there
        if base_name in ['comments.wast']:
            # handle unsplittable wast files
            shell_run(wast)
            continue
        combined = ''
        for index, (module, asserts) in enumerate(support.split_wast(wast)):
            print(' testing split module', index)
            support.write_wast('split.wast', module, asserts)
            shell_run('split.wast')  # before binary stuff - just check it's still ok split out
            opt_run('split.wast')  # also that our optimizer doesn't break on it
            rebuilt = shared.binary_format_check('split.wast',
                                                 verify_final_result=False,
                                                 original_wast=wast)
            # add the asserts, and verify that the test still passes
            open(rebuilt, 'a').write('\n' + '\n'.join(asserts))
            combined += shell_run(rebuilt)
        # compare all the outputs to the expected output
        compare_with_expected(combined,
                              os.path.join(shared.get_test_dir('spec'),
                                           'expected-output',
                                           base_name + '.log'))
cmd += ['-O'] if precise and opts: # test mem init importing open('a.mem', 'wb').write(asm) cmd += ['--mem-init=a.mem'] if asm[0] == 'e': cmd += ['--mem-base=1024'] if 'i64' in asm or 'wasm-only' in asm: cmd += ['--wasm-only'] wasm = os.path.join(options.binaryen_test, wasm) print '..', asm, wasm actual = run_command(cmd) # verify output if not os.path.exists(wasm): fail_with_error('output .wast file %s does not exist' % wasm) expected = open(wasm, 'rb').read() if actual != expected: fail(actual, expected) binary_format_check(wasm, verify_final_result=False) # verify in wasm if options.interpreter: # remove imports, spec interpreter doesn't know what to do with them subprocess.check_call(WASM_OPT + ['--remove-imports', wasm], stdout=open('ztemp.wast', 'w'), stderr=subprocess.PIPE) proc = subprocess.Popen([options.interpreter, 'ztemp.wast'], stderr=subprocess.PIPE) out, err = proc.communicate() if proc.returncode != 0: try: # to parse the error reported = err.split(':')[1]