def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Run a stubgen test case: write its modules, generate stubs, compare output."""
    extra = []  # Extra command-line args passed to the stub generator.
    mods = []   # Module names we create, for sys.modules cleanup afterwards.
    source = '\n'.join(testcase.input)
    for file, content in testcase.files + [('./main.py', source)]:
        mod = os.path.basename(file)[:-3]  # Strip '.py' to get the module name.
        mods.append(mod)
        extra.extend(['-m', mod])
        with open(file, 'w') as f:
            f.write(content)

    options = self.parse_flags(source, extra)
    out_dir = 'out'
    try:
        try:
            # Test-case name suffixes select the stubgen mode.
            if not testcase.name.endswith('_import'):
                options.no_import = True
            if not testcase.name.endswith('_semanal'):
                options.parse_only = True
            generate_stubs(options, quiet=True, add_header=False)
            a = []  # type: List[str]
            self.add_file(os.path.join(out_dir, 'main.pyi'), a)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(testcase.output, a,
                                   'Invalid output ({}, line {})'.format(
                                       testcase.file, testcase.line))
    finally:
        # Drop imported test modules and the generated output directory so
        # later test cases start from a clean state.
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]
        shutil.rmtree(out_dir)
def test_error_stream(testcase: DataDrivenTestCase) -> None:
    """Perform a single error streaming test case.

    The argument contains the description of the test case.
    """
    options = Options()
    options.show_traceback = True

    logged_messages = []  # type: List[str]

    def flush_errors(msgs: List[str], serious: bool) -> None:
        # Record each non-empty flush so the test can check flush boundaries.
        if msgs:
            logged_messages.append('==== Errors flushed ====')
            logged_messages.extend(msgs)

    sources = [BuildSource('main', '__main__', '\n'.join(testcase.input))]
    try:
        build.build(sources=sources,
                    options=options,
                    alt_lib_path=test_temp_dir,
                    flush_errors=flush_errors)
    except CompileError as e:
        # All errors should have been flushed incrementally; none at the end.
        assert e.messages == []

    assert_string_arrays_equal(testcase.output, logged_messages,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_transform(testcase):
    """Perform an identity transform test case."""
    try:
        src = '\n'.join(testcase.input)
        result = build.build('main',
                             target=build.SEMANTIC_ANALYSIS,
                             program_text=src,
                             pyversion=testfile_pyversion(testcase.file),
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        a = []
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.py',
                                     'typing.py',
                                     'abc.py'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                # Run the identity transform and pretty-print the result.
                t = TestTransformVisitor()
                f = t.node(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Type check a test program by running the mypy script in a subprocess.

    Fix: the original called process.stdout.read() without ever waiting on
    the child, leaking a zombie process and an open pipe handle; use
    communicate(), which reads all output and reaps the child.
    """
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--tb')  # Show traceback on crash.
    # Type check the program.
    fixed = [python3_path,
             os.path.join(testcase.old_cwd, 'scripts', 'mypy')]
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    outb, _ = process.communicate()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    assert_string_arrays_equal(testcase.output, out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_stubgen(testcase):
    """Run a stubgen test case against a module written into a temp path.

    Fix: removed the redundant file.close() inside the with-block; the
    context manager already closes the file on exit.
    """
    if 'stubgen-test-path' not in sys.path:
        sys.path.insert(0, 'stubgen-test-path')
        os.mkdir('stubgen-test-path')
    source = '\n'.join(testcase.input)
    handle = tempfile.NamedTemporaryFile(prefix='prog_', suffix='.py',
                                         dir='stubgen-test-path')
    assert os.path.isabs(handle.name)
    path = os.path.basename(handle.name)
    name = path[:-3]  # Strip '.py' to get the module name.
    path = os.path.join('stubgen-test-path', path)
    out_dir = '_out'
    os.mkdir(out_dir)
    try:
        with open(path, 'w') as file:
            file.write(source)
        # Without this we may sometimes be unable to import the module below,
        # as importlib caches os.listdir() results in Python 3.3+ (Guido
        # explained this to me).
        reset_importlib_caches()
        try:
            if testcase.name.endswith('_import'):
                generate_stub_for_module(name, out_dir, quiet=True)
            else:
                generate_stub(path, out_dir)
            a = load_output(out_dir)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(testcase.output, a,
                                   'Invalid output ({}, line {})'.format(
                                       testcase.file, testcase.line))
    finally:
        shutil.rmtree(out_dir)
        handle.close()
def check_module_equivalence(self, name: str,
                             expected: Optional[Set[str]], actual: Set[str]) -> None:
    """Assert that the actual module set (minus __main__) matches expected.

    A None expectation means the test case made no assertion; skip the check.
    """
    if expected is None:
        return
    expected_sorted = sorted(expected)
    actual_sorted = sorted(actual - {"__main__"})
    message = 'Set of {} modules does not match expected set'.format(name)
    assert_string_arrays_equal(expected_sorted, actual_sorted, message)
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Build two program versions and diff their symbol table snapshots."""
    first_src = '\n'.join(testcase.input)
    files_dict = dict(testcase.files)
    # The second version of the program is provided as tmp/next.py.
    second_src = files_dict['tmp/next.py']
    options = parse_options(first_src, testcase, 1)

    messages1, files1 = self.build(first_src, options)
    messages2, files2 = self.build(second_src, options)

    a = []
    if messages1:
        a.extend(messages1)
    if messages2:
        a.append('== next ==')
        a.extend(messages2)

    assert files1 is not None and files2 is not None, ('cases where CompileError'
                                                       ' occurred should not be run')
    prefix = '__main__'
    snapshot1 = snapshot_symbol_table(prefix, files1['__main__'].names)
    snapshot2 = snapshot_symbol_table(prefix, files2['__main__'].names)
    diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
    # Emit fired triggers in deterministic (sorted) order.
    for trigger in sorted(diff):
        a.append(trigger)

    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_stubgen(testcase):
    """Run a stubgen test case using a randomly named program module.

    Fix: removed the redundant file.close() inside the with-block; the
    context manager already closes the file on exit.
    """
    source = '\n'.join(testcase.input)
    # Random module name avoids collisions with previously imported modules.
    name = 'prog%d' % random.randrange(1000 * 1000 * 1000)
    path = '%s.py' % name
    out_dir = '_out'
    os.mkdir(out_dir)
    try:
        with open(path, 'w') as file:
            file.write(source)
        # Without this we may sometimes be unable to import the module below,
        # as importlib caches os.listdir() results in Python 3.3+ (Guido
        # explained this to me).
        reset_importlib_caches()
        try:
            if testcase.name.endswith('_import'):
                generate_stub_for_module(name, out_dir, quiet=True)
            else:
                generate_stub(path, out_dir)
            a = load_output(out_dir)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(testcase.output, a,
                                   'Invalid output ({}, line {})'.format(
                                       testcase.file, testcase.line))
    finally:
        shutil.rmtree(out_dir)
        os.remove(path)
def run_test(self, testcase):
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build('main',
                             target=build.SEMANTIC_ANALYSIS,
                             program_text=src,
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        # Collect all TypeInfos in top-level modules.
        typeinfos = TypeInfoMap()
        for f in result.files.values():
            for n in f.names.values():
                if isinstance(n.node, TypeInfo):
                    typeinfos[n.fullname] = n.node
        # The output is the symbol table converted into a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_cgen(testcase):
    """Compile a test program to C, run the binary, and compare its output."""
    # Build the program.
    text = '\n'.join(testcase.input)
    program = '_program.py'
    try:
        build.build(program,
                    target=build.C,
                    program_text=text,
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path='lib')
        # Run the program.
        outfile = './_program'
        outb = subprocess.check_output([outfile], stderr=subprocess.STDOUT)
        # Split output into lines.
        out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
        # Remove temp file.
        os.remove(outfile)
    except errors.CompileError as e:
        out = e.messages
    # Include line-end comments in the expected output.
    # Note: # characters in string literals can confuse this.
    for s in testcase.input:
        m = re.search(' #(?! type:)(.*)', s)
        if m:
            testcase.output.append(m.group(1).strip())
    # Verify output.
    assert_string_arrays_equal(testcase.output, out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Semantic analysis reported errors; report them as the output.
            raise CompileError(a)
        # Collect all TypeInfos in top-level modules.
        typeinfos = TypeInfoMap()
        for f in result.files.values():
            for n in f.names.values():
                if isinstance(n.node, TypeInfo):
                    assert n.fullname is not None
                    typeinfos[n.fullname] = n.node
        # The output is the symbol table converted into a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_python_evaluation(testcase):
    """Type check a test program with mypy running in a subprocess.

    Fixes: removed the unused local `outfile`; replaced the bare
    open()/close() pair with a `with` block so the file is closed even
    if a write raises.
    """
    # Write the program to a file.
    program = "_program.py"
    with open(program, "w") as f:
        for s in testcase.input:
            f.write("{}\n".format(s))
    # Use Python 2 interpreter if running a Python 2 test case.
    if testcase.name.lower().endswith("python2"):
        args = ["--py2", python2_path]
    else:
        args = []
    # Set up module path.
    typing_path = os.path.join(os.getcwd(), "lib-typing", "3.2")
    assert os.path.isdir(typing_path)
    os.environ["PYTHONPATH"] = os.pathsep.join([typing_path, "."])
    os.environ["MYPYPATH"] = "."
    # Run the program.
    outb = subprocess.check_output(
        [python3_path, os.path.join("scripts", "mypy")] + args + [program])
    # Split output into lines.
    out = [s.rstrip("\n\r") for s in str(outb, "utf8").splitlines()]
    # Remove temp file.
    os.remove(program)
    assert_string_arrays_equal(
        testcase.output, out,
        "Invalid output ({}, line {})".format(testcase.file, testcase.line)
    )
def test_semanal(testcase):
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """
    try:
        src = "\n".join(testcase.input)
        options = get_semanal_options()
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource("main", None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Semantic analysis reported errors; report them as the output.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (
                not f.path.endswith(
                    (os.sep + "builtins.pyi", "typing.pyi",
                     "mypy_extensions.pyi", "abc.pyi", "collections.pyi")
                )
                and not os.path.basename(f.path).startswith("_")
                and not os.path.splitext(os.path.basename(f.path))[0].endswith("_")
            ):
                a += str(f).split("\n")
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        "Invalid semantic analyzer output ({}, line {})".format(testcase.file,
                                                                testcase.line)
    )
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given Python
    version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # TODO: Enable strict optional for these tests
    mypy_cmdline = [
        '--show-traceback',
        '--no-site-packages',
        '--no-strict-optional',
        '--no-silence-site-packages',
    ]
    if testcase.name.lower().endswith('_newsemanal'):
        mypy_cmdline.append('--new-semantic-analyzer')
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
        mypy_cmdline.append('--python-version={}'.format(
            '.'.join(map(str, PYTHON3_VERSION))))

    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    mypy_cmdline.append('--cache-dir={}'.format(cache_dir))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run_command([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    # Normalize lines that mention typeshed paths down to the bare file name,
    # so expected output does not depend on where typeshed is installed.
    for i, line in enumerate(output):
        if os.path.sep + 'typeshed' + os.path.sep in line:
            output[i] = line.split(os.path.sep)[-1]
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_parse_error(testcase):
    """Check that parsing the test input produces the expected errors."""
    source = bytes("\n".join(testcase.input), "ascii")
    try:
        # Compile temporary file.
        parse(source, INPUT_FILE_NAME)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        expected_msg = "Invalid compiler output ({}, line {})".format(
            testcase.file, testcase.line)
        assert_string_arrays_equal(testcase.output, e.messages, expected_msg)
    else:
        raise AssertionFailure("No errors reported")
def run_case_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Type check one test case, optionally as one run of an incremental pair.

    incremental is 0 for a normal run, 1 for the first run of an incremental
    test (writes the program file), and 2 for the second run (applies the
    *.py.next updates).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)

    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)

    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # Drop the '.next' suffix.
                        shutil.copy(full, target)
        # Always set to none so we're forced to reread program_name
        program_text = None

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
        if testcase.expected_stale_modules is not None and incremental == 2:
            assert_string_arrays_equal(
                list(sorted(testcase.expected_stale_modules)),
                list(sorted(res.manager.stale_modules.difference({"__main__"}))),
                'Set of stale modules does not match expected set')
def test_parse_error(testcase):
    """Check that parsing a non-ASCII test file reports the expected errors."""
    source = bytes('\n'.join(testcase.input), 'utf-8')
    try:
        # Compile temporary file. The test file contains non-ASCII characters.
        parse(source, INPUT_FILE_NAME, None, Options())
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
    else:
        raise AssertionFailure('No errors reported')
def test_python_evaluation(testcase): python2_interpreter = try_find_python2_interpreter() # Use Python 2 interpreter if running a Python 2 test case. if testcase.name.lower().endswith('python2'): if not python2_interpreter: # Skip, can't find a Python 2 interpreter. raise SkipTestCaseException() interpreter = python2_interpreter args = ['--py2'] py2 = True else: interpreter = python3_path args = [] py2 = False args.append('--show-traceback') # Write the program to a file. program = '_program.py' program_path = os.path.join(test_temp_dir, program) with open(program_path, 'w') as file: for s in testcase.input: file.write('{}\n'.format(s)) # Type check the program. # This uses the same PYTHONPATH as the current process. process = subprocess.Popen([python3_path, os.path.join(testcase.old_cwd, 'scripts', 'mypy')] + args + [program], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=test_temp_dir) outb = process.stdout.read() # Split output into lines. out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()] if not process.wait(): # Set up module path for the execution. # This needs the typing module but *not* the mypy module. vers_dir = '2.7' if py2 else '3.2' typing_path = os.path.join(testcase.old_cwd, 'lib-typing', vers_dir) assert os.path.isdir(typing_path) env = os.environ.copy() env['PYTHONPATH'] = typing_path process = subprocess.Popen([interpreter, program], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=test_temp_dir, env=env) outb = process.stdout.read() # Split output into lines. out += [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()] # Remove temp file. os.remove(program_path) assert_string_arrays_equal(testcase.output, out, 'Invalid output ({}, line {})'.format( testcase.file, testcase.line))
def check_module_equivalence(self, name: str,
                             expected: Optional[Set[str]], actual: Set[str]) -> None:
    """Compare an expected module set to the observed one, ignoring __main__.

    No check is performed when expected is None (no expectation was stated).
    """
    if expected is None:
        return
    want = sorted(expected)
    got = sorted(actual.difference({"__main__"}))
    assert_string_arrays_equal(
        want,
        got,
        ('Actual modules ({}) do not match expected modules ({}) '
         'for "[{} ...]"').format(', '.join(got), ', '.join(want), name))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Type check a program and dump the inferred types of selected nodes."""
    try:
        line = testcase.input[0]
        mask = ''
        # A leading '## <regex>' line restricts output to matching node types.
        if line.startswith('##'):
            mask = '(' + line[2:].strip() + ')$'

        src = '\n'.join(testcase.input)
        options = Options()
        options.strict_optional = False  # TODO: Enable strict optional checking
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.export_types = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        map = result.types
        nodes = map.keys()

        # Ignore NameExpr nodes of variables with explicit (trivial) types
        # to simplify output.
        searcher = SkippedNodeSearcher()
        for file in result.files.values():
            file.accept(searcher)
        ignored = searcher.nodes

        # Filter nodes that should be included in the output.
        keys = []
        for node in nodes:
            if node.line is not None and node.line != -1 and map[node]:
                if ignore_node(node) or node in ignored:
                    continue
                if (re.match(mask, short_type(node))
                        or (isinstance(node, NameExpr)
                            and re.match(mask, node.name))):
                    # Include node in output.
                    keys.append(node)

        # Sort for a deterministic output order.
        for key in sorted(keys,
                          key=lambda n: (n.line, short_type(n),
                                         str(n) + str(map[n]))):
            ts = str(map[key]).replace('*', '')  # Remove erased tags
            ts = ts.replace('__main__.', '')
            a.append('{}({}) : {}'.format(short_type(key), key.line, ts))
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
def run_test(self, testcase):
    """Type check a program and dump the inferred types of selected nodes."""
    implementation = testcase_python_implementation(testcase)
    a = []
    try:
        line = testcase.input[0]
        mask = ''
        # A leading '## <regex>' line restricts output to matching node types.
        if line.startswith('##'):
            mask = '(' + line[2:].strip() + ')$'

        src = '\n'.join(testcase.input)
        result = build.build(target=build.TYPE_CHECK,
                             sources=[BuildSource('main', None, src)],
                             implementation=implementation,
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=config.test_temp_dir)
        map = result.types
        nodes = map.keys()

        # Ignore NameExpr nodes of variables with explicit (trivial) types
        # to simplify output.
        searcher = VariableDefinitionNodeSearcher()
        for file in result.files.values():
            file.accept(searcher)
        ignored = searcher.nodes

        # Filter nodes that should be included in the output.
        keys = []
        for node in nodes:
            if node.line is not None and node.line != -1 and map[node]:
                if ignore_node(node) or node in ignored:
                    continue
                if (re.match(mask, short_type(node))
                        or (isinstance(node, NameExpr)
                            and re.match(mask, node.name))):
                    # Include node in output.
                    keys.append(node)

        # Sort for a deterministic output order.
        for key in sorted(keys,
                          key=lambda n: (n.line, short_type(n),
                                         str(n) + str(map[n]))):
            ts = str(map[key]).replace('*', '')  # Remove erased tags
            ts = ts.replace('__main__.', '')
            a.append('{}({}) : {}'.format(short_type(key), key.line, ts))
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
def test_python_evaluation(testcase):
    """Type check a program with mypy and, if clean, execute it.

    Fixes: removed the unused local `outfile`; replaced open()/close()
    with a `with` block so the file is closed even if a write raises;
    wait on the second subprocess so it does not linger as a zombie.
    """
    python2_interpreter = try_find_python2_interpreter()
    # Use Python 2 interpreter if running a Python 2 test case.
    if testcase.name.lower().endswith('python2'):
        if not python2_interpreter:
            # Skip, can't find a Python 2 interpreter.
            raise SkipTestCaseException()
        interpreter = python2_interpreter
        args = ['--py2']
        py2 = True
    else:
        interpreter = python3_path
        args = []
        py2 = False
    # Write the program to a file.
    program = '_program.py'
    with open(program, 'w') as f:
        for s in testcase.input:
            f.write('{}\n'.format(s))
    # Set up module path.
    typing_path = os.path.join(os.getcwd(), 'lib-typing', '3.2')
    assert os.path.isdir(typing_path)
    os.environ['PYTHONPATH'] = os.pathsep.join([typing_path, '.'])
    os.environ['MYPYPATH'] = '.'
    # Type check the program.
    process = subprocess.Popen([python3_path, os.path.join('scripts', 'mypy')]
                               + args + [program],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    outb = process.stdout.read()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    if not process.wait():
        # Type check succeeded: execute the program.
        if py2:
            typing_path = os.path.join(os.getcwd(), 'lib-typing', '2.7')
            os.environ['PYTHONPATH'] = os.pathsep.join([typing_path, '.'])
        process = subprocess.Popen([interpreter, program],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        outb = process.stdout.read()
        # Split output into lines.
        out += [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
        process.wait()  # Reap the child to avoid leaving a zombie process.
    # Remove temp file.
    os.remove(program)
    assert_string_arrays_equal(testcase.output, out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_parse_error(testcase: DataDrivenTestCase) -> None:
    """Verify that parsing the test input fails with the expected messages."""
    source = bytes('\n'.join(testcase.input), 'utf-8')
    try:
        # Compile temporary file. The test file contains non-ASCII characters.
        parse(source, INPUT_FILE_NAME, '__main__', None, Options())
    except CompileError as e:
        if e.module_with_blocker is not None:
            assert e.module_with_blocker == '__main__'
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
    else:
        raise AssertionError('No errors reported')
def run_case(self, testcase: DataDrivenTestCase) -> None: name = testcase.name # We use the test case name to decide which data structures to dump. # Dumping everything would result in very verbose test cases. if name.endswith('_symtable'): kind = SYMTABLE elif name.endswith('_typeinfo'): kind = TYPEINFO elif name.endswith('_types'): kind = TYPES else: kind = AST main_src = '\n'.join(testcase.input) result = self.build(main_src) assert result is not None, 'cases where CompileError occurred should not be run' result.manager.fscache.flush() fine_grained_manager = FineGrainedBuildManager(result) a = [] if result.errors: a.extend(result.errors) target_path = os.path.join(test_temp_dir, 'target.py') shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path) a.extend(self.dump(fine_grained_manager, kind)) old_subexpr = get_subexpressions(result.manager.modules['target']) a.append('==>') new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path) a.extend(self.dump(fine_grained_manager, kind)) for expr in old_subexpr: if isinstance(expr, TypeVarExpr): # These are merged so we can't perform the check. continue # Verify that old AST nodes are removed from the expression type map. assert expr not in new_types if testcase.normalize_output: a = normalize_error_messages(a) assert_string_arrays_equal( testcase.output, a, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_python_evaluation(testcase):
    """Type check a program with mypy and, if clean, execute it.

    Fixes: removed the unused local `outfile`; replaced open()/close()
    with a `with` block so the file is closed even if a write raises;
    wait on the second subprocess so it does not linger as a zombie.
    """
    python2_interpreter = try_find_python2_interpreter()
    # Use Python 2 interpreter if running a Python 2 test case.
    if testcase.name.lower().endswith("python2"):
        if not python2_interpreter:
            # Skip, can't find a Python 2 interpreter.
            raise SkipTestCaseException()
        interpreter = python2_interpreter
        args = ["--py2"]
        py2 = True
    else:
        interpreter = python3_path
        args = []
        py2 = False
    # Write the program to a file.
    program = "_program.py"
    with open(program, "w") as f:
        for s in testcase.input:
            f.write("{}\n".format(s))
    # Set up module path.
    typing_path = os.path.join(os.getcwd(), "lib-typing", "3.2")
    assert os.path.isdir(typing_path)
    os.environ["PYTHONPATH"] = os.pathsep.join([typing_path, "."])
    os.environ["MYPYPATH"] = "."
    # Type check the program.
    process = subprocess.Popen(
        [python3_path, os.path.join("scripts", "mypy")] + args + [program],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    outb = process.stdout.read()
    # Split output into lines.
    out = [s.rstrip("\n\r") for s in str(outb, "utf8").splitlines()]
    if not process.wait():
        # Type check succeeded: execute the program.
        if py2:
            typing_path = os.path.join(os.getcwd(), "lib-typing", "2.7")
            os.environ["PYTHONPATH"] = os.pathsep.join([typing_path, "."])
        process = subprocess.Popen([interpreter, program],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        outb = process.stdout.read()
        # Split output into lines.
        out += [s.rstrip("\n\r") for s in str(outb, "utf8").splitlines()]
        process.wait()  # Reap the child to avoid leaving a zombie process.
    # Remove temp file.
    os.remove(program)
    assert_string_arrays_equal(
        testcase.output, out,
        "Invalid output ({}, line {})".format(testcase.file, testcase.line)
    )
def test_daemon(testcase: DataDrivenTestCase) -> None:
    """Run each scripted daemon command and compare its output to expectations."""
    assert testcase.old_cwd is not None, "test was not properly set up"
    for step_num, step in enumerate(parse_script(testcase.input), start=1):
        raw_cmd, *expected_lines = step
        assert raw_cmd.startswith('$')
        command = raw_cmd[1:].strip().replace('{python}', sys.executable)
        status, output = run_cmd(command)
        actual_lines = output.splitlines()
        if status:
            # Non-zero exit status is part of the checked output.
            actual_lines.append('== Return code: %d' % status)
        assert_string_arrays_equal(
            expected_lines, actual_lines,
            "Command %d (%s) did not give expected output" % (step_num, command))
def run_test(self, testcase):
    """Type check a test case and compare normalized errors to expectations."""
    errors = []
    source = '\n'.join(testcase.input)
    try:
        build.build('main',
                    target=build.TYPE_CHECK,
                    program_text=source,
                    pyversion=testfile_pyversion(testcase.file),
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path=test_temp_dir)
    except CompileError as e:
        errors = normalize_error_messages(e.messages)
    assert_string_arrays_equal(
        testcase.output, errors,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_output(testcase):
    """Perform an identity source code transformation test case."""
    # An empty expected output means the input should round-trip unchanged.
    expected = testcase.output
    if expected == []:
        expected = testcase.input
    try:
        src = '\n'.join(testcase.input)
        # Parse and semantically analyze the source program.
        # Test case names with a special suffix get semantically analyzed. This
        # lets us test that semantic analysis does not break source code pretty
        # printing.
        if testcase.name.endswith('_SemanticAnalyzer'):
            result = build.build('main',
                                 target=build.SEMANTIC_ANALYSIS,
                                 program_text=src,
                                 flags=[build.TEST_BUILTINS],
                                 alt_lib_path=test_temp_dir)
            files = result.files
        else:
            files = {'main': parse(src, 'main')}
        a = []
        first = True
        # Produce an output containing the pretty-printed forms (with original
        # formatting) of all the relevant source files.
        for fnam in sorted(files.keys()):
            f = files[fnam]
            # Omit the builtins and files marked for omission.
            if (not f.path.endswith(os.sep + 'builtins.py') and
                    '-skip.' not in f.path):
                # Add file name + colon for files other than the first.
                if not first:
                    a.append('{}:'.format(fix_path(remove_prefix(
                        f.path, test_temp_dir))))
                v = OutputVisitor()
                f.accept(v)
                s = v.output()
                if s != '':
                    a += s.split('\n')
            first = False
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        expected, a,
        'Invalid source code output ({}, line {})'.format(
            testcase.file, testcase.line))
def run_test_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Type check one test case, optionally as one run of an incremental pair.

    incremental is 0 for a normal run, 1 for the first run of an incremental
    test (writes the program file), and 2 for the second run (applies the
    *.py.next updates).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)

    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)

    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            # Force rereading from the file in the build below.
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # Drop the '.next' suffix.
                        shutil.copy(full, target)

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)

    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
def test_python_evaluation(testcase):
    """Type check a program with mypy and, if the check is clean, run it."""
    implementation = testcase_python_implementation(testcase)
    interpreter = implementation.executable
    py2 = implementation.base_dialect.major == 2
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    # Type check the program.
    # This uses the same PYTHONPATH as the current process.
    process = subprocess.Popen([sys.executable, '-m', 'mypy', program],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    outb = process.stdout.read()
    process.stdout.close()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    if not process.wait():
        # Type check succeeded (exit status 0): now execute the program.
        # Set up module path for the execution.
        # This needs the typing module but *not* the mypy module.
        if is_installed():
            env = None
        else:
            vers_dir = '2.7' if py2 else '3.2'
            typing_path = os.path.join(testcase.old_cwd, 'lib-typing', vers_dir)
            assert os.path.isdir(typing_path)
            env = os.environ.copy()
            env['PYTHONPATH'] = typing_path
        process = subprocess.Popen([interpreter, program],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   cwd=test_temp_dir,
                                   env=env)
        outb = process.stdout.read()
        process.stdout.close()
        # Split output into lines.
        out += [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    assert_string_arrays_equal(testcase.output, out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None: name = testcase.name # We use the test case name to decide which data structures to dump. # Dumping everything would result in very verbose test cases. if name.endswith('_symtable'): kind = SYMTABLE elif name.endswith('_typeinfo'): kind = TYPEINFO elif name.endswith('_types'): kind = TYPES else: kind = AST main_src = '\n'.join(testcase.input) messages, manager, graph = self.build(main_src) assert manager is not None, 'cases where CompileError occurred should not be run' a = [] if messages: a.extend(messages) shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), os.path.join(test_temp_dir, 'target.py')) a.extend(self.dump(manager.modules, graph, kind)) old_modules = dict(manager.modules) old_subexpr = get_subexpressions(old_modules['target']) new_file, new_types = self.build_increment(manager, 'target') replace_modules_with_new_variants(manager, graph, old_modules, {'target': new_file}, {'target': new_types}) a.append('==>') a.extend(self.dump(manager.modules, graph, kind)) for expr in old_subexpr: # Verify that old AST nodes are removed from the expression type map. assert expr not in new_types assert_string_arrays_equal( testcase.output, a, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given
    Python version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    mypy_cmdline = ['--show-traceback']
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_stubgen(testcase: DataDrivenTestCase) -> None:
    """Run a stubgen test case: generate a stub and compare to expected output."""
    # Make the generated test module importable via sys.path. The path
    # entry persists across cases in the same process; the directory does
    # not (the temp cwd is recreated per test), hence the separate mkdir.
    if 'stubgen-test-path' not in sys.path:
        sys.path.insert(0, 'stubgen-test-path')
    os.mkdir('stubgen-test-path')
    source = '\n'.join(testcase.input)
    options = parse_flags(source)
    handle = tempfile.NamedTemporaryFile(prefix='prog_', suffix='.py',
                                         dir='stubgen-test-path', delete=False)
    assert os.path.isabs(handle.name)
    path = os.path.basename(handle.name)
    # Module name: the file name without the '.py' suffix.
    name = path[:-3]
    path = os.path.join('stubgen-test-path', path)
    out_dir = '_out'
    os.mkdir(out_dir)
    try:
        handle.write(bytes(source, 'ascii'))
        handle.close()
        # Without this we may sometimes be unable to import the module below, as importlib
        # caches os.listdir() results in Python 3.3+ (Guido explained this to me).
        reset_importlib_caches()
        try:
            if testcase.name.endswith('_import'):
                generate_stub_for_module(name, out_dir, quiet=True,
                                         no_import=options.no_import,
                                         include_private=options.include_private)
            else:
                generate_stub(path, out_dir,
                              include_private=options.include_private)
            a = load_output(out_dir)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
    finally:
        # Clean up the temp module and generated stubs for the next case.
        handle.close()
        os.unlink(handle.name)
        shutil.rmtree(out_dir)
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Type check a test program against the sqlalchemy stubs/plugin config."""
    assert testcase.old_cwd is not None, "test was not properly set up"
    mypy_cmdline = [
        '--show-traceback',
        '--no-silence-site-packages',
        '--config-file={}/sqlalchemy.ini'.format(inipath),
    ]
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        if try_find_python2_interpreter() is None:
            # Can't find a Python 2 interpreter; skip the case.
            pytest.skip()
            return
        mypy_cmdline.append('--py2')
    else:
        if sys.version_info[:2] == (3, 5):
            version = (3, 6)  # Always accept variable annotations.
        else:
            version = sys.version_info[:2]
        mypy_cmdline.append('--python-version={}'.format('.'.join(
            map(str, version))))
    # Write the program to a file.
    program_path = os.path.join(test_temp_dir, 'main.py')
    mypy_cmdline.append(program_path)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            # Also drop the '.py' text so expected output stays short.
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n").replace(
                '.py', ''))
        else:
            output.append(line.rstrip("\r\n"))
    # Remove temp file.
    os.remove(program_path)
    assert_string_arrays_equal(
        testcase.output, output,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Compute fine-grained dependencies for a program and compare to expected.

    A '# __dump_all__' marker in the source dumps dependencies for every
    module instead of just the usual dumped modules.
    """
    src = '\n'.join(testcase.input)
    dump_all = '# __dump_all__' in src
    options = parse_options(src, testcase, incremental_step=1)
    if testcase.name.endswith('python2'):
        options.python_version = defaults.PYTHON2_VERSION
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.cache_dir = os.devnull
    options.export_types = True
    options.preserve_asts = True
    messages, files, type_map = self.build(src, options)
    a = messages
    if files is None or type_map is None:
        if not a:
            a = ['Unknown compile error (likely syntax error in test case or fixture)']
    else:
        deps: DefaultDict[str, Set[str]] = defaultdict(set)
        for module in files:
            # Skip stub/fixture modules unless the case asked for everything.
            if module in dumped_modules or dump_all and module not in (
                    'abc', 'typing', 'mypy_extensions', 'typing_extensions',
                    'enum'):
                new_deps = get_dependencies(files[module], type_map,
                                            options.python_version, options)
                for source in new_deps:
                    deps[source].update(new_deps[source])
        TypeState.add_all_protocol_deps(deps)
        for source, targets in sorted(deps.items()):
            if source.startswith(('<enum', '<typing', '<mypy')):
                # Remove noise.
                continue
            line = '%s -> %s' % (source, ', '.join(sorted(targets)))
            # Clean up output a bit
            line = line.replace('__main__', 'm')
            a.append(line)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_parse_error(testcase: DataDrivenTestCase) -> None:
    """Parse a broken program and verify the reported errors."""
    src = '\n'.join(testcase.input)
    try:
        options = parse_options(src, testcase, 0)
        if options.python_version != sys.version_info[:2]:
            skip()
        # Compile temporary file. The test file contains non-ASCII
        # characters, so hand the parser explicit UTF-8 bytes.
        parse(bytes(src, 'utf-8'), INPUT_FILE_NAME, '__main__', None, options)
        raise AssertionError('No errors reported')
    except CompileError as e:
        if e.module_with_blocker is not None:
            assert e.module_with_blocker == '__main__'
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(
                testcase.file, testcase.line))
def test_semanal_error(testcase):
    """Semantically analyze a broken program and check the reported errors."""
    program_text = '\n'.join(testcase.input)
    try:
        build.build(target=build.SEMANTIC_ANALYSIS,
                    sources=[BuildSource('main', None, program_text)],
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path=test_temp_dir)
    except CompileError as err:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, normalize_error_messages(err.messages),
            'Invalid compiler output ({}, line {})'.format(
                testcase.file, testcase.line))
    else:
        raise AssertionError('No errors reported in {}, line {}'.format(
            testcase.file, testcase.line))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Run the mypy script on a test program and check its output.

    The first input line supplies extra command-line arguments. If the
    test case declares expected output files, those are compared (after
    path normalization) instead of the captured stdout/stderr.
    """
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    # Type check the program.
    fixed = [python3_path,
             os.path.join(testcase.old_cwd, 'scripts', 'mypy')]
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    outb = process.stdout.read()
    # Fix: close the pipe and reap the child. The original never closed
    # process.stdout or called wait(), leaking a file descriptor and
    # leaving a zombie process per test case.
    process.stdout.close()
    process.wait()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionFailure(
                    'Expected file {} was not produced by test case'.format(
                        path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            if testcase.native_sep and os.path.sep == '\\':
                # Cobertura XML embeds native paths; normalize on Windows.
                normalized_output = [fix_cobertura_filename(line)
                                     for line in normalized_output]
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(
                    path))
    else:
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Analyze a program that must produce semantic-analysis errors."""
    src = '\n'.join(testcase.input)
    try:
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        messages = result.errors
        assert messages, 'No errors reported in {}, line {}'.format(
            testcase.file, testcase.line)
    except CompileError as err:
        # A blocking compile error: compare its messages instead.
        messages = err.messages
    # Either way the (normalized) messages must match the expected output.
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(messages),
        'Invalid compiler output ({}, line {})'.format(testcase.file,
                                                       testcase.line))
def test_register(self) -> None:
    """Check the C emitted for a function body containing a LoadInt op."""
    self.env.temp_index = 0
    op = LoadInt(5)
    self.block.ops.append(op)
    self.env.add_op(op)
    fn = FuncIR(FuncDecl('myfunc', None, 'mod',
                         FuncSignature([self.arg], list_rprimitive)),
                [self.block], self.env)
    emitter = Emitter(EmitterContext(NameGenerator([['mod']])))
    generate_native_function(fn, emitter, 'prog.py', 'prog',
                             optimize_int=False)
    result = emitter.fragments
    # Note: the literal 5 appears as 10 in the C output — presumably the
    # tagged-integer encoding (CPyTagged); confirm against mypyc docs.
    assert_string_arrays_equal(
        [
            'PyObject *CPyDef_myfunc(CPyTagged cpy_r_arg) {\n',
            ' CPyTagged cpy_r_i0;\n',
            'CPyL0: ;\n',
            ' cpy_r_i0 = 10;\n',
            '}\n',
        ],
        result, msg='Generated code invalid')
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """
    try:
        src = '\n'.join(testcase.input)
        options = get_semanal_options(src, testcase)
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Route errors through the common CompileError handler below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'mypy_extensions.pyi',
                                     'typing_extensions.pyi',
                                     'abc.pyi',
                                     'collections.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given
    Python version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    mypy_cmdline = [
        python3_path,
        os.path.join(testcase.old_cwd, 'scripts', 'mypy'),
        '--show-traceback',
    ]
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    mypy_cmdline.append(program)
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    # Type check the program.
    # This uses the same PYTHONPATH as the current process.
    returncode, out = run(mypy_cmdline)
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run([interpreter, program])
        out += interp_out
    # Remove temp file.
    os.remove(program_path)
    assert_string_arrays_equal(adapt_output(testcase), out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Check the fine-grained dependencies computed for '__main__'."""
    src = '\n'.join(testcase.input)
    messages, files, type_map = self.build(src)
    assert files is not None and type_map is not None, (
        'cases where CompileError'
        ' occurred should not be run')
    a = messages
    deps = get_dependencies('__main__', files['__main__'], type_map)
    for trigger in sorted(deps):
        rendered = '%s -> %s' % (trigger, ', '.join(sorted(deps[trigger])))
        # Shorten '__main__' to 'm' to keep the expected output compact.
        a.append(rendered.replace('__main__', 'm'))
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Run a fine-grained incremental update test case.

    Build the initial program, then apply each scripted step (file
    updates/deletions), collecting messages after every update. Update
    sections in the output are separated by '=='.
    """
    main_src = '\n'.join(testcase.input)
    messages, manager, graph = self.build(main_src)
    a = []
    if messages:
        a.extend(normalize_messages(messages))
    fine_grained_manager = FineGrainedBuildManager(manager, graph)
    steps = testcase.find_steps()
    all_triggered = []
    for operations in steps:
        modules = []
        for op in operations:
            if isinstance(op, UpdateFile):
                # Modify/create file
                shutil.copy(op.source_path, op.target_path)
                modules.append((op.module, op.target_path))
            else:
                # Delete file
                os.remove(op.path)
                modules.append((op.module, op.path))
        new_messages = fine_grained_manager.update(modules)
        all_triggered.append(fine_grained_manager.triggered)
        new_messages = normalize_messages(new_messages)
        a.append('==')
        a.extend(new_messages)
    # Normalize paths in test output (for Windows).
    a = [line.replace('\\', '/') for line in a]
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
    if testcase.triggered:
        assert_string_arrays_equal(
            testcase.triggered,
            self.format_triggered(all_triggered),
            'Invalid active triggers ({}, line {})'.format(
                testcase.file, testcase.line))
def test_parser(testcase):
    """Perform a single parser test case.

    The argument contains the description of the test case.
    """
    # Files named '*python2.test' are parsed with Python 2 syntax.
    pyversion = 2 if testcase.file.endswith('python2.test') else 3
    try:
        tree = parse('\n'.join(testcase.input), pyversion=pyversion)
        actual = str(tree).split('\n')
    except CompileError as e:
        actual = e.messages
    assert_string_arrays_equal(
        testcase.output, actual,
        'Invalid parser output ({}, line {})'.format(testcase.file,
                                                     testcase.line))
def test_parser(testcase):
    """Perform a single parser test case.

    The argument contains the description of the test case.
    """
    # Files named '*python2.test' are parsed with Python 2 syntax.
    if testcase.file.endswith('python2.test'):
        pyversion = defaults.PYTHON2_VERSION
    else:
        pyversion = defaults.PYTHON3_VERSION
    source_bytes = bytes('\n'.join(testcase.input), 'ascii')
    try:
        tree = parse(source_bytes, pyversion=pyversion, fnam='main')
        actual = str(tree).split('\n')
    except CompileError as e:
        actual = e.messages
    assert_string_arrays_equal(testcase.output, actual,
                               'Invalid parser output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def test_transform(testcase: DataDrivenTestCase) -> None:
    """Perform an identity transform test case."""
    try:
        src = '\n'.join(testcase.input)
        options = parse_options(src, testcase, 1)
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.enable_incomplete_features = True
        options.show_traceback = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Route errors through the common CompileError handler below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing_extensions.pyi',
                                     'typing.pyi',
                                     'abc.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                t = TypeAssertTransformVisitor()
                t.test_only = True
                f = t.mypyfile(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        f'Invalid semantic analyzer output ({testcase.file}, '
        f'line {testcase.line})')
def test_simple(self) -> None:
    """Check the C emitted for a function that just returns its argument."""
    self.block.ops.append(Return(self.reg))
    fn = FuncIR(
        FuncDecl('myfunc', None, 'mod',
                 FuncSignature([self.arg], int_rprimitive)),
        [self.reg], [self.block])
    value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
    emitter = Emitter(EmitterContext(NameGenerator([['mod']])), value_names)
    generate_native_function(fn, emitter, 'prog.py', 'prog')
    result = emitter.fragments
    assert_string_arrays_equal([
        'CPyTagged CPyDef_myfunc(CPyTagged cpy_r_arg) {\n',
        'CPyL0: ;\n',
        ' return cpy_r_arg;\n',
        '}\n',
    ], result, msg='Generated code invalid')
def assert_emit(self,
                op: Op,
                expected: str,
                next_block: Optional[BasicBlock] = None,
                *,
                rare: bool = False,
                next_branch: Optional[Branch] = None,
                skip_next: bool = False) -> None:
    """Emit C code for a single op and compare it to 'expected'.

    next_block/next_branch/rare configure the visitor context for ops
    whose output depends on what follows them; skip_next asserts that the
    op consumed the following op (op_index advanced past it).
    """
    block = BasicBlock(0)
    block.ops.append(op)
    value_names = generate_names_for_ir(self.registers, [block])
    emitter = Emitter(self.context, value_names)
    declarations = Emitter(self.context, value_names)
    emitter.fragments = []
    declarations.fragments = []
    visitor = FunctionEmitterVisitor(emitter, declarations, 'prog.py', 'prog')
    visitor.next_block = next_block
    visitor.rare = rare
    if next_branch:
        visitor.ops = [op, next_branch]
    else:
        visitor.ops = [op]
    visitor.op_index = 0
    op.accept(visitor)
    # Declarations precede the body, matching real code generation order.
    frags = declarations.fragments + emitter.fragments
    actual_lines = [line.strip(' ') for line in frags]
    assert all(line.endswith('\n') for line in actual_lines)
    actual_lines = [line.rstrip('\n') for line in actual_lines]
    if not expected.strip():
        expected_lines = []
    else:
        expected_lines = expected.rstrip().split('\n')
    # Compare ignoring indentation on both sides.
    expected_lines = [line.strip(' ') for line in expected_lines]
    assert_string_arrays_equal(expected_lines, actual_lines,
                               msg='Generated code unexpected')
    if skip_next:
        assert visitor.op_index == 1
    else:
        assert visitor.op_index == 0
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Type check a test program with the trio_typing plugin enabled.

    Cases whose names end in '_36' are checked as Python 3.6; all others
    use the running interpreter's version.
    """
    src = "\n".join(testcase.input)
    options = Options()
    options.show_traceback = True
    # Fix: the original assigned sys.version_info[:2] here and then
    # immediately reassigned python_version in both branches of the
    # if/else below — the first assignment was dead code and is removed.
    if testcase.name.endswith("_36"):
        options.python_version = (3, 6)
    else:
        options.python_version = sys.version_info[:2]
    options.plugins = ["trio_typing.plugin"]
    # must specify something for config_file, else the plugins don't get loaded
    options.config_file = "/dev/null"
    result = build.build(
        sources=[BuildSource("main", None, src)],
        options=options,
    )
    assert_string_arrays_equal(
        testcase.output,
        result.errors,
        "Unexpected output from {0.file} line {0.line}".format(testcase),
    )
def run_test(self, testcase):
    """Type check a program and compare reported errors to expected output.

    With UPDATE_TESTCASES set, a mismatch rewrites the expected output in
    the test data file instead of just failing.
    """
    a = []
    pyversion = testcase_pyversion(testcase.file, testcase.name)
    try:
        src = '\n'.join(testcase.input)
        build.build('main', target=build.TYPE_CHECK,
                    program_text=src,
                    pyversion=pyversion,
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path=test_temp_dir)
    except CompileError as e:
        a = normalize_error_messages(e.messages)
    if testcase.output != a and UPDATE_TESTCASES:
        update_testcase_output(testcase, a, APPEND_TESTCASES)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))
def run_case_inner(self, testcase: DataDrivenTestCase) -> None: extra = [] # Extra command-line args mods = [] # Module names to process source = '\n'.join(testcase.input) for file, content in testcase.files + [('./main.py', source)]: # Strip ./ prefix and .py suffix. mod = file[2:-3].replace('/', '.') if mod.endswith('.__init__'): mod, _, _ = mod.rpartition('.') mods.append(mod) if '-p ' not in source: extra.extend(['-m', mod]) with open(file, 'w') as f: f.write(content) options = self.parse_flags(source, extra) modules = self.parse_modules(source) out_dir = 'out' try: try: if not testcase.name.endswith('_import'): options.no_import = True if not testcase.name.endswith('_semanal'): options.parse_only = True generate_stubs(options) a = [] # type: List[str] for module in modules: fnam = module_to_path(out_dir, module) self.add_file(fnam, a, header=len(modules) > 1) except CompileError as e: a = e.messages assert_string_arrays_equal( testcase.output, a, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line)) finally: for mod in mods: if mod in sys.modules: del sys.modules[mod] shutil.rmtree(out_dir)
def test_register(self) -> None:
    """Check the C emitted for assigning an integer literal to a register."""
    reg = Register(int_rprimitive)
    op = Assign(reg, Integer(5))
    self.block.ops.append(op)
    fn = FuncIR(
        FuncDecl('myfunc', None, 'mod',
                 FuncSignature([self.arg], list_rprimitive)),
        [self.reg], [self.block])
    value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
    emitter = Emitter(EmitterContext(NameGenerator([['mod']])), value_names)
    generate_native_function(fn, emitter, 'prog.py', 'prog')
    result = emitter.fragments
    # Note: the literal 5 appears as 10 in the C output — presumably the
    # tagged-integer encoding (CPyTagged); confirm against mypyc docs.
    assert_string_arrays_equal([
        'PyObject *CPyDef_myfunc(CPyTagged cpy_r_arg) {\n',
        ' CPyTagged cpy_r_r0;\n',
        'CPyL0: ;\n',
        ' cpy_r_r0 = 10;\n',
        '}\n',
    ], result, msg='Generated code invalid')
def run_test(self, testcase):
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, src)],
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        # The output is the symbol table converted into a string.
        a = []
        for f in sorted(result.files.keys()):
            # Skip library stub modules; only dump user code symbols.
            if f not in ('builtins', 'typing', 'abc'):
                a.append('{}:'.format(f))
                for s in str(result.files[f].names).split('\n'):
                    a.append(' ' + s)
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def assert_emit(self, op: Op, expected: str) -> None:
    """Emit C code for a single op and compare it to the expected text."""
    block = BasicBlock(0)
    block.ops.append(op)
    value_names = generate_names_for_ir(self.registers, [block])
    emitter = Emitter(self.context, value_names)
    declarations = Emitter(self.context, value_names)
    emitter.fragments = []
    declarations.fragments = []
    visitor = FunctionEmitterVisitor(emitter, declarations, 'prog.py', 'prog')
    op.accept(visitor)
    # Declarations precede the body, matching real code generation order.
    generated = declarations.fragments + emitter.fragments
    # Every fragment must be a complete line; compare ignoring indentation.
    assert all(frag.strip(' ').endswith('\n') for frag in generated)
    actual_lines = [frag.strip(' ').rstrip('\n') for frag in generated]
    expected_lines = [line.strip(' ')
                      for line in expected.rstrip().split('\n')]
    assert_string_arrays_equal(expected_lines, actual_lines,
                               msg='Generated code unexpected')
def run_test(self, testcase):
    """Type check a program and compare reported errors to expected output.

    The test case text may carry an options header that overrides the
    module and program name.
    """
    a = []
    pyversion = testcase_pyversion(testcase.file, testcase.name)
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_options(program_text)
    source = BuildSource(program_name, module_name, program_text)
    try:
        build.build(target=build.TYPE_CHECK,
                    sources=[source],
                    pyversion=pyversion,
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path=test_temp_dir)
    except CompileError as e:
        a = normalize_error_messages(e.messages)
    if testcase.output != a and mypy.myunit.UPDATE_TESTCASES:
        # Regenerate the expected output in the test data file in place.
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Compute fine-grained dependencies for a program and compare to expected.

    A '# __dump_all__' marker in the source dumps dependencies for every
    module instead of just the usual dumped modules.
    """
    src = '\n'.join(testcase.input)
    dump_all = '# __dump_all__' in src
    if testcase.name.endswith('python2'):
        python_version = defaults.PYTHON2_VERSION
    else:
        python_version = defaults.PYTHON3_VERSION
    messages, files, type_map = self.build(src, python_version)
    a = messages
    if files is None or type_map is None:
        if not a:
            a = ['Unknown compile error (likely syntax error in test case or fixture)']
    else:
        deps = defaultdict(set)  # type: DefaultDict[str, Set[str]]
        for module in files:
            # Skip stub/fixture modules unless the case asked for everything.
            if module in dumped_modules or dump_all and module not in (
                    'abc', 'typing', 'mypy_extensions', 'enum'):
                new_deps = get_dependencies(files[module], type_map,
                                            python_version)
                for source in new_deps:
                    deps[source].update(new_deps[source])
        TypeState.add_all_protocol_deps(deps)
        for source, targets in sorted(deps.items()):
            if source.startswith('<enum.'):
                # Remove noise.
                continue
            line = '%s -> %s' % (source, ', '.join(sorted(targets)))
            # Clean up output a bit
            line = line.replace('__main__', 'm')
            a.append(line)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_semanal(testcase):
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """
    try:
        src = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, src)],
                             pyversion=testfile_pyversion(testcase.file),
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Route errors through the common CompileError handler below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.py',
                                     'typing.py',
                                     'abc.py',
                                     'collections.py'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        # The output is the symbol table converted into a string.
        a = result.errors
        if a:
            # Route errors through the common CompileError handler below.
            raise CompileError(a)
        for f in sorted(result.files.keys()):
            # Skip library stub modules; only dump user code symbols.
            if f not in ('builtins', 'typing', 'abc'):
                a.append('{}:'.format(f))
                for s in str(result.files[f].names).split('\n'):
                    a.append(' ' + s)
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_transform(testcase):
    """Perform an identity transform test case."""
    try:
        src = '\n'.join(testcase.input)
        options = Options()
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Route errors through the common CompileError handler below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith(
                    (os.sep + 'builtins.pyi', 'typing.pyi', 'abc.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(os.path.basename(
                        f.path))[0].endswith('_')):
                t = TestTransformVisitor()
                f = t.node(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))