def get_sources(graph: Graph, changed_modules: List[Tuple[str, str]]) -> List[BuildSource]:
    """Return sources for every module in the graph, plus changed modules not yet in it."""
    result = [BuildSource(state.path, state.id, None) for state in graph.values()]
    for module_id, module_path in changed_modules:
        # New modules are not in the graph yet; add them explicitly.
        if module_id not in graph:
            result.append(BuildSource(module_path, module_id, None))
    return result
def build(self,
          source: str,
          testcase: DataDrivenTestCase,
          sources_override: Optional[List[Tuple[str, str]]],
          build_cache: bool,
          enable_cache: bool) -> Tuple[List[str], BuildManager, Graph]:
    """Build the test-case program and return (errors, manager, graph).

    sources_override, if not None, is a list of (module, path) pairs to
    build instead of the default single 'main' file.  build_cache and
    enable_cache steer fine-grained incremental mode vs. cache writing.
    """
    # This handles things like '# flags: --foo'.
    options = parse_options(source, testcase, incremental_step=1)
    options.incremental = True
    options.use_builtins_fixtures = True
    options.show_traceback = True
    # Fine-grained mode is disabled while building the cache itself.
    options.fine_grained_incremental = not build_cache
    options.use_fine_grained_cache = enable_cache and not build_cache
    options.cache_fine_grained = enable_cache
    main_path = os.path.join(test_temp_dir, 'main')
    with open(main_path, 'w') as f:
        f.write(source)
    if sources_override is not None:
        # Build exactly the (module, path) pairs the test asked for.
        sources = [BuildSource(path, module, None)
                   for module, path in sources_override]
    else:
        sources = [BuildSource(main_path, None, None)]
    try:
        result = build.build(sources=sources,
                             options=options,
                             alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: We need a manager and a graph in this case as well
        assert False, str('\n'.join(e.messages))
        # Only reachable when asserts are stripped (python -O).
        return e.messages, None, None
    return result.errors, result.manager, result.graph
def expand_dir(arg: str, mod_prefix: str = '') -> List[BuildSource]:
    """Convert a directory name to a list of sources to build."""
    f = get_init_file(arg)
    # A subdirectory (non-empty mod_prefix) without an __init__ file is
    # not a package, so it contributes nothing.
    if mod_prefix and not f:
        return []
    seen = set()  # type: Set[str]
    sources = []
    if f and not mod_prefix:
        # Top-level call on a package: derive the module prefix by
        # crawling up from the __init__ file.
        top_dir, top_mod = crawl_up(f)
        mod_prefix = top_mod + '.'
    if mod_prefix:
        # The package itself (its __init__ file) is a build source.
        sources.append(BuildSource(f, mod_prefix.rstrip('.'), None))
    names = os.listdir(arg)
    names.sort(key=keyfunc)
    for name in names:
        path = os.path.join(arg, name)
        if os.path.isdir(path):
            sub_sources = expand_dir(path, mod_prefix + name + '.')
            if sub_sources:
                # Remember package names so a same-named module is skipped.
                seen.add(name)
                sources.extend(sub_sources)
        else:
            base, suffix = os.path.splitext(name)
            if base == '__init__':
                continue
            # Skip names shadowed by a package, names containing dots
            # (invalid module names), and non-Python extensions.
            if base not in seen and '.' not in base and suffix in PY_EXTENSIONS:
                seen.add(base)
                src = BuildSource(path, mod_prefix + base, None)
                sources.append(src)
    return sources
def create_source_list(files: Sequence[str], options: Options,
                       fscache: Optional[FileSystemCache] = None,
                       allow_empty_dir: bool = False) -> List[BuildSource]:
    """From a list of source files/directories, makes a list of BuildSources.

    Raises InvalidSourceList on errors.
    """
    fscache = fscache or FileSystemCache()
    finder = SourceFinder(fscache)

    sources = []
    for entry in files:
        if entry.endswith(PY_EXTENSIONS):
            # Can raise InvalidSourceList if a directory doesn't have a valid module name.
            name, base_dir = finder.crawl_up(os.path.normpath(entry))
            sources.append(BuildSource(entry, name, None, base_dir))
            continue
        if fscache.isdir(entry):
            found = finder.expand_dir(os.path.normpath(entry))
            if not found and not allow_empty_dir:
                raise InvalidSourceList(
                    "There are no .py[i] files in directory '{}'".format(entry))
            sources.extend(found)
            continue
        # Plain file: treat it as a module only when scripts count as modules.
        mod = os.path.basename(entry) if options.scripts_are_modules else None
        sources.append(BuildSource(entry, mod, None))
    return sources
def get_sources(modules: Dict[str, str],
                changed_modules: List[Tuple[str, str]]) -> List[BuildSource]:
    """Return sources for all existing known modules plus changed, unknown ones."""
    # TODO: Race condition when reading from the file system; we should only read each
    # bit of external state once during a build to have a consistent view of the world
    sources = []
    for module_id, module_path in sorted(modules.items()):
        if os.path.isfile(module_path):
            sources.append(BuildSource(module_path, module_id, None))
    for module_id, module_path in changed_modules:
        # Only add modules we don't already know about and that still exist.
        if module_id not in modules and os.path.isfile(module_path):
            sources.append(BuildSource(module_path, module_id, None))
    return sources
def test_transform(testcase):
    """Perform an identity transform test case."""
    try:
        src = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, src)],
                             pyversion=testfile_pyversion(testcase.file),
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        a = []
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            # Skipped: stub/fixture modules and basenames starting or
            # ending with '_' (special markers).
            if (not f.path.endswith((os.sep + 'builtins.py',
                                     'typing.py',
                                     'abc.py'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                t = TestTransformVisitor()
                f = t.node(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def get_sources(fscache: FileSystemCache,
                modules: Dict[str, str],
                changed_modules: List[Tuple[str, str]]) -> List[BuildSource]:
    """Return build sources for the changed modules that still exist on disk."""
    return [BuildSource(path, module_id, None)
            for module_id, path in changed_modules
            if fscache.isfile(path)]
def run_test(self, testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Route build errors through the common CompileError path below.
            raise CompileError(a)
        # Collect all TypeInfos in top-level modules.
        typeinfos = TypeInfoMap()
        for f in result.files.values():
            for n in f.names.values():
                if isinstance(n.node, TypeInfo):
                    typeinfos[n.fullname] = n.node
        # The output is the symbol table converted into a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_error_stream(testcase: DataDrivenTestCase) -> None:
    """Perform a single error streaming test case.

    The argument contains the description of the test case.
    """
    options = Options()
    options.show_traceback = True

    logged_messages = []  # type: List[str]

    def flush_errors(msgs: List[str], serious: bool) -> None:
        # Record each non-empty flush as a delimited batch.
        if not msgs:
            return
        logged_messages.append('==== Errors flushed ====')
        logged_messages.extend(msgs)

    program_text = '\n'.join(testcase.input)
    sources = [BuildSource('main', '__main__', program_text)]
    try:
        build.build(sources=sources,
                    options=options,
                    flush_errors=flush_errors)
    except CompileError as e:
        # Everything should already have been flushed through the callback.
        assert e.messages == []

    assert_string_arrays_equal(testcase.output, logged_messages,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def run(sourcePath):
    """Type check `sourcePath` with mypy and log a per-file, per-line error report."""
    def logHeader(sourcePath):
        # One header line per validated application.
        utils.log(True, 'Performing static type validation on application: {}\n', sourcePath)

    try:
        options = Options()
        # options.silent_imports = True
        result = main.type_check_only([BuildSource(sourcePath, None, None)], None, options)
        if result.errors:
            logHeader(sourcePath)
            oldModuleName = ''
            for message in result.errors:
                if ': error:' in message:
                    # Messages look like 'module:line: error: text'.  maxsplit
                    # must be 3 so we always get exactly four fields; with
                    # maxsplit=4 an error text containing ':' would split into
                    # five fields and the unpacking would raise ValueError.
                    moduleName, lineNr, errorLabel, tail = message.split(':', 3)
                    if moduleName != oldModuleName:
                        # Print the file name once per group of errors.
                        utils.log(True, '\tFile {}\n', moduleName)
                        oldModuleName = moduleName
                    utils.log(True, '\t\tLine {}:{}\n', lineNr, tail)
            utils.log(True, '\n')
    except CompileError as compileError:
        if compileError.messages:
            logHeader(sourcePath)
            for message in compileError.messages:
                utils.log(True, '\t{}', message)
            utils.log(True, '\n')
def run_test(self, testcase):
    """Perform a test case."""
    try:
        # Build test case input.
        src = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, src)],
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        # Collect all TypeInfos in top-level modules.
        typeinfos = TypeInfoMap()
        for f in result.files.values():
            for n in f.names.values():
                if isinstance(n.node, TypeInfo):
                    typeinfos[n.fullname] = n.node
        # The output is the symbol table converted into a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def parse_sources(self, program_text: str,
                  incremental_step: int) -> List[BuildSource]:
    """Return target BuildSources for a test case.

    Normally, the unit tests will check all files included in the test
    case. This differs from how testcheck works by default, as dmypy
    doesn't currently support following imports.

    You can override this behavior and instruct the tests to check
    multiple modules by using a comment like this in the test case
    input:

      # cmd: main a.py

    You can also use `# cmdN:` to have a different cmd for incremental
    step N (2, 3, ...).
    """
    m = re.search('# cmd: mypy ([a-zA-Z0-9_./ ]+)$', program_text, flags=re.MULTILINE)
    regex = '# cmd{}: mypy ([a-zA-Z0-9_./ ]+)$'.format(incremental_step)
    alt_m = re.search(regex, program_text, flags=re.MULTILINE)
    if alt_m is not None and incremental_step > 1:
        # Optionally return a different command if in a later step
        # of incremental mode, otherwise default to reusing the
        # original cmd.
        m = alt_m
    if m:
        # The test case wants to use a non-default set of files.
        paths = m.group(1).strip().split()
        result = []
        for path in paths:
            path = os.path.join(test_temp_dir, path)
            module = module_from_path(path)
            if module == 'main':
                # The main script is checked under the '__main__' name.
                module = '__main__'
            result.append(BuildSource(path, module, None))
        return result
    else:
        # Default: 'main' as __main__ plus everything in the temp dir.
        base = BuildSource(os.path.join(test_temp_dir, 'main'), '__main__', None)
        return [base] + expand_dir(test_temp_dir)
def check(self, filename: str) -> bool:
    """
    Typechecks the given file and collects all type information needed for
    the translation to Viper
    """
    def report_errors(errors: List[str]) -> None:
        # Log every error, then abort the check via TypeException.
        for error in errors:
            logger.info(error)
        raise TypeException(errors)

    try:
        options_strict = self._create_options(True)
        res_strict = mypy.build.build(
            [BuildSource(filename, None, None)],
            options_strict,
            bin_dir=config.mypy_dir
        )
        if res_strict.errors:
            # Run mypy a second time with strict optional checking disabled,
            # s.t. we don't get overapproximated none-related errors.
            options_non_strict = self._create_options(False)
            res_non_strict = mypy.build.build(
                [BuildSource(filename, None, None)],
                options_non_strict,
                bin_dir=config.mypy_dir
            )
            if res_non_strict.errors:
                report_errors(res_non_strict.errors)
        # Collect type information from the strict run's files.
        for name, file in res_strict.files.items():
            if name in IGNORED_IMPORTS:
                continue
            self.files[name] = file.path
            visitor = TypeVisitor(res_strict.types, name, file.ignored_lines)
            # NOTE(review): prefix appears to be the dotted module path as a
            # list of components -- confirm against TypeVisitor.
            visitor.prefix = name.split('.')
            file.accept(visitor)
            self.all_types.update(visitor.all_types)
            self.alt_types.update(visitor.alt_types)
            self.type_aliases.update(visitor.type_aliases)
            self.type_vars.update(visitor.type_vars)
        return True
    except mypy.errors.CompileError as e:
        # report_errors always raises, so this branch never returns a value.
        report_errors(e.messages)
def create_source_list(files: Sequence[str], options: Options) -> List[BuildSource]:
    """Turn a list of files/directories into BuildSources, calling fail() on errors."""
    sources = []
    for path in files:
        if path.endswith(PY_EXTENSIONS):
            try:
                sources.append(BuildSource(path, crawl_up(path)[1], None))
            except InvalidPackageName as e:
                fail(str(e))
        elif os.path.isdir(path):
            try:
                found = expand_dir(path)
            except InvalidPackageName as e:
                fail(str(e))
            # NOTE(review): assumes fail() does not return; otherwise
            # `found` could be unbound here -- confirm fail() raises/exits.
            if not found:
                fail("There are no .py[i] files in directory '{}'".format(path))
            sources.extend(found)
        else:
            module = os.path.basename(path) if options.scripts_are_modules else None
            sources.append(BuildSource(path, module, None))
    return sources
def build(self, source: str) -> Tuple[List[str], BuildManager, Dict[str, State]]:
    """Build 'main' from the given program text; return (errors, manager, graph)."""
    options = Options()
    options.use_builtins_fixtures = True
    options.show_traceback = True
    sources = [BuildSource('main', None, source)]
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: Is it okay to return None?
        return e.messages, None, {}
    return res.errors, res.manager, res.graph
def create_source_list(files: Sequence[str], options: Options) -> List[BuildSource]:
    """From a list of source files/directories, makes a list of BuildSources.

    Raises InvalidSourceList on errors.
    """
    result = []
    for path in files:
        if path.endswith(PY_EXTENSIONS):
            # Can raise InvalidSourceList if a directory doesn't have a valid module name.
            result.append(BuildSource(path, crawl_up(path)[1], None))
        elif os.path.isdir(path):
            found = expand_dir(path)
            if not found:
                raise InvalidSourceList(
                    "There are no .py[i] files in directory '{}'".format(path))
            result.extend(found)
        else:
            module = os.path.basename(path) if options.scripts_are_modules else None
            result.append(BuildSource(path, module, None))
    return result
def run_test_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Run a single type-checking test case.

    incremental=0: normal run; 1: first incremental run (writes the
    program file); 2: second incremental run (applies *.py.next updates
    before building).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)
    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)
    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        # Strip the '.next' suffix (5 characters).
                        target = full[:-5]
                        shutil.copy(full, target)
    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)

    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
    if testcase.expected_stale_modules is not None and incremental == 2:
        # NOTE(review): res may be None here (CompileError path), which
        # would crash on res.manager -- confirm this is unreachable.
        assert_string_arrays_equal(
            list(sorted(testcase.expected_stale_modules)),
            list(sorted(res.manager.stale_modules.difference({"__main__"}))),
            'Set of stale modules does not match expected set')
def build(self, source: str) -> Tuple[List[str], Dict[str, MypyFile]]:
    """Build 'main' from the given program text; return (errors, files)."""
    options = Options()
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.cache_dir = os.devnull  # discard any cache output
    sources = [BuildSource('main', None, source)]
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: Is it okay to return None?
        return e.messages, None
    return res.errors, res.files
def build(self, source: str,
          options: Options) -> Tuple[List[str],
                                     Optional[Dict[str, MypyFile]],
                                     Optional[Dict[Expression, Type]]]:
    """Build 'main' with the given options; return (errors, files, types)."""
    sources = [BuildSource('main', None, source)]
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: Should perhaps not return None here.
        return e.messages, None, None
    return res.errors, res.files, res.types
def build(self, source: str) -> Tuple[List[str], BuildManager, Graph]:
    """Build 'main' from the given program text; return (errors, manager, graph)."""
    options = Options()
    options.use_builtins_fixtures = True
    options.show_traceback = True
    try:
        result = build.build(sources=[BuildSource('main', None, source)],
                             options=options,
                             alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: We need a manager and a graph in this case as well
        assert False, str('\n'.join(e.messages))
        # Only reachable when asserts are stripped (python -O).
        return e.messages, None, None
    return result.errors, result.manager, result.graph
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Type check the test program and compare exported node types to expected output.

    A leading '## <regex>' line restricts the output to nodes whose short
    type name (or NameExpr name) matches the regex.
    """
    try:
        line = testcase.input[0]
        mask = ''
        if line.startswith('##'):
            mask = '(' + line[2:].strip() + ')$'
        src = '\n'.join(testcase.input)
        options = Options()
        options.strict_optional = False  # TODO: Enable strict optional checking
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.export_types = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        map = result.types
        nodes = map.keys()
        # Ignore NameExpr nodes of variables with explicit (trivial) types
        # to simplify output.
        searcher = SkippedNodeSearcher()
        for file in result.files.values():
            file.accept(searcher)
        ignored = searcher.nodes
        # Filter nodes that should be included in the output.
        keys = []
        for node in nodes:
            if node.line is not None and node.line != -1 and map[node]:
                if ignore_node(node) or node in ignored:
                    continue
                if (re.match(mask, short_type(node))
                        or (isinstance(node, NameExpr)
                            and re.match(mask, node.name))):
                    # Include node in output.
                    keys.append(node)
        # Sort by line, then node kind, then repr for stable output.
        for key in sorted(keys,
                          key=lambda n: (n.line, short_type(n),
                                         str(n) + str(map[n]))):
            ts = str(map[key]).replace('*', '')  # Remove erased tags
            ts = ts.replace('__main__.', '')
            a.append('{}({}) : {}'.format(short_type(key), key.line, ts))
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))
def build(self, source: str) -> Tuple[List[str], Dict[str, MypyFile], Dict[Expression, Type]]:
    """Build 'main' from the given program text; return (errors, files, types)."""
    options = Options()
    options.use_builtins_fixtures = True
    options.show_traceback = True
    sources = [BuildSource('main', None, source)]
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: Should perhaps not return None here.
        return e.messages, None, None
    return res.errors, res.files, res.types
def run_test_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Run a single type-checking test case (flags-based build API).

    incremental=0: normal run; 1: first incremental run (writes the
    program file); 2: second incremental run (applies *.py.next updates
    before building).
    """
    find_module_clear_caches()
    pyversion = testcase_pyversion(testcase.file, testcase.name)
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_options(
        program_text)
    flags = self.parse_flags(program_text)
    output = testcase.output
    if incremental:
        flags.append(build.INCREMENTAL)
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        # Strip the '.next' suffix (5 characters).
                        target = full[:-5]
                        shutil.copy(full, target)
    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(target=build.TYPE_CHECK,
                          sources=[source],
                          pyversion=pyversion,
                          flags=flags + [build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)

    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
def build(self, source: str) -> Tuple[List[str], Optional[BuildManager], Dict[str, State]]:
    """Write the program text to <tmp>/main and build it incrementally."""
    options = Options()
    options.incremental = True
    options.use_builtins_fixtures = True
    options.show_traceback = True

    main_path = os.path.join(test_temp_dir, 'main')
    with open(main_path, 'w') as f:
        f.write(source)

    try:
        res = build.build(sources=[BuildSource(main_path, None, None)],
                          options=options,
                          alt_lib_path=test_temp_dir)
    except CompileError as e:
        # TODO: Is it okay to return None?
        return e.messages, None, {}
    return res.errors, res.manager, res.graph
def run_test(self, testcase):
    """Type check the test program and compare exported node types to expected output.

    A leading '## <regex>' line restricts the output to nodes whose short
    type name (or NameExpr name) matches the regex.
    """
    a = []
    try:
        line = testcase.input[0]
        mask = ''
        if line.startswith('##'):
            mask = '(' + line[2:].strip() + ')$'
        src = '\n'.join(testcase.input)
        result = build.build(target=build.TYPE_CHECK,
                             sources=[BuildSource('main', None, src)],
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=config.test_temp_dir)
        map = result.types
        nodes = map.keys()
        # Ignore NameExpr nodes of variables with explicit (trivial) types
        # to simplify output.
        searcher = VariableDefinitionNodeSearcher()
        for file in result.files.values():
            file.accept(searcher)
        ignored = searcher.nodes
        # Filter nodes that should be included in the output.
        keys = []
        for node in nodes:
            if node.line is not None and node.line != -1 and map[node]:
                if ignore_node(node) or node in ignored:
                    continue
                if (re.match(mask, short_type(node))
                        or (isinstance(node, NameExpr)
                            and re.match(mask, node.name))):
                    # Include node in output.
                    keys.append(node)
        # Sort by line, then node kind, then repr for stable output.
        for key in sorted(keys,
                          key=lambda n: (n.line, short_type(n),
                                         str(n) + str(map[n]))):
            ts = str(map[key]).replace('*', '')  # Remove erased tags
            ts = ts.replace('__main__.', '')
            a.append('{}({}) : {}'.format(short_type(key), key.line, ts))
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid type checker output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
def expand_dir(arg: str) -> List[BuildSource]:
    """Convert a directory name to a list of sources to build."""
    dir, mod = crawl_up(arg)
    if mod:
        # It's a package: let the build machinery find modules recursively.
        return build.find_modules_recursive(mod, [dir])
    # It's a directory without an __init__.py[i].
    # List all the .py[i] files (but not recursively).
    sources = []  # type: List[BuildSource]
    for entry in os.listdir(dir):
        module_name = strip_py(entry)
        if module_name:
            sources.append(BuildSource(os.path.join(dir, entry), module_name, None))
    return sources
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""
    src = '\n'.join(testcase.input)
    try:
        res = build.build(sources=[BuildSource('main', None, src)],
                          options=get_semanal_options(),
                          alt_lib_path=test_temp_dir)
        a = res.errors
        assert a, 'No errors reported in {}, line {}'.format(testcase.file, testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(a),
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
def test_semanal_error(testcase):
    """Perform a test case."""
    src = '\n'.join(testcase.input)
    try:
        build.build(target=build.SEMANTIC_ANALYSIS,
                    sources=[BuildSource('main', None, src)],
                    flags=[build.TEST_BUILTINS],
                    alt_lib_path=test_temp_dir)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, normalize_error_messages(e.messages),
            'Invalid compiler output ({}, line {})'.format(
                testcase.file, testcase.line))
    else:
        # A successful build means the expected error never happened.
        raise AssertionError('No errors reported in {}, line {}'.format(
            testcase.file, testcase.line))
def build(self, source: str) -> Optional[BuildResult]:
    """Write the program text to <tmp>/main and run a fine-grained incremental build.

    Returns the BuildResult, or None if the build raised CompileError.
    """
    options = Options()
    options.incremental = True
    options.fine_grained_incremental = True
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.python_version = PYTHON3_VERSION
    main_path = os.path.join(test_temp_dir, 'main')
    with open(main_path, 'w') as f:
        f.write(source)
    try:
        result = build.build(sources=[BuildSource(main_path, None, None)],
                             options=options,
                             alt_lib_path=test_temp_dir)
    except CompileError:
        # The bound exception was never used, so don't capture it (F841).
        # TODO: Is it okay to return None?
        return None
    return result
def parse_sources(self, program_text: str,
                  incremental_step: int,
                  options: Options) -> List[BuildSource]:
    """Return target BuildSources for a test case.

    Normally, the unit tests will check all files included in the test
    case. This differs from how testcheck works by default, as dmypy
    doesn't currently support following imports.

    You can override this behavior and instruct the tests to check
    multiple modules by using a comment like this in the test case
    input:

      # cmd: main a.py

    You can also use `# cmdN:` to have a different cmd for incremental
    step N (2, 3, ...).
    """
    m = re.search('# cmd: mypy ([a-zA-Z0-9_./ ]+)$', program_text, flags=re.MULTILINE)
    regex = '# cmd{}: mypy ([a-zA-Z0-9_./ ]+)$'.format(incremental_step)
    alt_m = re.search(regex, program_text, flags=re.MULTILINE)
    if alt_m is not None:
        # Optionally return a different command if in a later step
        # of incremental mode, otherwise default to reusing the
        # original cmd.
        m = alt_m
    if m:
        # The test case wants to use a non-default set of files.
        paths = [
            os.path.join(test_temp_dir, path)
            for path in m.group(1).strip().split()
        ]
        return create_source_list(paths, options)
    else:
        # Default: 'main' as __main__ plus everything in the temp dir.
        base = BuildSource(os.path.join(test_temp_dir, 'main'), '__main__', None)
        # Use expand_dir instead of create_source_list to avoid complaints
        # when there aren't any .py files in an increment
        return [base] + create_source_list([test_temp_dir], options,
                                           allow_empty_dir=True)