def dump_types(self, manager: FineGrainedBuildManager) -> List[str]:
    """Render the expression type map of every dumped module as text lines.

    Modules are visited in sorted order and expressions within a module are
    sorted by a composite key, so the output is repeatable across runs.
    """
    output: List[str] = []
    build_manager = manager.manager
    for module_id in sorted(build_manager.modules):
        if not is_dumped_module(module_id):
            continue
        global_types = build_manager.all_types
        tree = manager.graph[module_id].tree
        assert tree is not None
        # Restrict the global type map to expressions appearing in this
        # module's tree.
        module_types = {}
        for node in get_subexpressions(tree):
            if node in global_types:
                module_types[node] = global_types[node]
        if not module_types:
            continue
        output.append(f'## {module_id}')
        # The key combines line number, node kind and reprs so that the
        # ordering is both unique and deterministic.
        ordered = sorted(
            module_types,
            key=lambda n: (n.line, short_type(n),
                           str(n) + str(module_types[n])))
        for expr in ordered:
            output.append(
                f'{short_type(expr)}:{expr.line}: '
                f'{self.format_type(module_types[expr])}')
    return output
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Dump the selected data structure before and after a fine-grained update.

    The test case name's suffix selects which structure to dump; dumping
    everything would make the expected output very verbose.
    """
    kind = AST
    for suffix, dump_kind in (('_symtable', SYMTABLE),
                              ('_typeinfo', TYPEINFO),
                              ('_types', TYPES)):
        if testcase.name.endswith(suffix):
            kind = dump_kind
            break
    main_src = '\n'.join(testcase.input)
    result = self.build(main_src)
    assert result is not None, 'cases where CompileError occurred should not be run'
    result.manager.fscache.flush()
    fg_manager = FineGrainedBuildManager(result)
    out = []
    if result.errors:
        out.extend(result.errors)
    # Put the updated file contents in place before the first dump; the
    # initial build has already happened, so the dump still reflects the
    # pre-update state.
    target_path = os.path.join(test_temp_dir, 'target.py')
    shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path)
    out.extend(self.dump(fg_manager, kind))
    old_subexpr = get_subexpressions(result.manager.modules['target'])
    out.append('==>')
    new_file, new_types = self.build_increment(fg_manager, 'target',
                                               target_path)
    out.extend(self.dump(fg_manager, kind))
    for expr in old_subexpr:
        # TypeVarExprs are merged, so the removal check can't apply to them.
        if isinstance(expr, TypeVarExpr):
            continue
        # Verify that old AST nodes are removed from the expression type map.
        assert expr not in new_types
    if testcase.normalize_output:
        out = normalize_error_messages(out)
    assert_string_arrays_equal(
        testcase.output, out,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Dump the selected data structure before and after an incremental step.

    The test case name's suffix selects which structure to dump; dumping
    everything would make the expected output very verbose.
    """
    suffix_to_kind = {
        '_symtable': SYMTABLE,
        '_typeinfo': TYPEINFO,
        '_types': TYPES,
    }
    kind = AST
    for suffix, dump_kind in suffix_to_kind.items():
        if testcase.name.endswith(suffix):
            kind = dump_kind
            break
    main_src = '\n'.join(testcase.input)
    messages, manager, graph = self.build(main_src)
    out = []
    if messages:
        out.extend(messages)
    # Put the updated file contents in place before the first dump; the
    # initial build has already happened, so the dump still reflects the
    # pre-update state.
    shutil.copy(os.path.join(test_temp_dir, 'target.py.next'),
                os.path.join(test_temp_dir, 'target.py'))
    out.extend(self.dump(manager.modules, graph, kind))
    # Remember the pre-update module map so stale AST nodes can be checked.
    old_modules = dict(manager.modules)
    old_subexpr = get_subexpressions(old_modules['target'])
    new_file, new_types = self.build_increment(manager, 'target')
    replace_modules_with_new_variants(manager, graph, old_modules,
                                      {'target': new_file},
                                      {'target': new_types})
    out.append('==>')
    out.extend(self.dump(manager.modules, graph, kind))
    for expr in old_subexpr:
        # Verify that old AST nodes are removed from the expression type map.
        assert expr not in new_types
    assert_string_arrays_equal(
        testcase.output, out,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))