def visit_instance(self, left: Instance) -> bool:
    if left.type.fallback_to_any:
        if isinstance(self.right, NoneTyp):
            # NOTE: `None` is a *non-subclassable* singleton, therefore no class
            # can be a subtype of it, even with an `Any` fallback.
            # This special case is needed to treat descriptors in classes with
            # dynamic base classes correctly, see #5456.
            return False
        return True
    right = self.right
    if isinstance(right, TupleType) and right.fallback.type.is_enum:
        return is_subtype(left, right.fallback)
    if isinstance(right, Instance):
        if TypeState.is_cached_subtype_check(left, right):
            return True
        # NOTE: left.type.mro may be None in quick mode if there
        # was an error somewhere.
        if left.type.mro is not None:
            for base in left.type.mro:
                # TODO: Also pass recursively ignore_declared_variance
                if base._promote and is_subtype(
                        base._promote, self.right, self.check_type_parameter,
                        ignore_pos_arg_names=self.ignore_pos_arg_names):
                    TypeState.record_subtype_cache_entry(left, right)
                    return True
        rname = right.type.fullname()
        # Always try a nominal check if possible,
        # there might be errors that a user wants to silence *once*.
        if ((left.type.has_base(rname) or rname == 'builtins.object') and
                not self.ignore_declared_variance):
            # Map left type to corresponding right instances.
            t = map_instance_to_supertype(left, right.type)
            nominal = all(self.check_type_parameter(lefta, righta, tvar.variance)
                          for lefta, righta, tvar in
                          zip(t.args, right.args, right.type.defn.type_vars))
            if nominal:
                TypeState.record_subtype_cache_entry(left, right)
            return nominal
        if right.type.is_protocol and is_protocol_implementation(left, right):
            return True
        return False
    if isinstance(right, TypeType):
        item = right.item
        if isinstance(item, TupleType):
            item = item.fallback
        if is_named_instance(left, 'builtins.type'):
            return is_subtype(TypeType(AnyType(TypeOfAny.special_form)), right)
        if left.type.is_metaclass():
            if isinstance(item, AnyType):
                return True
            if isinstance(item, Instance):
                return is_named_instance(item, 'builtins.object')
    if isinstance(right, CallableType):
        # Special case: Instance can be a subtype of Callable.
        call = find_member('__call__', left, left)
        if call:
            return is_subtype(call, right)
        return False
    else:
        return False

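# A minimal, hedged illustration of the NoneTyp special case above (the names
# DynamicBase and C are invented for the example): a class with a dynamic (Any)
# base falls back to Any and is compatible with almost everything, yet it must
# still be rejected where `None` is expected.
from typing import Any

DynamicBase: Any = object

class C(DynamicBase):  # C's TypeInfo has fallback_to_any set
    pass

x: None = C()  # mypy error: C is not a subtype of None, despite the Any fallback
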
def get_all_dependencies(manager: BuildManager,
                         graph: Dict[str, State]) -> Dict[str, Set[str]]:
    """Return the fine-grained dependency map for an entire build."""
    # Deps for each module were computed during build() or loaded from the cache.
    deps = {}  # type: Dict[str, Set[str]]
    collect_dependencies(graph, deps, graph)
    TypeState.add_all_protocol_deps(deps)
    return deps

def propagate_changes_using_dependencies(
        manager: BuildManager,
        graph: Dict[str, State],
        deps: Dict[str, Set[str]],
        triggered: Set[str],
        up_to_date_modules: Set[str],
        targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
    """Transitively rechecks targets based on triggers and the dependency map.

    Returns a list of (module id, path) tuples representing modules that contain
    a target that needs to be reprocessed but that has not been parsed yet.
    """
    num_iter = 0
    remaining_modules = []  # type: List[Tuple[str, str]]
    # Propagate changes until nothing visible has changed during the last
    # iteration.
    while triggered or targets_with_errors:
        num_iter += 1
        if num_iter > MAX_ITER:
            raise RuntimeError(
                'Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)
        todo, unloaded, stale_protos = find_targets_recursive(
            manager, graph, triggered, deps, up_to_date_modules)
        # TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
        remaining_modules.extend(
            (id, graph[id].xpath) for id in sorted(unloaded))
        # Also process targets that used to have errors, as otherwise some
        # errors might be lost.
        for target in targets_with_errors:
            id = module_prefix(graph, target)
            if id is not None and id not in up_to_date_modules:
                if id not in todo:
                    todo[id] = set()
                manager.log_fine_grained('process target with error: %s' % target)
                more_nodes, _ = lookup_target(manager, target)
                todo[id].update(more_nodes)
        triggered = set()
        # First invalidate subtype caches in all stale protocols.
        # We need to do this to avoid false negatives if the protocol itself is
        # unchanged, but was marked stale because its sub- (or super-) type changed.
        for info in stale_protos:
            TypeState.reset_subtype_caches_for(info)
        # Then fully reprocess all targets.
        # TODO: Preserve order (set is not optimal)
        for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
            assert id not in up_to_date_modules
            triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
        # Changes elsewhere may require us to reprocess modules that were
        # previously considered up to date. For example, there may be a
        # dependency loop that loops back to an originally processed module.
        up_to_date_modules = set()
        targets_with_errors = set()
        if is_verbose(manager):
            manager.log_fine_grained('triggered: %r' % list(triggered))
    return remaining_modules

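# Hedged sketch of the fixpoint idea driving the loop above: keep firing
# triggers until nothing new is reached. The trigger and target names are
# made up; the real code bounds the loop with MAX_ITER and reprocesses
# actual AST targets rather than plain strings.
from typing import Dict, Set

deps = {'<m.A>': {'m.f'}, '<m.f>': {'m.g'}}  # type: Dict[str, Set[str]]
triggered = {'<m.A>'}
processed = set()  # type: Set[str]
while triggered:
    targets = set().union(*(deps.get(t, set()) for t in triggered))
    new = targets - processed
    processed |= new
    triggered = {'<%s>' % t for t in new if '<%s>' % t in deps}
print(sorted(processed))  # ['m.f', 'm.g']
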
def collect_dependencies(new_modules: Iterable[str],
                         deps: Dict[str, Set[str]],
                         graph: Dict[str, State]) -> None:
    for id in new_modules:
        if id not in graph:
            continue
        for trigger, targets in graph[id].fine_grained_deps.items():
            deps.setdefault(trigger, set()).update(targets)
    # Merge also the newly added protocol deps.
    TypeState.update_protocol_deps(deps)

def get_all_dependencies(manager: BuildManager,
                         graph: Dict[str, State]) -> Dict[str, Set[str]]:
    """Return the fine-grained dependency map for an entire build."""
    # Deps for each module were computed during build() or loaded from the cache.
    deps = manager.load_fine_grained_deps(FAKE_ROOT_MODULE)  # type: Dict[str, Set[str]]
    for id in graph:
        if graph[id].tree is not None:
            merge_dependencies(graph[id].compute_fine_grained_deps(), deps)
    TypeState.add_all_protocol_deps(deps)
    return deps

def calculate_mro(info: TypeInfo,
                  obj_type: Optional[Callable[[], Instance]] = None) -> None:
    """Calculate and set mro (method resolution order).

    Raise MroError if the mro cannot be determined.
    """
    mro = linearize_hierarchy(info, obj_type)
    assert mro, "Could not produce a MRO at all for %s" % (info,)
    info.mro = mro
    # The property of falling back to Any is inherited.
    info.fallback_to_any = any(baseinfo.fallback_to_any for baseinfo in info.mro)
    TypeState.reset_all_subtype_caches_for(info)

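# For reference, the C3 linearization that linearize_hierarchy computes matches
# Python's runtime MRO; a plain-Python illustration on a diamond hierarchy
# (class names invented for the example, not mypy API):
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

print([cls.__name__ for cls in D.__mro__])  # ['D', 'B', 'C', 'A', 'object']
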
def fixup_and_reset_typeinfo(self, node: TypeInfo) -> TypeInfo:
    """Fix-up type info and reset subtype caches.

    This needs to be called at least once for each merged TypeInfo, as otherwise
    we may leak stale caches.
    """
    if node in self.replacements:
        # The subclass relationships may change, so reset all caches relevant to the
        # old MRO.
        new = cast(TypeInfo, self.replacements[node])
        TypeState.reset_all_subtype_caches_for(new)
    return self.fixup(node)

def strip_type_info(self, info: TypeInfo) -> None:
    info.type_vars = []
    info.bases = []
    info.is_abstract = False
    info.abstract_attributes = []
    info.mro = []
    info.add_type_vars()
    info.tuple_type = None
    info.typeddict_type = None
    TypeState.reset_subtype_caches_for(info)
    info.declared_metaclass = None
    info.metaclass_type = None

def update_deps(module_id: str,
                nodes: List[DeferredNode],
                graph: Dict[str, State],
                deps: Dict[str, Set[str]],
                options: Options) -> None:
    for deferred in nodes:
        node = deferred.node
        type_map = graph[module_id].type_map()
        tree = graph[module_id].tree
        assert tree is not None, "Tree must be processed at this stage"
        new_deps = get_dependencies_of_target(module_id, tree, node, type_map,
                                              options.python_version)
        for trigger, targets in new_deps.items():
            deps.setdefault(trigger, set()).update(targets)
    # Merge also the newly added protocol deps (if any).
    TypeState.update_protocol_deps(deps)

def update_deps(module_id: str,
                nodes: List[FineGrainedDeferredNode],
                graph: Dict[str, State],
                deps: Dict[str, Set[str]],
                options: Options) -> None:
    for deferred in nodes:
        node = deferred.node
        type_map = graph[module_id].type_map()
        tree = graph[module_id].tree
        assert tree is not None, "Tree must be processed at this stage"
        new_deps = get_dependencies_of_target(module_id, tree, node, type_map,
                                              options.python_version)
        for trigger, targets in new_deps.items():
            deps.setdefault(trigger, set()).update(targets)
    # Merge also the newly added protocol deps (if any).
    TypeState.update_protocol_deps(deps)

def run_case(self, testcase: DataDrivenTestCase) -> None:
    src = '\n'.join(testcase.input)
    dump_all = '# __dump_all__' in src
    if testcase.name.endswith('python2'):
        python_version = defaults.PYTHON2_VERSION
    else:
        python_version = defaults.PYTHON3_VERSION
    options = parse_options(src, testcase, incremental_step=1)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.cache_dir = os.devnull
    options.python_version = python_version
    options.export_types = True
    options.preserve_asts = True
    messages, files, type_map = self.build(src, options)
    a = messages
    if files is None or type_map is None:
        if not a:
            a = ['Unknown compile error (likely syntax error in test case or fixture)']
    else:
        deps = defaultdict(set)  # type: DefaultDict[str, Set[str]]
        for module in files:
            if module in dumped_modules or dump_all and module not in (
                    'abc', 'typing', 'mypy_extensions', 'typing_extensions', 'enum'):
                new_deps = get_dependencies(files[module], type_map, python_version,
                                            options)
                for source in new_deps:
                    deps[source].update(new_deps[source])
        TypeState.add_all_protocol_deps(deps)
        for source, targets in sorted(deps.items()):
            if source.startswith(('<enum', '<typing', '<mypy')):
                # Remove noise.
                continue
            line = '%s -> %s' % (source, ', '.join(sorted(targets)))
            # Clean up output a bit
            line = line.replace('__main__', 'm')
            a.append(line)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))

def run_case(self, testcase: DataDrivenTestCase) -> None:
    src = '\n'.join(testcase.input)
    dump_all = '# __dump_all__' in src
    if testcase.name.endswith('python2'):
        python_version = defaults.PYTHON2_VERSION
    else:
        python_version = defaults.PYTHON3_VERSION
    options = parse_options(src, testcase, incremental_step=1)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.cache_dir = os.devnull
    options.python_version = python_version
    options.export_types = True
    messages, files, type_map = self.build(src, options)
    a = messages
    if files is None or type_map is None:
        if not a:
            a = ['Unknown compile error (likely syntax error in test case or fixture)']
    else:
        deps = defaultdict(set)  # type: DefaultDict[str, Set[str]]
        for module in files:
            if module in dumped_modules or dump_all and module not in (
                    'abc', 'typing', 'mypy_extensions', 'typing_extensions', 'enum'):
                new_deps = get_dependencies(files[module], type_map, python_version,
                                            options)
                for source in new_deps:
                    deps[source].update(new_deps[source])
        TypeState.add_all_protocol_deps(deps)
        for source, targets in sorted(deps.items()):
            if source.startswith('<enum.'):
                # Remove noise.
                continue
            line = '%s -> %s' % (source, ', '.join(sorted(targets)))
            # Clean up output a bit
            line = line.replace('__main__', 'm')
            a.append(line)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))

def strip_type_info(self, info: TypeInfo) -> List[SymbolNode]:
    info.type_vars = []
    info.bases = []
    info.is_abstract = False
    info.abstract_attributes = []
    info.mro = []
    info.add_type_vars()
    info.tuple_type = None
    info.typeddict_type = None
    TypeState.reset_subtype_caches_for(info)
    info.declared_metaclass = None
    info.metaclass_type = None

    # We need to delete any entries that were generated by plugins,
    # since they will get regenerated.
    to_delete = [(k, v) for k, v in info.names.items() if v.plugin_generated]
    for k, _ in to_delete:
        del info.names[k]
    return [v.node for k, v in to_delete if v.node]

def dump_all_dependencies(modules: Dict[str, MypyFile],
                          type_map: Dict[Expression, Type],
                          python_version: Tuple[int, int]) -> None:
    """Generate dependencies for all interesting modules and print them to stdout."""
    all_deps = {}  # type: Dict[str, Set[str]]
    for id, node in modules.items():
        # Uncomment for debugging:
        # print('processing', id)
        if id in ('builtins', 'typing') or '/typeshed/' in node.path:
            continue
        assert id == node.fullname()
        deps = get_dependencies(node, type_map, python_version)
        for trigger, targets in deps.items():
            all_deps.setdefault(trigger, set()).update(targets)
    TypeState.add_all_protocol_deps(all_deps)

    for trigger, targets in sorted(all_deps.items(), key=lambda x: x[0]):
        print(trigger)
        for target in sorted(targets):
            print('    %s' % target)

def visit_class_def(self, node: ClassDef) -> None:
    """Strip class body and type info, but don't strip methods."""
    # We need to save the implicitly defined instance variables,
    # i.e. those defined as attributes on self. Otherwise, they would
    # be lost if we only reprocess top-levels (this kills TypeInfos)
    # but not the methods that defined those variables.
    if not self.recurse_into_functions:
        self.prepare_implicit_var_patches(node)
    # We need to delete any entries that were generated by plugins,
    # since they will get regenerated.
    to_delete = {v.node for v in node.info.names.values() if v.plugin_generated}
    node.type_vars = []
    node.base_type_exprs.extend(node.removed_base_type_exprs)
    node.removed_base_type_exprs = []
    node.defs.body = [s for s in node.defs.body if s not in to_delete]
    with self.enter_class(node.info):
        super().visit_class_def(node)
    TypeState.reset_subtype_caches_for(node.info)
    # Kill the TypeInfo, since there is none before semantic analysis.
    node.info = CLASSDEF_NO_INFO

def dump_all_dependencies(modules: Dict[str, MypyFile],
                          type_map: Dict[Expression, Type],
                          python_version: Tuple[int, int],
                          options: Options) -> None:
    """Generate dependencies for all interesting modules and print them to stdout."""
    all_deps = {}  # type: Dict[str, Set[str]]
    for id, node in modules.items():
        # Uncomment for debugging:
        # print('processing', id)
        if id in ('builtins', 'typing') or '/typeshed/' in node.path:
            continue
        assert id == node.fullname()
        deps = get_dependencies(node, type_map, python_version, options)
        for trigger, targets in deps.items():
            all_deps.setdefault(trigger, set()).update(targets)
    TypeState.add_all_protocol_deps(all_deps)

    for trigger, targets in sorted(all_deps.items(), key=lambda x: x[0]):
        print(trigger)
        for target in sorted(targets):
            print('    %s' % target)

def visit_class_def(self, node: ClassDef) -> None:
    """Strip class body and type info, but don't strip methods."""
    # We need to save the implicitly defined instance variables,
    # i.e. those defined as attributes on self. Otherwise, they would
    # be lost if we only reprocess top-levels (this kills TypeInfos)
    # but not the methods that defined those variables.
    if not self.recurse_into_functions:
        self.save_implicit_attributes(node)
    # We need to delete any entries that were generated by plugins,
    # since they will get regenerated.
    to_delete = {v.node for v in node.info.names.values() if v.plugin_generated}
    node.type_vars = []
    node.base_type_exprs.extend(node.removed_base_type_exprs)
    node.removed_base_type_exprs = []
    node.defs.body = [s for s in node.defs.body if s not in to_delete]
    with self.enter_class(node.info):
        super().visit_class_def(node)
    TypeState.reset_subtype_caches_for(node.info)
    # Kill the TypeInfo, since there is none before semantic analysis.
    node.info = CLASSDEF_NO_INFO

def visit_instance(self, left: Instance) -> bool:
    right = self.right
    if isinstance(right, Instance):
        if TypeState.is_cached_proper_subtype_check(left, right):
            return True
        for base in left.type.mro:
            if base._promote and is_proper_subtype(base._promote, right):
                TypeState.record_proper_subtype_cache_entry(left, right)
                return True
        if left.type.has_base(right.type.fullname()):
            def check_argument(leftarg: Type, rightarg: Type, variance: int) -> bool:
                if variance == COVARIANT:
                    return is_proper_subtype(leftarg, rightarg)
                elif variance == CONTRAVARIANT:
                    return is_proper_subtype(rightarg, leftarg)
                else:
                    return sametypes.is_same_type(leftarg, rightarg)

            # Map left type to corresponding right instances.
            left = map_instance_to_supertype(left, right.type)

            nominal = all(check_argument(ta, ra, tvar.variance) for ta, ra, tvar in
                          zip(left.args, right.args, right.type.defn.type_vars))
            if nominal:
                TypeState.record_proper_subtype_cache_entry(left, right)
            return nominal
        if (right.type.is_protocol and
                is_protocol_implementation(left, right, proper_subtype=True)):
            return True
        return False
    if isinstance(right, CallableType):
        call = find_member('__call__', left, left)
        if call:
            return is_proper_subtype(call, right)
        return False
    return False

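# Hedged illustration of the variance rules that check_argument above encodes
# (Box, Animal, and Dog are invented for the example): a covariant type
# parameter accepts a subtype argument, which is exactly the COVARIANT branch.
from typing import Generic, TypeVar

T_co = TypeVar('T_co', covariant=True)

class Animal: pass
class Dog(Animal): pass

class Box(Generic[T_co]): pass

b: Box[Animal] = Box[Dog]()  # OK: covariant parameter accepts a subtype argument
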
def free_global_state(self) -> None:
    TypeState.reset_all_subtype_caches()

def merge_dependencies(new_deps: Dict[str, Set[str]],
                       deps: Dict[str, Set[str]]) -> None:
    for trigger, targets in new_deps.items():
        deps.setdefault(trigger, set()).update(targets)
    # Merge also the newly added protocol deps.
    TypeState.update_protocol_deps(deps)

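# A self-contained sketch of the merge semantics above, with TypeState left
# out and made-up trigger/target names: target sets are unioned per trigger.
from typing import Dict, Set

deps = {'<m.f>': {'m.g'}}  # type: Dict[str, Set[str]]
new_deps = {'<m.f>': {'m.h'}, '<m.C>': {'m.g'}}  # type: Dict[str, Set[str]]
for trigger, targets in new_deps.items():
    deps.setdefault(trigger, set()).update(targets)
assert deps == {'<m.f>': {'m.g', 'm.h'}, '<m.C>': {'m.g'}}
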
def is_protocol_implementation(left: Instance, right: Instance,
                               proper_subtype: bool = False) -> bool:
    """Check whether 'left' implements the protocol 'right'.

    If 'proper_subtype' is True, then check for a proper subtype.
    Treat recursive protocols by using the 'assuming' structural subtype matrix
    (in sparse representation, i.e. as a list of pairs (subtype, supertype)),
    see also comment in nodes.TypeInfo. When we enter a check for classes
    (A, P), defined as follows::

      class P(Protocol):
          def f(self) -> P: ...
      class A:
          def f(self) -> A: ...

    this results in A being a subtype of P without infinite recursion.
    On every false result, we pop the assumption, thus avoiding an infinite
    recursion as well.
    """
    assert right.type.is_protocol
    # We need to record this check to generate protocol fine-grained dependencies.
    TypeState.record_protocol_subtype_check(left.type, right.type)
    assuming = right.type.assuming_proper if proper_subtype else right.type.assuming
    for (l, r) in reversed(assuming):
        if sametypes.is_same_type(l, left) and sametypes.is_same_type(r, right):
            return True
    with pop_on_exit(assuming, left, right):
        for member in right.type.protocol_members:
            # nominal subtyping currently ignores '__init__' and '__new__' signatures
            if member in ('__init__', '__new__'):
                continue
            # The third argument below indicates to what self type is bound.
            # We always bind self to the subtype. (Similarly to nominal types).
            supertype = find_member(member, right, left)
            assert supertype is not None
            subtype = find_member(member, left, left)
            # Useful for debugging:
            # print(member, 'of', left, 'has type', subtype)
            # print(member, 'of', right, 'has type', supertype)
            if not subtype:
                return False
            if not proper_subtype:
                # Nominal check currently ignores arg names
                is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True)
            else:
                is_compat = is_proper_subtype(subtype, supertype)
            if not is_compat:
                return False
            if isinstance(subtype, NoneTyp) and isinstance(supertype, CallableType):
                # We want __hash__ = None idiom to work even without --strict-optional
                return False
            subflags = get_member_flags(member, left.type)
            superflags = get_member_flags(member, right.type)
            if IS_SETTABLE in superflags:
                # Check opposite direction for settable attributes.
                if not is_subtype(supertype, subtype):
                    return False
            if (IS_CLASSVAR in subflags) != (IS_CLASSVAR in superflags):
                return False
            if IS_SETTABLE in superflags and IS_SETTABLE not in subflags:
                return False
            # This rule is copied from nominal check in checker.py
            if IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags:
                return False
    if proper_subtype:
        TypeState.record_proper_subtype_cache_entry(left, right)
    else:
        TypeState.record_subtype_cache_entry(left, right)
    return True

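# The recursive-protocol example from the docstring, written out as runnable
# code (a sketch assuming Python 3.8+ for typing.Protocol; older versions
# would use typing_extensions.Protocol instead):
from typing import Protocol

class P(Protocol):
    def f(self) -> 'P': ...

class A:
    def f(self) -> 'A': ...

p: P = A()  # accepted: A structurally implements P, without infinite recursion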