def collect_config(self) -> "ModelConfigData":
    """
    Collects the values of the config attributes that are used by the plugin, accounting for parent classes.
    """
    ctx = self._ctx
    cls = ctx.cls
    config = ModelConfigData()
    for stmt in cls.defs.body:
        if not isinstance(stmt, ClassDef):
            continue
        if stmt.name == "Config":
            for substmt in stmt.defs.body:
                if not isinstance(substmt, AssignmentStmt):
                    continue
                config.update(self.get_config_update(substmt))
            if (
                config.has_alias_generator
                and not config.allow_population_by_field_name
                and self.plugin_config.warn_required_dynamic_aliases
            ):
                error_required_dynamic_aliases(ctx.api, stmt)
    for info in cls.info.mro[1:]:  # 0 is the current class
        if METADATA_KEY not in info.metadata:
            continue

        # Each class depends on the set of fields in its ancestors
        ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
        for name, value in info.metadata[METADATA_KEY]["config"].items():
            config.setdefault(name, value)
    return config
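# A minimal, hypothetical pydantic (v1) model illustrating the kind of Config
# blocks collect_config above reads: settings declared in the class body plus
# settings inherited from ancestor models via their metadata. This is a sketch,
# not part of the plugin source; class names are invented for illustration.
from pydantic import BaseModel

class Parent(BaseModel):
    class Config:
        allow_population_by_field_name = True

class Child(Parent):
    class Config:
        alias_generator = str.upper  # combined with the inherited setting

print(Child.__config__.allow_population_by_field_name)  # True -- inherited from Parent
print(Child.__config__.alias_generator)                  # str.upper -- set on Child itself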
def visit_import_all(self, o: ImportAll) -> None:
    module_id, _ = correct_relative_import(self.scope.current_module_id(),
                                           o.relative, o.id, self.is_package_init_file)
    # The current target needs to be rechecked if anything "significant" changes in the
    # target module namespace (as the imported definitions will need to be updated).
    self.add_dependency(make_wildcard_trigger(module_id))
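# A small sketch of the trigger naming convention used above, assuming a mypy
# checkout where mypy.server.trigger is importable. It only prints the trigger
# strings; the module name "pkg.mod" is a made-up example.
from mypy.server.trigger import make_trigger, make_wildcard_trigger

# A plain trigger fires when one specific fully qualified name changes.
print(make_trigger("pkg.mod.Cls.attr"))   # '<pkg.mod.Cls.attr>'

# A wildcard trigger fires when anything in the module's namespace changes,
# which is the only safe dependency a star import can declare.
print(make_wildcard_trigger("pkg.mod"))   # '<pkg.mod[wildcard]>'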
def _get_inherited_fields(self, self_fields: Set[str]) -> List[Field]:
    ctx = self._ctx
    cls = ctx.cls
    all_fields: List[Field] = []
    known_fields = set(self_fields)

    for ancestor_info in cls.info.mro[1:-1]:
        metadata = ancestor_info.metadata.get(self._get_metadata_key())
        if metadata is None:
            continue
        elif not metadata.get('processed'):
            raise DeferException

        ancestor_fields = []

        ctx.api.add_plugin_dependency(
            mypy_trigger.make_wildcard_trigger(ancestor_info.fullname))

        for name, data in metadata['fields'].items():
            if name not in known_fields:
                if self._has_explicit_field_accessor(name):
                    data = dict(data)
                    data['has_explicit_accessor'] = True
                field = Field.deserialize(ctx.api, data)

                known_fields.add(name)
                ancestor_fields.append(field)
        all_fields = ancestor_fields + all_fields

    return all_fields
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext', auto_attribs: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found."""
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname()))
            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before. This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness. Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False
    for attribute in attributes:
        if attribute.init:
            if not attribute.has_default and last_default:
                ctx.api.fail(
                    "Non-default attributes not allowed after default attributes.",
                    attribute.context)
            last_default |= attribute.has_default

    return attributes
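# A short runnable sketch of the ordering rule the check at the end of
# _analyze_class enforces statically: attrs itself rejects a mandatory
# attribute declared after one with a default. Class names are illustrative.
import attr

@attr.s
class Ok:
    x = attr.ib()           # mandatory attribute first
    y = attr.ib(default=0)  # defaults afterwards are fine

try:
    @attr.s
    class Broken:
        x = attr.ib(default=0)
        y = attr.ib()        # mandatory attribute after a default
except ValueError as exc:
    # attrs raises at class creation time; the plugin reports
    # "Non-default attributes not allowed after default attributes." statically.
    print(exc)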
def _collect_fields(self) -> typing.List[SchemaField]:
    """Collect all fields declared in a schema class and its ancestors."""

    ctx = self._ctx
    cls = self._ctx.cls

    fields: typing.List[SchemaField] = []
    known_fields: typing.Set[str] = set()

    for stmt in cls.defs.body:
        if not isinstance(stmt, nodes.AssignmentStmt):
            continue

        lhs = stmt.lvalues[0]
        rhs = stmt.rvalue

        if not isinstance(rhs, nodes.CallExpr):
            continue

        if (isinstance(rhs.callee, nodes.RefExpr)
                and rhs.callee.fullname in FIELD_MAKERS):
            field = self._field_from_field_def(stmt, lhs, rhs)
            fields.append(field)

    all_fields = fields.copy()

    for ancestor_info in cls.info.mro[1:-1]:
        metadata = ancestor_info.metadata.get(METADATA_KEY)
        if metadata is None:
            continue
        elif not metadata.get('processed'):
            raise DeferException

        ancestor_fields = []

        ctx.api.add_plugin_dependency(
            mypy_trigger.make_wildcard_trigger(ancestor_info.fullname()))

        for name, data in metadata['fields'].items():
            if name not in known_fields:
                field = SchemaField.deserialize(ctx.api, data)

                known_fields.add(name)
                ancestor_fields.append(field)
        all_fields = ancestor_fields + all_fields

    return all_fields
def collect_attributes(self) -> Optional[List[DataclassAttribute]]:
    """Collect all attributes declared in the dataclass and its parents.

    All assignments of the form

      a: SomeType
      b: SomeOtherType = ...

    are collected.
    """
    # First, collect attributes belonging to the current class.
    ctx = self._ctx
    cls = self._ctx.cls
    attrs = []  # type: List[DataclassAttribute]
    known_attrs = set()  # type: Set[str]
    for stmt in cls.defs.body:
        # Any assignment that doesn't use the new type declaration
        # syntax can be ignored out of hand.
        if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
            continue

        # a: int, b: str = 1, 'foo' is not supported syntax so we
        # don't have to worry about it.
        lhs = stmt.lvalues[0]
        if not isinstance(lhs, NameExpr):
            continue

        sym = cls.info.names.get(lhs.name)
        if sym is None:
            # This name is likely blocked by a star import. We don't need to defer because
            # defer() is already called by mark_incomplete().
            continue

        node = sym.node
        if isinstance(node, PlaceholderNode):
            # This node is not ready yet.
            return None
        assert isinstance(node, Var)

        # x: ClassVar[int] is ignored by dataclasses.
        if node.is_classvar:
            continue

        # x: InitVar[int] is turned into x: int and is removed from the class.
        is_init_var = False
        node_type = get_proper_type(node.type)
        if (isinstance(node_type, Instance) and
                node_type.type.fullname == 'dataclasses.InitVar'):
            is_init_var = True
            node.type = node_type.args[0]

        has_field_call, field_args = _collect_field_args(stmt.rvalue)

        is_in_init_param = field_args.get('init')
        if is_in_init_param is None:
            is_in_init = True
        else:
            is_in_init = bool(ctx.api.parse_bool(is_in_init_param))

        has_default = False
        # Ensure that something like x: int = field() is rejected
        # after an attribute with a default.
        if has_field_call:
            has_default = 'default' in field_args or 'default_factory' in field_args

        # All other assignments are already type checked.
        elif not isinstance(stmt.rvalue, TempNode):
            has_default = True

        if not has_default:
            # Make all non-default attributes implicit because they are de-facto set
            # on self in the generated __init__(), not in the class body.
            sym.implicit = True

        known_attrs.add(lhs.name)
        attrs.append(
            DataclassAttribute(
                name=lhs.name,
                is_in_init=is_in_init,
                is_init_var=is_init_var,
                has_default=has_default,
                line=stmt.line,
                column=stmt.column,
                type=sym.type,
            ))

    # Next, collect attributes belonging to any class in the MRO
    # as long as those attributes weren't already collected. This
    # makes it possible to overwrite attributes in subclasses.
    # copy() because we potentially modify all_attrs below and if this code requires debugging
    # we'll have unmodified attrs laying around.
    all_attrs = attrs.copy()
    for info in cls.info.mro[1:-1]:
        if 'dataclass' not in info.metadata:
            continue

        super_attrs = []
        # Each class depends on the set of attributes in its dataclass ancestors.
        ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname))

        for data in info.metadata['dataclass']['attributes']:
            name = data['name']  # type: str
            if name not in known_attrs:
                attr = DataclassAttribute.deserialize(info, data, ctx.api)
                known_attrs.add(name)
                super_attrs.append(attr)
            elif all_attrs:
                # How early in the attribute list an attribute appears is determined by the
                # reverse MRO, not simply MRO.
                # See https://docs.python.org/3/library/dataclasses.html#inheritance for
                # details.
                for attr in all_attrs:
                    if attr.name == name:
                        all_attrs.remove(attr)
                        super_attrs.append(attr)
                        break
        all_attrs = super_attrs + all_attrs

    # Ensure that arguments without a default don't follow
    # arguments that have a default.
    # found_default = False
    # for attr in all_attrs:
    #     # If we find any attribute that is_in_init but that
    #     # doesn't have a default after one that does have one,
    #     # then that's an error.
    #     if found_default and attr.is_in_init and not attr.has_default:
    #         # If the issue comes from merging different classes, report it
    #         # at the class definition point.
    #         context = (Context(line=attr.line, column=attr.column) if attr in attrs
    #                    else ctx.cls)
    #         ctx.api.fail(
    #             'Attributes without a default cannot follow attributes with one',
    #             context,
    #         )
    #
    #     found_default = found_default or (attr.has_default and attr.is_in_init)

    return all_attrs
def collect_attributes(self) -> List[DataclassAttribute]:
    """Collect all attributes declared in the dataclass and its parents.

    All assignments of the form

      a: SomeType
      b: SomeOtherType = ...

    are collected.
    """
    # First, collect attributes belonging to the current class.
    ctx = self._ctx
    cls = self._ctx.cls
    attrs = []  # type: List[DataclassAttribute]
    known_attrs = set()  # type: Set[str]
    for stmt in cls.defs.body:
        # Any assignment that doesn't use the new type declaration
        # syntax can be ignored out of hand.
        if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
            continue

        # a: int, b: str = 1, 'foo' is not supported syntax so we
        # don't have to worry about it.
        lhs = stmt.lvalues[0]
        if not isinstance(lhs, NameExpr):
            continue

        node = cls.info.names[lhs.name].node
        assert isinstance(node, Var)

        # x: ClassVar[int] is ignored by dataclasses.
        if node.is_classvar:
            continue

        # x: InitVar[int] is turned into x: int and is removed from the class.
        is_init_var = False
        if (
                isinstance(node.type, Instance) and
                node.type.type.fullname() == 'dataclasses.InitVar'
        ):
            is_init_var = True
            node.type = node.type.args[0]

        has_field_call, field_args = _collect_field_args(stmt.rvalue)

        is_in_init_param = field_args.get('init')
        if is_in_init_param is None:
            is_in_init = True
        else:
            is_in_init = bool(ctx.api.parse_bool(is_in_init_param))

        has_default = False
        # Ensure that something like x: int = field() is rejected
        # after an attribute with a default.
        if has_field_call:
            has_default = 'default' in field_args or 'default_factory' in field_args

        # All other assignments are already type checked.
        elif not isinstance(stmt.rvalue, TempNode):
            has_default = True

        known_attrs.add(lhs.name)
        attrs.append(DataclassAttribute(
            name=lhs.name,
            is_in_init=is_in_init,
            is_init_var=is_init_var,
            has_default=has_default,
            line=stmt.line,
            column=stmt.column,
        ))

    # Next, collect attributes belonging to any class in the MRO
    # as long as those attributes weren't already collected. This
    # makes it possible to overwrite attributes in subclasses.
    # copy() because we potentially modify all_attrs below and if this code requires debugging
    # we'll have unmodified attrs laying around.
    all_attrs = attrs.copy()
    init_method = cls.info.get_method('__init__')
    for info in cls.info.mro[1:-1]:
        if 'dataclass' not in info.metadata:
            continue

        super_attrs = []
        # Each class depends on the set of attributes in its dataclass ancestors.
        ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname()))

        for name, data in info.metadata['dataclass']['attributes'].items():
            if name not in known_attrs:
                attr = DataclassAttribute.deserialize(info, data)

                if attr.is_init_var and isinstance(init_method, FuncDef):
                    # InitVars are removed from classes so, in order for them to be inherited
                    # properly, we need to re-inject them into subclasses' sym tables here.
                    # To do that, we look 'em up from the parents' __init__. These variables
                    # are subsequently removed from the sym table at the end of
                    # DataclassTransformer.transform.
                    for arg, arg_name in zip(init_method.arguments, init_method.arg_names):
                        if arg_name == attr.name:
                            cls.info.names[attr.name] = SymbolTableNode(MDEF, arg.variable)

                known_attrs.add(name)
                super_attrs.append(attr)
            else:
                # How early in the attribute list an attribute appears is determined by the
                # reverse MRO, not simply MRO.
                # See https://docs.python.org/3/library/dataclasses.html#inheritance for
                # details.
                (attr,) = [a for a in all_attrs if a.name == name]
                all_attrs.remove(attr)
                super_attrs.append(attr)
        all_attrs = super_attrs + all_attrs

    # Ensure that arguments without a default don't follow
    # arguments that have a default.
    found_default = False
    for attr in all_attrs:
        # If we find any attribute that is_in_init but that
        # doesn't have a default after one that does have one,
        # then that's an error.
        if found_default and attr.is_in_init and not attr.has_default:
            ctx.api.fail(
                'Attributes without a default cannot follow attributes with one',
                Context(line=attr.line, column=attr.column),
            )

        found_default = found_default or attr.has_default

    return all_attrs
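# A runnable sketch of the inheritance behaviour the MRO walk above mirrors:
# field order follows reverse MRO, and re-declaring a base field in a subclass
# keeps the base's position while taking the new default. Class names are
# illustrative only.
from dataclasses import dataclass, fields

@dataclass
class Base:
    x: int = 0
    y: int = 1

@dataclass
class Sub(Base):
    z: int = 2
    x: int = 42  # overrides Base.x but keeps Base.x's position

print([f.name for f in fields(Sub)])  # ['x', 'y', 'z']
print(Sub())                          # Sub(x=42, y=1, z=2)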
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext',
                   auto_attribs: Optional[bool],
                   kw_only: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    auto_attribs=None means we'll detect which mode to use.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    if auto_attribs is None:
        auto_attribs = _detect_auto_attribs(ctx)

    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            if isinstance(node, PlaceholderNode):
                # This node is not ready yet.
                continue
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname))
            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before. This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data, ctx.api)
                    a.expand_typevar_from_subtype(ctx.cls.info)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness. Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False

    for i, attribute in enumerate(attributes):
        if not attribute.init:
            continue

        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            continue

        # If the issue comes from merging different classes, report it
        # at the class definition point.
        context = attribute.context if i >= len(super_attrs) else ctx.cls

        if not attribute.has_default and last_default:
            ctx.api.fail(
                "Non-default attributes not allowed after default attributes.",
                context)
        last_default |= attribute.has_default

    return attributes
def process_type_info(self, info: TypeInfo) -> None:
    target = self.scope.current_full_target()
    for base in info.bases:
        self.add_type_dependencies(base, target=target)
    if info.tuple_type:
        self.add_type_dependencies(info.tuple_type, target=make_trigger(target))
    if info.typeddict_type:
        self.add_type_dependencies(info.typeddict_type, target=make_trigger(target))
    if info.declared_metaclass:
        self.add_type_dependencies(info.declared_metaclass, target=make_trigger(target))
    if info.is_protocol:
        for base_info in info.mro[:-1]:
            # We add dependencies from whole MRO to cover explicit subprotocols.
            # For example:
            #
            #     class Super(Protocol):
            #         x: int
            #     class Sub(Super, Protocol):
            #         y: int
            #
            # In this example we add <Super[wildcard]> -> <Sub>, to invalidate Sub if
            # a new member is added to Super.
            self.add_dependency(make_wildcard_trigger(base_info.fullname()),
                                target=make_trigger(target))
    # More protocol dependencies are collected in TypeState._snapshot_protocol_deps
    # after a full run or update is finished.
    self.add_type_alias_deps(self.scope.current_target())
    for name, node in info.names.items():
        if isinstance(node.node, Var):
            # Recheck Liskov if needed, self definitions are checked in the defining method
            if node.node.is_initialized_in_class and has_user_bases(info):
                self.add_dependency(make_trigger(info.fullname() + '.' + name))
            for base_info in non_trivial_bases(info):
                # If the type of an attribute changes in a base class, we make references
                # to the attribute in the subclass stale.
                self.add_dependency(make_trigger(base_info.fullname() + '.' + name),
                                    target=make_trigger(info.fullname() + '.' + name))
    for base_info in non_trivial_bases(info):
        for name, node in base_info.names.items():
            if self.options and self.options.logical_deps:
                # Skip logical dependency if an attribute is not overridden. For example,
                # in case of:
                #     class Base:
                #         x = 1
                #         y = 2
                #     class Sub(Base):
                #         x = 3
                # we skip <Base.y> -> <Child.y>, because even if `y` is unannotated it
                # doesn't affect precision of Liskov checking.
                if name not in info.names:
                    continue
            self.add_dependency(make_trigger(base_info.fullname() + '.' + name),
                                target=make_trigger(info.fullname() + '.' + name))
        self.add_dependency(make_trigger(base_info.fullname() + '.__init__'),
                            target=make_trigger(info.fullname() + '.__init__'))
        self.add_dependency(make_trigger(base_info.fullname() + '.__new__'),
                            target=make_trigger(info.fullname() + '.__new__'))
        # If the set of abstract attributes change, this may invalidate class
        # instantiation, or change the generated error message, since Python checks
        # class abstract status when creating an instance.
        #
        # TODO: We should probably add this dependency only from the __init__ of the
        #       current class, and independent of bases (to trigger changes in message
        #       wording, as errors may enumerate all abstract attributes).
        self.add_dependency(make_trigger(base_info.fullname() + '.(abstract)'),
                            target=make_trigger(info.fullname() + '.__init__'))
        # If the base class abstract attributes change, subclass abstract
        # attributes need to be recalculated.
        self.add_dependency(make_trigger(base_info.fullname() + '.(abstract)'))
def collect_attributes(self) -> List[DataclassAttribute]:
    """Collect all attributes declared in the dataclass and its parents.

    All assignments of the form

      a: SomeType
      b: SomeOtherType = ...

    are collected.
    """
    # First, collect attributes belonging to the current class.
    ctx = self._ctx
    cls = self._ctx.cls
    attrs = []  # type: List[DataclassAttribute]
    known_attrs = set()  # type: Set[str]
    for stmt in cls.defs.body:
        # Any assignment that doesn't use the new type declaration
        # syntax can be ignored out of hand.
        if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
            continue

        # a: int, b: str = 1, 'foo' is not supported syntax so we
        # don't have to worry about it.
        lhs = stmt.lvalues[0]
        if not isinstance(lhs, NameExpr):
            continue

        node = cls.info.names[lhs.name].node
        assert isinstance(node, Var)

        # x: ClassVar[int] is ignored by dataclasses.
        if node.is_classvar:
            continue

        # x: InitVar[int] is turned into x: int and is removed from the class.
        is_init_var = False
        if (
                isinstance(node.type, Instance) and
                node.type.type.fullname() == 'dataclasses.InitVar'
        ):
            is_init_var = True
            node.type = node.type.args[0]

        has_field_call, field_args = _collect_field_args(stmt.rvalue)

        is_in_init_param = field_args.get('init')
        if is_in_init_param is None:
            is_in_init = True
        else:
            is_in_init = bool(ctx.api.parse_bool(is_in_init_param))

        has_default = False
        # Ensure that something like x: int = field() is rejected
        # after an attribute with a default.
        if has_field_call:
            has_default = 'default' in field_args or 'default_factory' in field_args

        # All other assignments are already type checked.
        elif not isinstance(stmt.rvalue, TempNode):
            has_default = True

        known_attrs.add(lhs.name)
        attrs.append(DataclassAttribute(
            name=lhs.name,
            is_in_init=is_in_init,
            is_init_var=is_init_var,
            has_default=has_default,
            line=stmt.line,
            column=stmt.column,
        ))

    # Next, collect attributes belonging to any class in the MRO
    # as long as those attributes weren't already collected. This
    # makes it possible to overwrite attributes in subclasses.
    # copy() because we potentially modify all_attrs below and if this code requires debugging
    # we'll have unmodified attrs laying around.
    all_attrs = attrs.copy()
    init_method = cls.info.get_method('__init__')
    for info in cls.info.mro[1:-1]:
        if 'dataclass' not in info.metadata:
            continue

        super_attrs = []
        # Each class depends on the set of attributes in its dataclass ancestors.
        ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname()))

        for name, data in info.metadata['dataclass']['attributes'].items():
            if name not in known_attrs:
                attr = DataclassAttribute.deserialize(info, data)

                if attr.is_init_var and isinstance(init_method, FuncDef):
                    # InitVars are removed from classes so, in order for them to be inherited
                    # properly, we need to re-inject them into subclasses' sym tables here.
                    # To do that, we look 'em up from the parents' __init__. These variables
                    # are subsequently removed from the sym table at the end of
                    # DataclassTransformer.transform.
                    for arg, arg_name in zip(init_method.arguments, init_method.arg_names):
                        if arg_name == attr.name:
                            cls.info.names[attr.name] = SymbolTableNode(MDEF, arg.variable)

                known_attrs.add(name)
                super_attrs.append(attr)
            else:
                # How early in the attribute list an attribute appears is determined by the
                # reverse MRO, not simply MRO.
                # See https://docs.python.org/3/library/dataclasses.html#inheritance for
                # details.
                (attr,) = [a for a in all_attrs if a.name == name]
                all_attrs.remove(attr)
                super_attrs.append(attr)
        all_attrs = super_attrs + all_attrs

    # Ensure that arguments without a default don't follow
    # arguments that have a default.
    found_default = False
    for attr in all_attrs:
        # If we find any attribute that is_in_init but that
        # doesn't have a default after one that does have one,
        # then that's an error.
        if found_default and attr.is_in_init and not attr.has_default:
            ctx.api.fail(
                'Attributes without a default cannot follow attributes with one',
                Context(line=attr.line, column=attr.column),
            )

        found_default = found_default or (attr.has_default and attr.is_in_init)

    return all_attrs
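# A small runnable sketch of the InitVar inheritance case the re-injection
# above handles: an InitVar declared on a base dataclass still appears as an
# __init__ parameter of the subclass even though it is not a field. Names are
# illustrative only.
from dataclasses import dataclass, InitVar, field

@dataclass
class Base:
    raw: InitVar[str]
    length: int = field(init=False, default=0)

    def __post_init__(self, raw: str) -> None:
        self.length = len(raw)

@dataclass
class Sub(Base):
    pass

print(Sub("hello").length)  # 5 -- 'raw' is inherited as an __init__ argument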
def collect_fields(self, model_config: "ModelConfigData") -> List["PydanticModelField"]:
    """
    Collects the fields for the model, accounting for parent classes
    """
    # First, collect fields belonging to the current class.
    ctx = self._ctx
    cls = self._ctx.cls
    fields = []  # type: List[PydanticModelField]
    known_fields = set()  # type: Set[str]
    for stmt in cls.defs.body:
        if not isinstance(stmt, AssignmentStmt):  # `and stmt.new_syntax` to require annotation
            continue

        lhs = stmt.lvalues[0]
        if not isinstance(lhs, NameExpr):
            continue

        if not stmt.new_syntax and self.plugin_config.warn_untyped_fields:
            error_untyped_fields(ctx.api, stmt)

        # if lhs.name == '__config__':  # BaseConfig not well handled; I'm not sure why yet
        #     continue

        sym = cls.info.names.get(lhs.name)
        if sym is None:  # pragma: no cover
            # This is likely due to a star import (see the dataclasses plugin for a more detailed explanation)
            # This is the same logic used in the dataclasses plugin
            continue

        node = sym.node
        if isinstance(node, PlaceholderNode):  # pragma: no cover
            # See the PlaceholderNode docstring for more detail about how this can occur
            # Basically, it is an edge case when dealing with complex import logic
            # This is the same logic used in the dataclasses plugin
            continue
        assert isinstance(node, Var)

        # x: ClassVar[int] is ignored by dataclasses.
        if node.is_classvar:
            continue

        is_required = self.get_is_required(cls, stmt, lhs)
        alias, has_dynamic_alias = self.get_alias_info(stmt)
        if (
            has_dynamic_alias
            and not model_config.allow_population_by_field_name
            and self.plugin_config.warn_required_dynamic_aliases
        ):
            error_required_dynamic_aliases(ctx.api, stmt)
        fields.append(
            PydanticModelField(
                name=lhs.name,
                is_required=is_required,
                alias=alias,
                has_dynamic_alias=has_dynamic_alias,
                line=stmt.line,
                column=stmt.column,
            ))
        known_fields.add(lhs.name)
    all_fields = fields.copy()
    for info in cls.info.mro[1:]:  # 0 is the current class, -2 is BaseModel, -1 is object
        if METADATA_KEY not in info.metadata:
            continue

        superclass_fields = []
        # Each class depends on the set of fields in its ancestors
        ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))

        for name, data in info.metadata[METADATA_KEY]["fields"].items():
            if name not in known_fields:
                field = PydanticModelField.deserialize(info, data)
                known_fields.add(name)
                superclass_fields.append(field)
            else:
                (field,) = [a for a in all_fields if a.name == name]
                all_fields.remove(field)
                superclass_fields.append(field)

        all_fields = superclass_fields + all_fields
    return all_fields
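# A hypothetical pydantic (v1) model showing what collect_fields gathers:
# fields declared in the class body plus fields inherited from BaseModel
# ancestors, with alias information taken into account. This is a usage
# sketch; the model and field names are invented.
from pydantic import BaseModel, Field

class Parent(BaseModel):
    id: int

class Child(Parent):
    name: str = Field(..., alias="display_name")

    class Config:
        allow_population_by_field_name = True

# 'id' comes from the ancestor walk, 'name' from the class body.
print(list(Child.__fields__))                     # ['id', 'name']
print(Child(id=1, name="x").dict(by_alias=True))  # {'id': 1, 'display_name': 'x'}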
def process_type_info(self, info: TypeInfo) -> None:
    target = self.scope.current_full_target()
    for base in info.bases:
        self.add_type_dependencies(base, target=target)
    if info.tuple_type:
        self.add_type_dependencies(info.tuple_type, target=make_trigger(target))
    if info.typeddict_type:
        self.add_type_dependencies(info.typeddict_type, target=make_trigger(target))
    if info.declared_metaclass:
        self.add_type_dependencies(info.declared_metaclass, target=make_trigger(target))
    if info.is_protocol:
        for base_info in info.mro[:-1]:
            # We add dependencies from whole MRO to cover explicit subprotocols.
            # For example:
            #
            #     class Super(Protocol):
            #         x: int
            #     class Sub(Super, Protocol):
            #         y: int
            #
            # In this example we add <Super[wildcard]> -> <Sub>, to invalidate Sub if
            # a new member is added to Super.
            self.add_dependency(make_wildcard_trigger(base_info.fullname()),
                                target=make_trigger(target))
    # More protocol dependencies are collected in TypeState._snapshot_protocol_deps
    # after a full run or update is finished.
    self.add_type_alias_deps(self.scope.current_target())
    for name, node in info.names.items():
        if isinstance(node.node, Var):
            # Recheck Liskov if needed, self definitions are checked in the defining method
            if node.node.is_initialized_in_class and has_user_bases(info):
                self.add_dependency(make_trigger(info.fullname() + '.' + name))
            for base_info in non_trivial_bases(info):
                # If the type of an attribute changes in a base class, we make references
                # to the attribute in the subclass stale.
                self.add_dependency(make_trigger(base_info.fullname() + '.' + name),
                                    target=make_trigger(info.fullname() + '.' + name))
    for base_info in non_trivial_bases(info):
        for name, node in base_info.names.items():
            if self.use_logical_deps():
                # Skip logical dependency if an attribute is not overridden. For example,
                # in case of:
                #     class Base:
                #         x = 1
                #         y = 2
                #     class Sub(Base):
                #         x = 3
                # we skip <Base.y> -> <Child.y>, because even if `y` is unannotated it
                # doesn't affect precision of Liskov checking.
                if name not in info.names:
                    continue
                # __init__ and __new__ can be overridden with different signatures, so no
                # logical dependency.
                if name in ('__init__', '__new__'):
                    continue
            self.add_dependency(make_trigger(base_info.fullname() + '.' + name),
                                target=make_trigger(info.fullname() + '.' + name))
        if not self.use_logical_deps():
            # These dependencies are only useful for propagating changes --
            # they aren't logical dependencies since __init__ and __new__ can be
            # overridden with a different signature.
            self.add_dependency(make_trigger(base_info.fullname() + '.__init__'),
                                target=make_trigger(info.fullname() + '.__init__'))
            self.add_dependency(make_trigger(base_info.fullname() + '.__new__'),
                                target=make_trigger(info.fullname() + '.__new__'))
        # If the set of abstract attributes change, this may invalidate class
        # instantiation, or change the generated error message, since Python checks
        # class abstract status when creating an instance.
        #
        # TODO: We should probably add this dependency only from the __init__ of the
        #       current class, and independent of bases (to trigger changes in message
        #       wording, as errors may enumerate all abstract attributes).
        self.add_dependency(make_trigger(base_info.fullname() + '.(abstract)'),
                            target=make_trigger(info.fullname() + '.__init__'))
        # If the base class abstract attributes change, subclass abstract
        # attributes need to be recalculated.
        self.add_dependency(make_trigger(base_info.fullname() + '.(abstract)'))
def collect_attributes(self) -> Optional[List[DataclassAttribute]]:
    """Collect all attributes declared in the dataclass and its parents.

    All assignments of the form

      a: SomeType
      b: SomeOtherType = ...

    are collected.

    Return None if some dataclass base class hasn't been processed
    yet and thus we'll need to ask for another pass.
    """
    # First, collect attributes belonging to the current class.
    ctx = self._ctx
    cls = self._ctx.cls
    attrs: List[DataclassAttribute] = []
    known_attrs: Set[str] = set()
    kw_only = _get_decorator_bool_argument(ctx, 'kw_only', False)
    for stmt in cls.defs.body:
        # Any assignment that doesn't use the new type declaration
        # syntax can be ignored out of hand.
        if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
            continue

        # a: int, b: str = 1, 'foo' is not supported syntax so we
        # don't have to worry about it.
        lhs = stmt.lvalues[0]
        if not isinstance(lhs, NameExpr):
            continue

        sym = cls.info.names.get(lhs.name)
        if sym is None:
            # There was probably a semantic analysis error.
            continue

        node = sym.node
        assert not isinstance(node, PlaceholderNode)

        if isinstance(node, TypeAlias):
            ctx.api.fail(
                (
                    'Type aliases inside dataclass definitions '
                    'are not supported at runtime'
                ),
                node
            )
            # Skip processing this node. This doesn't match the runtime behaviour,
            # but the only alternative would be to modify the SymbolTable,
            # and it's a little hairy to do that in a plugin.
            continue

        assert isinstance(node, Var)

        # x: ClassVar[int] is ignored by dataclasses.
        if node.is_classvar:
            continue

        # x: InitVar[int] is turned into x: int and is removed from the class.
        is_init_var = False
        node_type = get_proper_type(node.type)
        if (isinstance(node_type, Instance) and
                node_type.type.fullname == 'dataclasses.InitVar'):
            is_init_var = True
            node.type = node_type.args[0]

        if self._is_kw_only_type(node_type):
            kw_only = True

        has_field_call, field_args = _collect_field_args(stmt.rvalue, ctx)

        is_in_init_param = field_args.get('init')
        if is_in_init_param is None:
            is_in_init = True
        else:
            is_in_init = bool(ctx.api.parse_bool(is_in_init_param))

        has_default = False
        # Ensure that something like x: int = field() is rejected
        # after an attribute with a default.
        if has_field_call:
            has_default = 'default' in field_args or 'default_factory' in field_args

        # All other assignments are already type checked.
        elif not isinstance(stmt.rvalue, TempNode):
            has_default = True

        if not has_default:
            # Make all non-default attributes implicit because they are de-facto set
            # on self in the generated __init__(), not in the class body.
            sym.implicit = True

        is_kw_only = kw_only
        # Use the kw_only field arg if it is provided. Otherwise use the
        # kw_only value from the decorator parameter.
        field_kw_only_param = field_args.get('kw_only')
        if field_kw_only_param is not None:
            is_kw_only = bool(ctx.api.parse_bool(field_kw_only_param))

        known_attrs.add(lhs.name)
        attrs.append(DataclassAttribute(
            name=lhs.name,
            is_in_init=is_in_init,
            is_init_var=is_init_var,
            has_default=has_default,
            line=stmt.line,
            column=stmt.column,
            type=sym.type,
            info=cls.info,
            kw_only=is_kw_only,
        ))

    # Next, collect attributes belonging to any class in the MRO
    # as long as those attributes weren't already collected. This
    # makes it possible to overwrite attributes in subclasses.
    # copy() because we potentially modify all_attrs below and if this code requires debugging
    # we'll have unmodified attrs laying around.
    all_attrs = attrs.copy()
    for info in cls.info.mro[1:-1]:
        if 'dataclass_tag' in info.metadata and 'dataclass' not in info.metadata:
            # We haven't processed the base class yet. Need another pass.
            return None
        if 'dataclass' not in info.metadata:
            continue

        super_attrs = []
        # Each class depends on the set of attributes in its dataclass ancestors.
        ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname))

        for data in info.metadata["dataclass"]["attributes"]:
            name: str = data["name"]
            if name not in known_attrs:
                attr = DataclassAttribute.deserialize(info, data, ctx.api)
                # TODO: We shouldn't be performing type operations during the main
                #       semantic analysis pass, since some TypeInfo attributes might
                #       still be in flux. This should be performed in a later phase.
                with state.strict_optional_set(ctx.api.options.strict_optional):
                    attr.expand_typevar_from_subtype(ctx.cls.info)
                known_attrs.add(name)
                super_attrs.append(attr)
            elif all_attrs:
                # How early in the attribute list an attribute appears is determined by the
                # reverse MRO, not simply MRO.
                # See https://docs.python.org/3/library/dataclasses.html#inheritance for
                # details.
                for attr in all_attrs:
                    if attr.name == name:
                        all_attrs.remove(attr)
                        super_attrs.append(attr)
                        break
        all_attrs = super_attrs + all_attrs
        all_attrs.sort(key=lambda a: a.kw_only)

    # Ensure that arguments without a default don't follow
    # arguments that have a default.
    found_default = False
    # Ensure that the KW_ONLY sentinel is only provided once
    found_kw_sentinel = False
    for attr in all_attrs:
        # If we find any attribute that is_in_init, not kw_only, and that
        # doesn't have a default after one that does have one,
        # then that's an error.
        if found_default and attr.is_in_init and not attr.has_default and not attr.kw_only:
            # If the issue comes from merging different classes, report it
            # at the class definition point.
            context = (Context(line=attr.line, column=attr.column) if attr in attrs
                       else ctx.cls)
            ctx.api.fail(
                'Attributes without a default cannot follow attributes with one',
                context,
            )

        found_default = found_default or (attr.has_default and attr.is_in_init)
        if found_kw_sentinel and self._is_kw_only_type(attr.type):
            context = (Context(line=attr.line, column=attr.column) if attr in attrs
                       else ctx.cls)
            ctx.api.fail(
                'There may not be more than one field with the KW_ONLY type',
                context,
            )
        found_kw_sentinel = found_kw_sentinel or self._is_kw_only_type(attr.type)

    return all_attrs
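# A runnable sketch of the KW_ONLY sentinel behaviour mirrored above
# (Python 3.10+): fields after the sentinel become keyword-only, so the usual
# "non-default after default" ordering rule no longer applies to them.
from dataclasses import dataclass, KW_ONLY

@dataclass
class Point:
    x: float = 0.0
    _: KW_ONLY
    label: str  # no default needed despite following a defaulted field

print(Point(1.5, label="origin"))  # Point(x=1.5, label='origin')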
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext',
                   auto_attribs: bool,
                   kw_only: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname()))
            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before. This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness. Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False
    last_kw_only = False

    for attribute in attributes:
        if not attribute.init:
            continue

        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            last_kw_only = True
            continue

        if not attribute.has_default and last_default:
            ctx.api.fail(
                "Non-default attributes not allowed after default attributes.",
                attribute.context)
        if last_kw_only:
            ctx.api.fail(
                "Non keyword-only attributes are not allowed after a keyword-only attribute.",
                attribute.context
            )
        last_default |= attribute.has_default

    return attributes
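# A small runnable sketch of the attrs kw_only case handled above: a mandatory
# keyword-only attribute may follow a defaulted one without triggering the
# ordering error, while the plugin additionally rejects non-keyword-only
# attributes declared after a keyword-only one. Names are illustrative.
import attr

@attr.s
class Event:
    source = attr.ib(default="unknown")
    name = attr.ib(kw_only=True)  # mandatory, but keyword-only, so ordering is fine

print(Event(name="started"))      # Event(source='unknown', name='started')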
def _collect_fields(self) -> List[Field]:
    """Collect all fields declared in a schema class and its ancestors."""

    ctx = self._ctx
    cls = self._ctx.cls

    fields: List[Field] = []
    known_fields: Set[str] = set()

    for stmt in cls.defs.body:
        if not isinstance(stmt, nodes.AssignmentStmt):
            continue

        lhs = stmt.lvalues[0]
        rhs = stmt.rvalue

        if not isinstance(rhs, nodes.CallExpr):
            continue

        fdef = rhs.callee

        if (isinstance(fdef, nodes.IndexExpr)
                and isinstance(fdef.analyzed, nodes.TypeApplication)):
            # Explicitly typed Field declaration
            ctor = fdef.analyzed.expr
            if len(fdef.analyzed.types) > 1:
                ctx.api.fail('too many type arguments to Field')
            ftype = fdef.analyzed.types[0]
        else:
            ctor = fdef
            ftype = None

        if (isinstance(ctor, nodes.RefExpr)
                and ctor.fullname in self._field_makers):
            field = self._field_from_field_def(stmt, lhs, rhs, ftype=ftype)
            fields.append(field)

    all_fields = fields.copy()

    for ancestor_info in cls.info.mro[1:-1]:
        metadata = ancestor_info.metadata.get(self._get_metadata_key())
        if metadata is None:
            continue
        elif not metadata.get('processed'):
            raise DeferException

        ancestor_fields = []

        ctx.api.add_plugin_dependency(
            mypy_trigger.make_wildcard_trigger(ancestor_info.fullname))

        for name, data in metadata['fields'].items():
            if name not in known_fields:
                if self._has_explicit_field_accessor(name):
                    data = dict(data)
                    data['has_explicit_accessor'] = True
                field = Field.deserialize(ctx.api, data)

                known_fields.add(name)
                ancestor_fields.append(field)
        all_fields = ancestor_fields + all_fields

    return all_fields