def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
    """Compute the join of a TypedDict with the other operand (self.s)."""
    if isinstance(self.s, TypedDictType):
        # Keep only the items both operands share where the value types are
        # equivalent and the key has the same requiredness on both sides.
        joined_items = OrderedDict()
        for key, left_typ, right_typ in self.s.zip(t):
            same_requiredness = (key in t.required_keys) == (key in self.s.required_keys)
            if is_equivalent(left_typ, right_typ) and same_requiredness:
                joined_items[key] = left_typ
        value_join = join_type_list(list(joined_items.values()))
        fallback = self.s.create_anonymous_fallback(value_type=value_join)
        # Intersect with joined_items: a key required in both operands may
        # still have been dropped above if its value types were incompatible.
        required = set(joined_items) & t.required_keys & self.s.required_keys
        return TypedDictType(joined_items, required, fallback)
    if isinstance(self.s, Instance):
        # A TypedDict joined with a plain Instance degrades to its fallback.
        return join_types(self.s, t.fallback)
    return self.default(self.s)
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
    """Compute the meet of a TypedDict with the other operand (self.s)."""
    if isinstance(self.s, TypedDictType):
        # The meet only exists when every shared item agrees on its value
        # type and on whether the key is required.
        for key, left, right in self.s.zip(t):
            compatible = (is_equivalent(left, right)
                          and (key in t.required_keys) == (key in self.s.required_keys))
            if not compatible:
                return self.default(self.s)
        # Merge all items from both operands, preferring self.s's item type
        # when present.
        merged = OrderedDict()
        for key, left, right in self.s.zipall(t):
            if left is not None:
                merged[key] = left
            else:
                # zipall() never yields a pair where both sides are None.
                assert right is not None
                merged[key] = right
        value_join = join_type_list(list(merged.values()))
        fallback = self.s.create_anonymous_fallback(value_type=value_join)
        # A key is required in the meet if either operand requires it.
        required = t.required_keys | self.s.required_keys
        return TypedDictType(merged, required, fallback)
    if isinstance(self.s, Instance) and is_subtype(t, self.s):
        # Meet with a supertype Instance is just the TypedDict itself.
        return t
    return self.default(self.s)
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext',
                   auto_attribs: Optional[bool],
                   kw_only: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    auto_attribs=None means we'll detect which mode to use.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    if auto_attribs is None:
        auto_attribs = _detect_auto_attribs(ctx)
    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            # Validators/defaults declared via @x.validator / @x.default decorators.
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            if isinstance(node, PlaceholderNode):
                # This node is not ready yet.
                continue
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname))
            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before. This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data, ctx.api)
                    a.expand_typevar_from_subtype(ctx.cls.info)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    # Inherited attributes come first, matching attrs' __init__ argument order.
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness. Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False
    for i, attribute in enumerate(attributes):
        if not attribute.init:
            continue
        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            continue
        # If the issue comes from merging different classes, report it
        # at the class definition point.
        context = attribute.context if i >= len(super_attrs) else ctx.cls
        if not attribute.has_default and last_default:
            ctx.api.fail(
                "Non-default attributes not allowed after default attributes.",
                context)
        last_default |= attribute.has_default

    return attributes
class Errors:
    """Container for compile errors.

    This class generates and keeps tracks of compile errors and the
    current error context (nested imports).
    """

    # Map from files to generated error messages. Is an OrderedDict so
    # that it can be used to order messages based on the order the
    # files were processed.
    error_info_map = None  # type: Dict[str, List[ErrorInfo]]

    # Files that we have reported the errors for
    flushed_files = None  # type: Set[str]

    # Current error context: nested import context/stack, as a list of (path, line) pairs.
    import_ctx = None  # type: List[Tuple[str, int]]

    # Path name prefix that is removed from all paths, if set.
    ignore_prefix = None  # type: Optional[str]

    # Path to current file.
    file = ''  # type: str

    # Ignore some errors on these lines of each file
    # (path -> line -> error-codes)
    ignored_lines = None  # type: Dict[str, Dict[int, List[str]]]

    # Lines on which an error was actually ignored.
    used_ignored_lines = None  # type: Dict[str, Set[int]]

    # Files where all errors should be ignored.
    ignored_files = None  # type: Set[str]

    # Collection of reported only_once messages.
    only_once_messages = None  # type: Set[str]

    # Set to True to show "In function "foo":" messages.
    show_error_context = False  # type: bool

    # Set to True to show column numbers in error messages.
    show_column_numbers = False  # type: bool

    # Set to True to show absolute file paths in error messages.
    show_absolute_path = False  # type: bool

    # State for keeping track of the current fine-grained incremental mode target.
    # (See mypy.server.update for more about targets.)
    # Current module id.
    target_module = None  # type: Optional[str]
    scope = None  # type: Optional[Scope]

    def __init__(self,
                 show_error_context: bool = False,
                 show_column_numbers: bool = False,
                 show_error_codes: bool = False,
                 pretty: bool = False,
                 read_source: Optional[Callable[[str], Optional[List[str]]]] = None,
                 show_absolute_path: bool = False,
                 enabled_error_codes: Optional[Set[ErrorCode]] = None,
                 disabled_error_codes: Optional[Set[ErrorCode]] = None) -> None:
        """Store display options and initialize all mutable error state."""
        self.show_error_context = show_error_context
        self.show_column_numbers = show_column_numbers
        self.show_error_codes = show_error_codes
        self.show_absolute_path = show_absolute_path
        self.pretty = pretty
        # We use fscache to read source code when showing snippets.
        self.read_source = read_source
        self.enabled_error_codes = enabled_error_codes or set()
        self.disabled_error_codes = disabled_error_codes or set()
        self.initialize()

    def initialize(self) -> None:
        """(Re)set all mutable error-tracking state to empty."""
        self.error_info_map = OrderedDict()
        self.flushed_files = set()
        self.import_ctx = []
        # NOTE(review): function_or_member is not declared among the class-level
        # attributes above; it only exists after initialize() has run.
        self.function_or_member = [None]
        self.ignored_lines = OrderedDict()
        self.used_ignored_lines = defaultdict(set)
        self.ignored_files = set()
        self.only_once_messages = set()
        self.scope = None
        self.target_module = None

    def reset(self) -> None:
        """Discard all accumulated errors and contexts."""
        self.initialize()

    def copy(self) -> 'Errors':
        """Return a new Errors instance sharing options but with copied context.

        Note that accumulated errors (error_info_map) are NOT copied.
        """
        new = Errors(self.show_error_context,
                     self.show_column_numbers,
                     self.show_error_codes,
                     self.pretty,
                     self.read_source,
                     self.show_absolute_path,
                     self.enabled_error_codes,
                     self.disabled_error_codes)
        new.file = self.file
        new.import_ctx = self.import_ctx[:]
        new.function_or_member = self.function_or_member[:]
        new.target_module = self.target_module
        new.scope = self.scope
        return new

    def total_errors(self) -> int:
        """Return the total number of collected errors across all files."""
        return sum(len(errs) for errs in self.error_info_map.values())

    def set_ignore_prefix(self, prefix: str) -> None:
        """Set path prefix that will be removed from all paths."""
        prefix = os.path.normpath(prefix)
        # Add separator to the end, if not given.
        if os.path.basename(prefix) != '':
            prefix += os.sep
        self.ignore_prefix = prefix

    def simplify_path(self, file: str) -> str:
        """Return the display form of a path, honoring show_absolute_path."""
        if self.show_absolute_path:
            return os.path.abspath(file)
        else:
            file = os.path.normpath(file)
            return remove_path_prefix(file, self.ignore_prefix)

    def set_file(self, file: str,
                 module: Optional[str],
                 scope: Optional[Scope] = None) -> None:
        """Set the path and module id of the current file."""
        # The path will be simplified later, in render_messages. That way
        # * 'file' is always a key that uniquely identifies a source file
        #   that mypy read (simplified paths might not be unique); and
        # * we only have to simplify in one place, while still supporting
        #   reporting errors for files other than the one currently being
        #   processed.
        self.file = file
        self.target_module = module
        self.scope = scope

    def set_file_ignored_lines(self, file: str,
                               ignored_lines: Dict[int, List[str]],
                               ignore_all: bool = False) -> None:
        """Record the "type: ignore" lines (and codes) for a file."""
        self.ignored_lines[file] = ignored_lines
        if ignore_all:
            self.ignored_files.add(file)

    def current_target(self) -> Optional[str]:
        """Retrieves the current target from the associated scope.

        If there is no associated scope, use the target module."""
        if self.scope is not None:
            return self.scope.current_target()
        return self.target_module

    def current_module(self) -> Optional[str]:
        """Return the id of the module currently being processed."""
        return self.target_module

    def import_context(self) -> List[Tuple[str, int]]:
        """Return a copy of the import context."""
        return self.import_ctx[:]

    def set_import_context(self, ctx: List[Tuple[str, int]]) -> None:
        """Replace the entire import context with a new value."""
        self.import_ctx = ctx[:]

    def report(self,
               line: int,
               column: Optional[int],
               message: str,
               code: Optional[ErrorCode] = None,
               *,
               blocker: bool = False,
               severity: str = 'error',
               file: Optional[str] = None,
               only_once: bool = False,
               origin_line: Optional[int] = None,
               offset: int = 0,
               end_line: Optional[int] = None) -> None:
        """Report message at the given line using the current error context.

        Args:
            line: line number of error
            column: column number of error
            message: message to report
            code: error code (defaults to 'misc'; not shown for notes)
            blocker: if True, don't continue analysis after this error
            severity: 'error' or 'note'
            file: if non-None, override current file as context
            only_once: if True, only report this exact message once per build
            origin_line: if non-None, override current context as origin
            end_line: if non-None, override current context as end
        """
        if self.scope:
            type = self.scope.current_type_name()
            if self.scope.ignored > 0:
                type = None  # Omit type context if nested function
            function = self.scope.current_function_name()
        else:
            type = None
            function = None

        if column is None:
            column = -1
        if file is None:
            file = self.file
        if offset:
            # Indent the message (used for multi-line notes).
            message = " " * offset + message

        if origin_line is None:
            origin_line = line
        if end_line is None:
            end_line = origin_line

        # Blockers intentionally carry no code; everything else defaults to 'misc'.
        code = code or (codes.MISC if not blocker else None)

        info = ErrorInfo(self.import_context(), file, self.current_module(), type,
                         function, line, column, severity, message, code,
                         blocker, only_once,
                         origin=(self.file, origin_line, end_line),
                         target=self.current_target())
        self.add_error_info(info)

    def _add_error_info(self, file: str, info: ErrorInfo) -> None:
        """Append an error unconditionally (callers handle ignore filtering)."""
        assert file not in self.flushed_files
        if file not in self.error_info_map:
            self.error_info_map[file] = []
        self.error_info_map[file].append(info)

    def add_error_info(self, info: ErrorInfo) -> None:
        """Add an error, unless it is suppressed by a "type: ignore" or ignored file."""
        file, line, end_line = info.origin
        if not info.blocker:  # Blockers cannot be ignored
            if file in self.ignored_lines:
                # It's okay if end_line is *before* line.
                # Function definitions do this, for example, because the correct
                # error reporting line is at the *end* of the ignorable range
                # (for compatibility reasons). If so, just flip 'em!
                if end_line < line:
                    line, end_line = end_line, line
                # Check each line in this context for "type: ignore" comments.
                # line == end_line for most nodes, so we only loop once.
                for scope_line in range(line, end_line + 1):
                    if self.is_ignored_error(scope_line, info, self.ignored_lines[file]):
                        # Annotation requests us to ignore all errors on this line.
                        self.used_ignored_lines[file].add(scope_line)
                        return
            if file in self.ignored_files:
                return
        if info.only_once:
            if info.message in self.only_once_messages:
                return
            self.only_once_messages.add(info.message)
        self._add_error_info(file, info)

    def is_ignored_error(self, line: int, info: ErrorInfo,
                         ignores: Dict[int, List[str]]) -> bool:
        """Return True if this error should be suppressed on the given line."""
        if info.blocker:
            # Blocking errors can never be ignored
            return False
        if info.code and self.is_error_code_enabled(info.code) is False:
            # Disabled error codes are suppressed everywhere.
            return True
        if line not in ignores:
            return False
        if not ignores[line]:
            # Empty list means that we ignore all errors
            return True
        if info.code and self.is_error_code_enabled(info.code) is True:
            # "type: ignore[code]" only matches the listed codes.
            return info.code.code in ignores[line]
        return False

    def is_error_code_enabled(self, error_code: ErrorCode) -> bool:
        """Return whether an error code is active, honoring explicit overrides."""
        if error_code in self.disabled_error_codes:
            return False
        elif error_code in self.enabled_error_codes:
            return True
        else:
            return error_code.default_enabled

    def clear_errors_in_targets(self, path: str, targets: Set[str]) -> None:
        """Remove errors in specific fine-grained targets within a file."""
        if path in self.error_info_map:
            new_errors = []
            for info in self.error_info_map[path]:
                if info.target not in targets:
                    new_errors.append(info)
                elif info.only_once:
                    # Allow the message to be reported again later.
                    self.only_once_messages.remove(info.message)
            self.error_info_map[path] = new_errors

    def generate_unused_ignore_errors(self, file: str) -> None:
        """Report an error for each "type: ignore" line that suppressed nothing."""
        ignored_lines = self.ignored_lines[file]
        if not is_typeshed_file(file) and file not in self.ignored_files:
            for line in set(ignored_lines) - self.used_ignored_lines[file]:
                # Don't use report since add_error_info will ignore the error!
                info = ErrorInfo(self.import_context(), file, self.current_module(), None,
                                 None, line, -1, 'error', "unused 'type: ignore' comment",
                                 None, False, False)
                self._add_error_info(file, info)

    def num_messages(self) -> int:
        """Return the number of generated messages."""
        return sum(len(x) for x in self.error_info_map.values())

    def is_errors(self) -> bool:
        """Are there any generated errors?"""
        return bool(self.error_info_map)

    def is_blockers(self) -> bool:
        """Are the any errors that are blockers?"""
        return any(err for errs in self.error_info_map.values()
                   for err in errs if err.blocker)

    def blocker_module(self) -> Optional[str]:
        """Return the module with a blocking error, or None if not possible."""
        for errs in self.error_info_map.values():
            for err in errs:
                if err.blocker:
                    return err.module
        return None

    def is_errors_for_file(self, file: str) -> bool:
        """Are there any errors for the given file?"""
        return file in self.error_info_map

    def most_recent_error_location(self) -> Tuple[int, int]:
        """Return (line, column) of the last error reported for the current file."""
        info = self.error_info_map[self.file][-1]
        return info.line, info.column

    def raise_error(self, use_stdout: bool = True) -> None:
        """Raise a CompileError with the generated messages.

        Render the messages suitable for displaying.
        """
        # self.new_messages() will format all messages that haven't already
        # been returned from a file_messages() call.
        raise CompileError(self.new_messages(),
                           use_stdout=use_stdout,
                           module_with_blocker=self.blocker_module())

    def format_messages(self, error_info: List[ErrorInfo],
                        source_lines: Optional[List[str]]) -> List[str]:
        """Return a string list that represents the error messages.

        Use a form suitable for displaying to the user. If self.pretty
        is True also append a relevant trimmed source code line (only for
        severity 'error').
        """
        a = []  # type: List[str]
        errors = self.render_messages(self.sort_messages(error_info))
        errors = self.remove_duplicates(errors)
        for file, line, column, severity, message, code in errors:
            s = ''
            if file is not None:
                if self.show_column_numbers and line >= 0 and column >= 0:
                    # Columns are 0-based internally but shown 1-based.
                    srcloc = '{}:{}:{}'.format(file, line, 1 + column)
                elif line >= 0:
                    srcloc = '{}:{}'.format(file, line)
                else:
                    srcloc = file
                s = '{}: {}: {}'.format(srcloc, severity, message)
            else:
                s = message
            if self.show_error_codes and code and severity != 'note':
                # If note has an error code, it is related to a previous error. Avoid
                # displaying duplicate error codes.
                s = '{} [{}]'.format(s, code.code)
            a.append(s)
            if self.pretty:
                # Add source code fragment and a location marker.
                if severity == 'error' and source_lines and line > 0:
                    source_line = source_lines[line - 1]
                    source_line_expanded = source_line.expandtabs()
                    if column < 0:
                        # Something went wrong, take first non-empty column.
                        column = len(source_line) - len(source_line.lstrip())
                    # Shifts column after tab expansion
                    column = len(source_line[:column].expandtabs())
                    # Note, currently coloring uses the offset to detect source snippets,
                    # so these offsets should not be arbitrary.
                    a.append(' ' * DEFAULT_SOURCE_OFFSET + source_line_expanded)
                    a.append(' ' * (DEFAULT_SOURCE_OFFSET + column) + '^')
        return a

    def file_messages(self, path: str) -> List[str]:
        """Return a string list of new error messages from a given file.

        Use a form suitable for displaying to the user.
        """
        if path not in self.error_info_map:
            return []
        self.flushed_files.add(path)
        source_lines = None
        if self.pretty:
            assert self.read_source
            source_lines = self.read_source(path)
        return self.format_messages(self.error_info_map[path], source_lines)

    def new_messages(self) -> List[str]:
        """Return a string list of new error messages.

        Use a form suitable for displaying to the user.
        Errors from different files are ordered based on the order in which
        they first generated an error.
        """
        msgs = []
        for path in self.error_info_map.keys():
            if path not in self.flushed_files:
                msgs.extend(self.file_messages(path))
        return msgs

    def targets(self) -> Set[str]:
        """Return a set of all targets that contain errors."""
        # TODO: Make sure that either target is always defined or that not being defined
        #       is okay for fine-grained incremental checking.
        return set(info.target
                   for errs in self.error_info_map.values()
                   for info in errs if info.target)

    def render_messages(self, errors: List[ErrorInfo]) -> List[ErrorTuple]:
        """Translate the messages into a sequence of tuples.

        Each tuple is of form (path, line, col, severity, message, code).
        The rendered sequence includes information about error contexts.
        The path item may be None. If the line item is negative, the
        line number is not defined for the tuple.
        """
        result = []  # type: List[ErrorTuple]
        prev_import_context = []  # type: List[Tuple[str, int]]
        prev_function_or_member = None  # type: Optional[str]
        prev_type = None  # type: Optional[str]

        for e in errors:
            # Report module import context, if different from previous message.
            if not self.show_error_context:
                pass
            elif e.import_ctx != prev_import_context:
                last = len(e.import_ctx) - 1
                i = last
                # Walk from the outermost import to the innermost one.
                while i >= 0:
                    path, line = e.import_ctx[i]
                    fmt = '{}:{}: note: In module imported here'
                    if i < last:
                        fmt = '{}:{}: note: ... from here'
                    if i > 0:
                        fmt += ','
                    else:
                        fmt += ':'
                    # Remove prefix to ignore from path (if present) to
                    # simplify path.
                    path = remove_path_prefix(path, self.ignore_prefix)
                    result.append((None, -1, -1, 'note', fmt.format(path, line), None))
                    i -= 1

            file = self.simplify_path(e.file)

            # Report context within a source file.
            if not self.show_error_context:
                pass
            elif (e.function_or_member != prev_function_or_member or
                    e.type != prev_type):
                if e.function_or_member is None:
                    if e.type is None:
                        result.append((file, -1, -1, 'note', 'At top level:', None))
                    else:
                        result.append((file, -1, -1, 'note',
                                       'In class "{}":'.format(e.type), None))
                else:
                    if e.type is None:
                        result.append((file, -1, -1, 'note',
                                       'In function "{}":'.format(
                                           e.function_or_member), None))
                    else:
                        result.append((file, -1, -1, 'note',
                                       'In member "{}" of class "{}":'.format(
                                           e.function_or_member, e.type), None))
            elif e.type != prev_type:
                if e.type is None:
                    result.append((file, -1, -1, 'note', 'At top level:', None))
                else:
                    result.append((file, -1, -1, 'note',
                                   'In class "{}":'.format(e.type), None))

            result.append((file, e.line, e.column, e.severity, e.message, e.code))

            prev_import_context = e.import_ctx
            prev_function_or_member = e.function_or_member
            prev_type = e.type

        return result

    def sort_messages(self, errors: List[ErrorInfo]) -> List[ErrorInfo]:
        """Sort an array of error messages locally by line number.

        I.e., sort a run of consecutive messages with the same
        context by line number, but otherwise retain the general
        ordering of the messages.
        """
        result = []  # type: List[ErrorInfo]
        i = 0
        while i < len(errors):
            i0 = i
            # Find neighbouring errors with the same context and file.
            while (i + 1 < len(errors) and
                    errors[i + 1].import_ctx == errors[i].import_ctx and
                    errors[i + 1].file == errors[i].file):
                i += 1
            i += 1

            # Sort the errors specific to a file according to line number and column.
            a = sorted(errors[i0:i], key=lambda x: (x.line, x.column))
            result.extend(a)
        return result

    def remove_duplicates(self, errors: List[ErrorTuple]) -> List[ErrorTuple]:
        """Remove duplicates from a sorted error list."""
        res = []  # type: List[ErrorTuple]
        i = 0
        while i < len(errors):
            dup = False
            # Use slightly special formatting for member conflicts reporting.
            conflicts_notes = False
            j = i - 1
            # Scan earlier entries for the same file for a 'Got:' note.
            while j >= 0 and errors[j][0] == errors[i][0]:
                if errors[j][4].strip() == 'Got:':
                    conflicts_notes = True
                j -= 1
            j = i - 1
            # Look back over same-file, same-line entries for an identical message.
            while (j >= 0 and errors[j][0] == errors[i][0] and
                    errors[j][1] == errors[i][1]):
                if (errors[j][3] == errors[i][3] and
                        # Allow duplicate notes in overload conflicts reporting.
                        not ((errors[i][3] == 'note' and
                              errors[i][4].strip() in allowed_duplicates) or
                             (errors[i][4].strip().startswith('def ') and
                              conflicts_notes)) and
                        errors[j][4] == errors[i][4]):  # ignore column
                    dup = True
                    break
                j -= 1
            if not dup:
                res.append(errors[i])
            i += 1
        return res