Code example #1
class ClassIR:
    """Intermediate representation of a class.

    This also describes the runtime structure of native instances.
    """

    def __init__(self, name: str, module_name: str, is_trait: bool = False,
                 is_generated: bool = False, is_abstract: bool = False,
                 is_ext_class: bool = True) -> None:
        self.name = name
        self.module_name = module_name
        self.is_trait = is_trait
        self.is_generated = is_generated
        self.is_abstract = is_abstract
        self.is_ext_class = is_ext_class
        # An augmented class has additional methods separate from what mypyc generates.
        # Right now the only one is dataclasses.
        self.is_augmented = False
        # Does this inherit from a Python class?
        self.inherits_python = False
        # Do instances of this class have __dict__?
        self.has_dict = False
        # Do we allow interpreted subclasses? Derived from a mypyc_attr.
        self.allow_interpreted_subclasses = False
        # If this is a subclass of some built-in Python class, the name
        # of the object for that class. We currently only support this
        # in a few ad-hoc cases.
        self.builtin_base = None  # type: Optional[str]
        # Default empty constructor
        self.ctor = FuncDecl(name, None, module_name, FuncSignature([], RInstance(self)))

        self.attributes = OrderedDict()  # type: OrderedDict[str, RType]
        # We populate method_decls with the signatures of every method before
        # we generate methods, and we rely on this information being present.
        self.method_decls = OrderedDict()  # type: OrderedDict[str, FuncDecl]
        # Map of methods that are actually present in an extension class
        self.methods = OrderedDict()  # type: OrderedDict[str, FuncIR]
        # Glue methods for boxing/unboxing when a class changes the type
        # while overriding a method. Maps from (overridden parent class, method)
        # to the IR of the glue method.
        self.glue_methods = OrderedDict()  # type: Dict[Tuple[ClassIR, str], FuncIR]

        # Properties are accessed like attributes, but have behavior like method calls.
        # They don't belong in the methods dictionary, since we don't want to expose them to
        # Python's method API. But we want to put them into our own vtable as methods, so that
        # they are properly handled and overridden. The property dictionary values are a tuple
        # containing a property getter and an optional property setter.
        self.properties = OrderedDict()  # type: OrderedDict[str, Tuple[FuncIR, Optional[FuncIR]]]
        # We generate these in prepare_class_def so that we have access to them when generating
        # other methods and properties that rely on these types.
        self.property_types = OrderedDict()  # type: OrderedDict[str, RType]

        self.vtable = None  # type: Optional[Dict[str, int]]
        self.vtable_entries = []  # type: VTableEntries
        self.trait_vtables = OrderedDict()  # type: OrderedDict[ClassIR, VTableEntries]
        # N.B.: base might not actually be the direct base.
        # It is the nearest concrete base, but we allow a trait in between.
        self.base = None  # type: Optional[ClassIR]
        self.traits = []  # type: List[ClassIR]
        # Supply a working mro for most generated classes. Real classes will need to
        # fix it up.
        self.mro = [self]  # type: List[ClassIR]
        # base_mro is the chain of concrete (non-trait) ancestors
        self.base_mro = [self]  # type: List[ClassIR]

        # Direct subclasses of this class (use subclasses() to also include non-direct ones)
        # None if separate compilation prevents this from working
        self.children = []  # type: Optional[List[ClassIR]]

    @property
    def fullname(self) -> str:
        return "{}.{}".format(self.module_name, self.name)

    def real_base(self) -> Optional['ClassIR']:
        """Return the actual concrete base class, if there is one."""
        if len(self.mro) > 1 and not self.mro[1].is_trait:
            return self.mro[1]
        return None

    def vtable_entry(self, name: str) -> int:
        assert self.vtable is not None, "vtable not computed yet"
        assert name in self.vtable, '%r has no attribute %r' % (self.name, name)
        return self.vtable[name]

    def attr_details(self, name: str) -> Tuple[RType, 'ClassIR']:
        for ir in self.mro:
            if name in ir.attributes:
                return ir.attributes[name], ir
            if name in ir.property_types:
                return ir.property_types[name], ir
        raise KeyError('%r has no attribute %r' % (self.name, name))

    def attr_type(self, name: str) -> RType:
        return self.attr_details(name)[0]

    def method_decl(self, name: str) -> FuncDecl:
        for ir in self.mro:
            if name in ir.method_decls:
                return ir.method_decls[name]
        raise KeyError('%r has no attribute %r' % (self.name, name))

    def method_sig(self, name: str) -> FuncSignature:
        return self.method_decl(name).sig

    def has_method(self, name: str) -> bool:
        try:
            self.method_decl(name)
        except KeyError:
            return False
        return True

    def is_method_final(self, name: str) -> bool:
        subs = self.subclasses()
        if subs is None:
            # TODO: Look at the final attribute!
            return False

        if self.has_method(name):
            method_decl = self.method_decl(name)
            for subc in subs:
                if subc.method_decl(name) != method_decl:
                    return False
            return True
        else:
            return not any(subc.has_method(name) for subc in subs)

    def has_attr(self, name: str) -> bool:
        try:
            self.attr_type(name)
        except KeyError:
            return False
        return True

    def name_prefix(self, names: NameGenerator) -> str:
        return names.private_name(self.module_name, self.name)

    def struct_name(self, names: NameGenerator) -> str:
        return '{}Object'.format(exported_name(self.fullname))

    def get_method_and_class(self, name: str) -> Optional[Tuple[FuncIR, 'ClassIR']]:
        for ir in self.mro:
            if name in ir.methods:
                return ir.methods[name], ir

        return None

    def get_method(self, name: str) -> Optional[FuncIR]:
        res = self.get_method_and_class(name)
        return res[0] if res else None

    def subclasses(self) -> Optional[Set['ClassIR']]:
        """Return all subclassses of this class, both direct and indirect.

        Return None if it is impossible to identify all subclasses, for example
        because we are performing separate compilation.
        """
        if self.children is None or self.allow_interpreted_subclasses:
            return None
        result = set(self.children)
        for child in self.children:
            if child.children:
                child_subs = child.subclasses()
                if child_subs is None:
                    return None
                result.update(child_subs)
        return result

    def concrete_subclasses(self) -> Optional[List['ClassIR']]:
        """Return all concrete (i.e. non-trait and non-abstract) subclasses.

        Include both direct and indirect subclasses. Place classes with no children first.
        """
        subs = self.subclasses()
        if subs is None:
            return None
        concrete = {c for c in subs if not (c.is_trait or c.is_abstract)}
        # We place classes with no children first because they are more likely
        # to appear in various isinstance() checks. We then sort leaves by name
        # to get stable order.
        return sorted(concrete, key=lambda c: (len(c.children or []), c.name))

    def serialize(self) -> JsonDict:
        return {
            'name': self.name,
            'module_name': self.module_name,
            'is_trait': self.is_trait,
            'is_ext_class': self.is_ext_class,
            'is_abstract': self.is_abstract,
            'is_generated': self.is_generated,
            'is_augmented': self.is_augmented,
            'inherits_python': self.inherits_python,
            'has_dict': self.has_dict,
            'allow_interpreted_subclasses': self.allow_interpreted_subclasses,
            'builtin_base': self.builtin_base,
            'ctor': self.ctor.serialize(),
            # We serialize dicts as lists to ensure order is preserved
            'attributes': [(k, t.serialize()) for k, t in self.attributes.items()],
            # We try to serialize a name reference, but if the decl isn't in methods
            # then we can't be sure that will work so we serialize the whole decl.
            'method_decls': [(k, d.fullname if k in self.methods else d.serialize())
                             for k, d in self.method_decls.items()],
            # We serialize method fullnames out and put methods in a separate dict
            'methods': [(k, m.fullname) for k, m in self.methods.items()],
            'glue_methods': [
                ((cir.fullname, k), m.fullname)
                for (cir, k), m in self.glue_methods.items()
            ],

            # We serialize properties and property_types separately out of an
            # abundance of caution about preserving dict ordering...
            'property_types': [(k, t.serialize()) for k, t in self.property_types.items()],
            'properties': list(self.properties),

            'vtable': self.vtable,
            'vtable_entries': serialize_vtable(self.vtable_entries),
            'trait_vtables': [
                (cir.fullname, serialize_vtable(v)) for cir, v in self.trait_vtables.items()
            ],

            # References to class IRs are all just names
            'base': self.base.fullname if self.base else None,
            'traits': [cir.fullname for cir in self.traits],
            'mro': [cir.fullname for cir in self.mro],
            'base_mro': [cir.fullname for cir in self.base_mro],
            'children': [
                cir.fullname for cir in self.children
            ] if self.children is not None else None,
        }

    @classmethod
    def deserialize(cls, data: JsonDict, ctx: 'DeserMaps') -> 'ClassIR':
        fullname = data['module_name'] + '.' + data['name']
        assert fullname in ctx.classes, "Class %s not in deser class map" % fullname
        ir = ctx.classes[fullname]

        ir.is_trait = data['is_trait']
        ir.is_generated = data['is_generated']
        ir.is_abstract = data['is_abstract']
        ir.is_ext_class = data['is_ext_class']
        ir.is_augmented = data['is_augmented']
        ir.inherits_python = data['inherits_python']
        ir.has_dict = data['has_dict']
        ir.allow_interpreted_subclasses = data['allow_interpreted_subclasses']
        ir.builtin_base = data['builtin_base']
        ir.ctor = FuncDecl.deserialize(data['ctor'], ctx)
        ir.attributes = OrderedDict(
            (k, deserialize_type(t, ctx)) for k, t in data['attributes']
        )
        ir.method_decls = OrderedDict((k, ctx.functions[v].decl
                                       if isinstance(v, str) else FuncDecl.deserialize(v, ctx))
                                      for k, v in data['method_decls'])
        ir.methods = OrderedDict((k, ctx.functions[v]) for k, v in data['methods'])
        ir.glue_methods = OrderedDict(
            ((ctx.classes[c], k), ctx.functions[v]) for (c, k), v in data['glue_methods']
        )
        ir.property_types = OrderedDict(
            (k, deserialize_type(t, ctx)) for k, t in data['property_types']
        )
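        # Properties were serialized as a list of names only; rebuild each
        # entry as (getter, optional setter), where setters are stored in
        # methods under PROPSET_PREFIX-prefixed keys.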
        ir.properties = OrderedDict(
            (k, (ir.methods[k], ir.methods.get(PROPSET_PREFIX + k))) for k in data['properties']
        )

        ir.vtable = data['vtable']
        ir.vtable_entries = deserialize_vtable(data['vtable_entries'], ctx)
        ir.trait_vtables = OrderedDict(
            (ctx.classes[k], deserialize_vtable(v, ctx)) for k, v in data['trait_vtables']
        )

        base = data['base']
        ir.base = ctx.classes[base] if base else None
        ir.traits = [ctx.classes[s] for s in data['traits']]
        ir.mro = [ctx.classes[s] for s in data['mro']]
        ir.base_mro = [ctx.classes[s] for s in data['base_mro']]
        ir.children = data['children'] and [ctx.classes[s] for s in data['children']]

        return ir
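
A minimal, self-contained sketch of the MRO-walk lookup that attr_details()
and method_decl() perform above; the _Node class below is an illustrative
stand-in, not the real ClassIR. Each class stores only its own members, and
lookups scan the linearized MRO from most to least derived, returning the
first match:

class _Node:
    def __init__(self, name, attrs, base=None):
        self.name = name
        self.attributes = attrs  # members declared directly on this class
        self.mro = [self] + (base.mro if base else [])

    def attr_type(self, name):
        # Walk the MRO from most to least derived, like ClassIR.attr_details().
        for ir in self.mro:
            if name in ir.attributes:
                return ir.attributes[name]
        raise KeyError('%r has no attribute %r' % (self.name, name))

base = _Node('Base', {'x': 'int'})
child = _Node('Child', {'y': 'str'}, base)
assert child.attr_type('x') == 'int'  # found on Base via the MRO walk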
Code example #2
File: emitclass.py Project: wimax-grapl/mypy
def generate_class(cl: ClassIR, module: str, emitter: Emitter) -> None:
    """Generate C code for a class.

    This is the main entry point to the module.
    """
    name = cl.name
    name_prefix = cl.name_prefix(emitter.names)

    setup_name = '{}_setup'.format(name_prefix)
    new_name = '{}_new'.format(name_prefix)
    members_name = '{}_members'.format(name_prefix)
    getseters_name = '{}_getseters'.format(name_prefix)
    vtable_name = '{}_vtable'.format(name_prefix)
    traverse_name = '{}_traverse'.format(name_prefix)
    clear_name = '{}_clear'.format(name_prefix)
    dealloc_name = '{}_dealloc'.format(name_prefix)
    methods_name = '{}_methods'.format(name_prefix)
    vtable_setup_name = '{}_trait_vtable_setup'.format(name_prefix)

    fields = OrderedDict()  # type: Dict[str, str]
    fields['tp_name'] = '"{}"'.format(name)

    generate_full = not cl.is_trait and not cl.builtin_base
    needs_getseters = not cl.is_generated
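    # generate_full gates the heavy object machinery (setup/new/traverse/
    # clear/dealloc and the vtables), which traits and builtin-base classes
    # skip; generated (non-user-visible) classes don't need getseter tables.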

    if not cl.builtin_base:
        fields['tp_new'] = new_name

    if generate_full:
        fields['tp_dealloc'] = '(destructor){}_dealloc'.format(name_prefix)
        fields['tp_traverse'] = '(traverseproc){}_traverse'.format(name_prefix)
        fields['tp_clear'] = '(inquiry){}_clear'.format(name_prefix)
    if needs_getseters:
        fields['tp_getset'] = getseters_name
    fields['tp_methods'] = methods_name

    def emit_line() -> None:
        emitter.emit_line()

    emit_line()

    # If the class has a method to initialize default attribute
    # values, we need to call it during initialization.
    defaults_fn = cl.get_method('__mypyc_defaults_setup')

    # If there is a __init__ method, we'll use it in the native constructor.
    init_fn = cl.get_method('__init__')

    # Fill out slots in the type object from dunder methods.
    fields.update(generate_slots(cl, SLOT_DEFS, emitter))

    # Fill out dunder methods that live in tables hanging off the side.
    for table_name, type, slot_defs in SIDE_TABLES:
        slots = generate_slots(cl, slot_defs, emitter)
        if slots:
            table_struct_name = generate_side_table_for_class(
                cl, table_name, type, slots, emitter)
            fields['tp_{}'.format(table_name)] = '&{}'.format(
                table_struct_name)

    richcompare_name = generate_richcompare_wrapper(cl, emitter)
    if richcompare_name:
        fields['tp_richcompare'] = richcompare_name

    # If the class inherits from Python, make space for a __dict__
    struct_name = cl.struct_name(emitter.names)
    if cl.builtin_base:
        base_size = 'sizeof({})'.format(cl.builtin_base)
    elif cl.is_trait:
        base_size = 'sizeof(PyObject)'
    else:
        base_size = 'sizeof({})'.format(struct_name)
    # Since our types aren't allocated using type() we need to
    # populate these fields ourselves if we want them to have correct
    # values. PyType_Ready will inherit the offsets from tp_base but
    # that isn't what we want.

    # XXX: there is no reason for the __weakref__ stuff to be mixed up with __dict__
    if cl.has_dict:
        # __dict__ lives right after the struct and __weakref__ lives right after that
        # TODO: They should get members in the struct instead of doing this nonsense.
        weak_offset = '{} + sizeof(PyObject *)'.format(base_size)
        emitter.emit_lines(
            'PyMemberDef {}[] = {{'.format(members_name),
            '{{"__dict__", T_OBJECT_EX, {}, 0, NULL}},'.format(base_size),
            '{{"__weakref__", T_OBJECT_EX, {}, 0, NULL}},'.format(weak_offset),
            '{0}',
            '};',
        )

        fields['tp_members'] = members_name
        fields['tp_basicsize'] = '{} + 2*sizeof(PyObject *)'.format(base_size)
        fields['tp_dictoffset'] = base_size
        fields['tp_weaklistoffset'] = weak_offset
    else:
        fields['tp_basicsize'] = base_size

    if generate_full:
        # Declare setup method that allocates and initializes an object. type is the
        # type of the class being initialized, which could be another class if there
        # is an interpreted subclass.
        emitter.emit_line(
            'static PyObject *{}(PyTypeObject *type);'.format(setup_name))
        assert cl.ctor is not None
        emitter.emit_line(native_function_header(cl.ctor, emitter) + ';')

        emit_line()
        generate_new_for_class(cl, new_name, vtable_name, setup_name, emitter)
        emit_line()
        generate_traverse_for_class(cl, traverse_name, emitter)
        emit_line()
        generate_clear_for_class(cl, clear_name, emitter)
        emit_line()
        generate_dealloc_for_class(cl, dealloc_name, clear_name, emitter)
        emit_line()

        if cl.allow_interpreted_subclasses:
            shadow_vtable_name = generate_vtables(
                cl,
                vtable_setup_name + "_shadow",
                vtable_name + "_shadow",
                emitter,
                shadow=True)  # type: Optional[str]
            emit_line()
        else:
            shadow_vtable_name = None
        vtable_name = generate_vtables(cl,
                                       vtable_setup_name,
                                       vtable_name,
                                       emitter,
                                       shadow=False)
        emit_line()
    if needs_getseters:
        generate_getseter_declarations(cl, emitter)
        emit_line()
        generate_getseters_table(cl, getseters_name, emitter)
        emit_line()

    if cl.is_trait:
        generate_new_for_trait(cl, new_name, emitter)

    generate_methods_table(cl, methods_name, emitter)
    emit_line()

    flags = [
        'Py_TPFLAGS_DEFAULT', 'Py_TPFLAGS_HEAPTYPE', 'Py_TPFLAGS_BASETYPE'
    ]
    if generate_full:
        flags.append('Py_TPFLAGS_HAVE_GC')
    if cl.has_method('__call__') and emitter.use_vectorcall():
        fields['tp_vectorcall_offset'] = 'offsetof({}, vectorcall)'.format(
            cl.struct_name(emitter.names))
        flags.append('_Py_TPFLAGS_HAVE_VECTORCALL')
    fields['tp_flags'] = ' | '.join(flags)

    emitter.emit_line("static PyTypeObject {}_template_ = {{".format(
        emitter.type_struct_name(cl)))
    emitter.emit_line("PyVarObject_HEAD_INIT(NULL, 0)")
    for field, value in fields.items():
        emitter.emit_line(".{} = {},".format(field, value))
    emitter.emit_line("};")
    emitter.emit_line(
        "static PyTypeObject *{t}_template = &{t}_template_;".format(
            t=emitter.type_struct_name(cl)))

    emitter.emit_line()
    if generate_full:
        generate_setup_for_class(cl, setup_name, defaults_fn, vtable_name,
                                 shadow_vtable_name, emitter)
        emitter.emit_line()
        generate_constructor_for_class(cl, cl.ctor, init_fn, setup_name,
                                       vtable_name, emitter)
        emitter.emit_line()
    if needs_getseters:
        generate_getseters(cl, emitter)
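
The type-object template above is built by collecting slot-name to
C-expression pairs in 'fields' and flattening them into C designated
initializers. A standalone sketch of that emission step (hypothetical
'Example' names, a plain print instead of the Emitter API):

from collections import OrderedDict

fields = OrderedDict()  # slot name -> C expression, as in generate_class()
fields['tp_name'] = '"Example"'
fields['tp_basicsize'] = 'sizeof(ExampleObject)'
fields['tp_flags'] = 'Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE'

lines = ['static PyTypeObject Example_template_ = {',
         'PyVarObject_HEAD_INIT(NULL, 0)']
lines += ['.{} = {},'.format(field, value) for field, value in fields.items()]
lines.append('};')
print('\n'.join(lines))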
Code example #3
class Environment:
    """Maintain the register symbol table and manage temp generation"""
    def __init__(self, name: Optional[str] = None) -> None:
        self.name = name
        self.indexes = OrderedDict()  # type: Dict[Value, int]
        self.symtable = OrderedDict()  # type: OrderedDict[SymbolNode, AssignmentTarget]
        self.temp_index = 0
        self.temp_load_int_idx = 0
        # All names generated; value is the number of duplicates seen.
        self.names = {}  # type: Dict[str, int]
        self.vars_needing_init = set()  # type: Set[Value]

    def regs(self) -> Iterable['Value']:
        return self.indexes.keys()

    def add(self, reg: 'Value', name: str) -> None:
        # Ensure uniqueness of variable names in this environment.
        # This is needed for things like list comprehensions, which are their own scope--
        # if we don't do this and two comprehensions use the same variable, we'd try to
        # declare that variable twice.
        unique_name = name
        while unique_name in self.names:
            unique_name = name + str(self.names[name])
            self.names[name] += 1
        self.names[unique_name] = 0
        reg.name = unique_name

        self.indexes[reg] = len(self.indexes)

    def add_local(self,
                  symbol: SymbolNode,
                  typ: RType,
                  is_arg: bool = False) -> 'Register':
        """Add register that represents a symbol to the symbol table.

        Args:
            is_arg: is this a function argument
        """
        assert isinstance(symbol, SymbolNode)
        reg = Register(typ, symbol.line, is_arg=is_arg)
        self.symtable[symbol] = AssignmentTargetRegister(reg)
        self.add(reg, symbol.name)
        return reg

    def add_local_reg(self,
                      symbol: SymbolNode,
                      typ: RType,
                      is_arg: bool = False) -> AssignmentTargetRegister:
        """Like add_local, but return an assignment target instead of value."""
        self.add_local(symbol, typ, is_arg)
        target = self.symtable[symbol]
        assert isinstance(target, AssignmentTargetRegister)
        return target

    def add_target(self, symbol: SymbolNode,
                   target: AssignmentTarget) -> AssignmentTarget:
        self.symtable[symbol] = target
        return target

    def lookup(self, symbol: SymbolNode) -> AssignmentTarget:
        return self.symtable[symbol]

    def add_temp(self, typ: RType) -> 'Register':
        """Add register that contains a temporary value with the given type."""
        assert isinstance(typ, RType)
        reg = Register(typ)
        self.add(reg, 'r%d' % self.temp_index)
        self.temp_index += 1
        return reg

    def add_op(self, reg: 'RegisterOp') -> None:
        """Record the value of an operation."""
        if reg.is_void:
            return
        if isinstance(reg, LoadInt):
            self.add(reg, "i%d" % self.temp_load_int_idx)
            self.temp_load_int_idx += 1
            return
        self.add(reg, 'r%d' % self.temp_index)
        self.temp_index += 1

    def format(self, fmt: str, *args: Any) -> str:
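        """Substitute printf-like format specifiers in fmt.

        Supported specifiers: %r (value name), %d (int), %f (float),
        %l (basic block label), %s (str of the argument).
        """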
        result = []
        i = 0
        arglist = list(args)
        while i < len(fmt):
            n = fmt.find('%', i)
            if n < 0:
                n = len(fmt)
            result.append(fmt[i:n])
            if n < len(fmt):
                typespec = fmt[n + 1]
                arg = arglist.pop(0)
                if typespec == 'r':
                    result.append(arg.name)
                elif typespec == 'd':
                    result.append('%d' % arg)
                elif typespec == 'f':
                    result.append('%f' % arg)
                elif typespec == 'l':
                    if isinstance(arg, BasicBlock):
                        arg = arg.label
                    result.append('L%s' % arg)
                elif typespec == 's':
                    result.append(str(arg))
                else:
                    raise ValueError(
                        'Invalid format sequence %{}'.format(typespec))
                i = n + 2
            else:
                i = n
        return ''.join(result)

    def to_lines(self,
                 const_regs: Optional[Dict[str, int]] = None) -> List[str]:
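        """Render registers as 'name1, name2, ... :: type' lines.

        Consecutive registers that share a type are grouped onto one line;
        registers named in const_regs are omitted.
        """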
        result = []
        i = 0
        regs = list(self.regs())
        if const_regs is None:
            const_regs = {}
        regs = [reg for reg in regs if reg.name not in const_regs]
        while i < len(regs):
            i0 = i
            group = [regs[i0].name]
            while i + 1 < len(regs) and regs[i + 1].type == regs[i0].type:
                i += 1
                group.append(regs[i].name)
            i += 1
            result.append('%s :: %s' % (', '.join(group), regs[i0].type))
        return result
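
A standalone sketch of the name-uniquifying loop in Environment.add() above;
the helper below is illustrative only, not part of mypyc. The dict maps each
generated name to the number of duplicates seen, so colliding names get a
numeric suffix:

names = {}  # generated name -> number of duplicates seen so far

def unique(name):
    # Mirrors the loop in Environment.add(): append the duplicate count
    # until the candidate is unused, then register the new name.
    unique_name = name
    while unique_name in names:
        unique_name = name + str(names[name])
        names[name] += 1
    names[unique_name] = 0
    return unique_name

assert unique('x') == 'x'
assert unique('x') == 'x0'
assert unique('x') == 'x1'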
Code example #4
class Options:
    """Options collected from flags."""

    def __init__(self) -> None:
        # Cache for clone_for_module()
        self._per_module_cache = None  # type: Optional[Dict[str, Options]]

        # -- build options --
        self.build_type = BuildType.STANDARD
        self.python_version = sys.version_info[:2]  # type: Tuple[int, int]
        # The executable used to search for PEP 561 packages. If this is None,
        # then mypy does not search for PEP 561 packages.
        self.python_executable = sys.executable  # type: Optional[str]
        self.platform = sys.platform
        self.custom_typing_module = None  # type: Optional[str]
        self.custom_typeshed_dir = None  # type: Optional[str]
        self.mypy_path = []  # type: List[str]
        self.report_dirs = {}  # type: Dict[str, str]
        # Show errors in PEP 561 packages/site-packages modules
        self.no_silence_site_packages = False
        self.no_site_packages = False
        self.ignore_missing_imports = False
        self.follow_imports = 'normal'  # normal|silent|skip|error
        # Whether to respect the follow_imports setting even for stub files.
        # Intended to be used for disabling specific stubs.
        self.follow_imports_for_stubs = False
        # PEP 420 namespace packages
        self.namespace_packages = False

        # disallow_any options
        self.disallow_any_generics = False
        self.disallow_any_unimported = False
        self.disallow_any_expr = False
        self.disallow_any_decorated = False
        self.disallow_any_explicit = False

        # Disallow calling untyped functions from typed ones
        self.disallow_untyped_calls = False

        # Disallow defining untyped (or incompletely typed) functions
        self.disallow_untyped_defs = False

        # Disallow defining incompletely typed functions
        self.disallow_incomplete_defs = False

        # Type check unannotated functions
        self.check_untyped_defs = False

        # Disallow decorating typed functions with untyped decorators
        self.disallow_untyped_decorators = False

        # Disallow subclassing values of type 'Any'
        self.disallow_subclassing_any = False

        # Also check typeshed for missing annotations
        self.warn_incomplete_stub = False

        # Warn about casting an expression to its inferred type
        self.warn_redundant_casts = False

        # Warn about falling off the end of a function returning non-None
        self.warn_no_return = True

        # Warn about returning objects of type Any when the function is
        # declared with a precise type
        self.warn_return_any = False

        # Warn about unused '# type: ignore' comments
        self.warn_unused_ignores = False

        # Warn about unused '[mypy-<pattern>]' config sections
        self.warn_unused_configs = False

        # Files in which to ignore all non-fatal errors
        self.ignore_errors = False

        # Apply strict None checking
        self.strict_optional = True

        # Show "note: In function "foo":" messages.
        self.show_error_context = False

        # Use nicer output (when possible).
        self.color_output = True
        self.error_summary = True

        # Files in which to allow strict-Optional related errors
        # TODO: Kill this in favor of show_none_errors
        self.strict_optional_whitelist = None   # type: Optional[List[str]]

        # Alternate way to show/hide strict-None-checking related errors
        self.show_none_errors = True

        # Don't assume arguments with default values of None are Optional
        self.no_implicit_optional = False

        # Don't re-export names unless they are imported with `from ... as ...`
        self.implicit_reexport = True

        # Suppress toplevel errors caused by missing annotations
        self.allow_untyped_globals = False

        # Allow variable to be redefined with an arbitrary type in the same block
        # and the same nesting level as the initialization
        self.allow_redefinition = False

        # Prohibit equality, identity, and container checks for non-overlapping types.
        # This makes 1 == '1', 1 in ['1'], and 1 is '1' errors.
        self.strict_equality = False

        # Report an error for any branches inferred to be unreachable as a result of
        # type analysis.
        self.warn_unreachable = False

        # Variable names considered True
        self.always_true = []  # type: List[str]

        # Variable names considered False
        self.always_false = []  # type: List[str]

        # Use script name instead of __main__
        self.scripts_are_modules = False

        # Config file name
        self.config_file = None  # type: Optional[str]

        # A filename containing a JSON mapping from filenames to
        # mtime/size/hash arrays, used to avoid having to recalculate
        # source hashes as often.
        self.quickstart_file = None  # type: Optional[str]

        # A comma-separated list of files/directories for mypy to type check;
        # supports globbing
        self.files = None  # type: Optional[List[str]]

        # Write junit.xml to given file
        self.junit_xml = None  # type: Optional[str]

        # Caching and incremental checking options
        self.incremental = True
        self.cache_dir = defaults.CACHE_DIR
        self.sqlite_cache = False
        self.debug_cache = False
        self.skip_version_check = False
        self.skip_cache_mtime_checks = False
        self.fine_grained_incremental = False
        # Include fine-grained dependencies in written cache files
        self.cache_fine_grained = False
        # Read cache files in fine-grained incremental mode (cache must include dependencies)
        self.use_fine_grained_cache = False

        # Tune certain behaviors when being used as a front-end to mypyc. Set per-module
        # in modules being compiled. Not in the config file or command line.
        self.mypyc = False

        # Disable the memory optimization of freeing ASTs when
        # possible. This isn't exposed as a command line option
        # because it is intended for software integrating with
        # mypy. (Like mypyc.)
        self.preserve_asts = False

        # Paths of user plugins
        self.plugins = []  # type: List[str]

        # Per-module options (raw)
        self.per_module_options = OrderedDict()  # type: OrderedDict[str, Dict[str, object]]
        self._glob_options = []  # type: List[Tuple[str, Pattern[str]]]
        self.unused_configs = set()  # type: Set[str]

        # -- development options --
        self.verbosity = 0  # More verbose messages (for troubleshooting)
        self.pdb = False
        self.show_traceback = False
        self.raise_exceptions = False
        self.dump_type_stats = False
        self.dump_inference_stats = False
        self.dump_build_stats = False

        # -- test options --
        # Stop after the semantic analysis phase
        self.semantic_analysis_only = False

        # Use stub builtins fixtures to speed up tests
        self.use_builtins_fixtures = False

        # -- experimental options --
        self.shadow_file = None  # type: Optional[List[List[str]]]
        self.show_column_numbers = False  # type: bool
        self.show_error_codes = False
        # Use soft word wrap and show trimmed source snippets with error location markers.
        self.pretty = False
        self.dump_graph = False
        self.dump_deps = False
        self.logical_deps = False
        # If True, partial types can't span a module top level and a function
        self.local_partial_types = False
        # Some behaviors are changed when using Bazel (https://bazel.build).
        self.bazel = False
        # If True, export inferred types for all expressions as BuildResult.types
        self.export_types = False
        # List of package roots -- directories under these are packages even
        # if they don't have __init__.py.
        self.package_root = []  # type: List[str]
        self.cache_map = {}  # type: Dict[str, Tuple[str, str]]
        # Don't properly free objects on exit, just kill the current process.
        self.fast_exit = False
        # Used to transform source code before parsing if not None
        # TODO: Make the type precise (AnyStr -> AnyStr)
        self.transform_source = None  # type: Optional[Callable[[Any], Any]]
        # Print full path to each file in the report.
        self.show_absolute_path = False  # type: bool

    # To avoid breaking plugin compatibility, keep providing new_semantic_analyzer
    @property
    def new_semantic_analyzer(self) -> bool:
        return True

    def snapshot(self) -> object:
        """Produce a comparable snapshot of this Option"""
        # Under mypyc, we don't have a __dict__, so we need to do worse things.
        d = dict(getattr(self, '__dict__', ()))
        for k in get_class_descriptors(Options):
            if hasattr(self, k) and k != "new_semantic_analyzer":
                d[k] = getattr(self, k)
        # Remove private attributes from snapshot
        d = {k: v for k, v in d.items() if not k.startswith('_')}
        return d

    def __repr__(self) -> str:
        return 'Options({})'.format(pprint.pformat(self.snapshot()))

    def apply_changes(self, changes: Dict[str, object]) -> 'Options':
        new_options = Options()
        # Under mypyc, we don't have a __dict__, so we need to do worse things.
        replace_object_state(new_options, self, copy_dict=True)
        for key, value in changes.items():
            setattr(new_options, key, value)
        return new_options

    def build_per_module_cache(self) -> None:
        self._per_module_cache = {}

        # Config precedence is as follows:
        #  1. Concrete section names: foo.bar.baz
        #  2. "Unstructured" glob patterns: foo.*.baz, in the order
        #     they appear in the file (last wins)
        #  3. "Well-structured" wildcard patterns: foo.bar.*, in specificity order.

        # Since structured configs inherit from structured configs above them in the hierarchy,
        # we need to process per-module configs in a careful order.
        # We have to process foo.* before foo.bar.* before foo.bar,
        # and we need to apply *.bar to foo.bar but not to foo.bar.*.
        # To do this, process all well-structured glob configs before non-glob configs and
        # exploit the fact that foo.* sorts earlier ASCIIbetically (unicodebetically?)
        # than foo.bar.*.
        # (A section being "processed last" results in its config "winning".)
        # Unstructured glob configs are stored and are all checked for each module.
        unstructured_glob_keys = [k for k in self.per_module_options.keys()
                                  if '*' in k[:-1]]
        structured_keys = [k for k in self.per_module_options.keys()
                           if '*' not in k[:-1]]
        wildcards = sorted(k for k in structured_keys if k.endswith('.*'))
        concrete = [k for k in structured_keys if not k.endswith('.*')]

        for glob in unstructured_glob_keys:
            self._glob_options.append((glob, self.compile_glob(glob)))

        # We (for ease of implementation) treat unstructured glob
        # sections as used if any real modules use them or if any
        # concrete config sections use them. This means we need to
        # track which get used while constructing.
        self.unused_configs = set(unstructured_glob_keys)

        for key in wildcards + concrete:
            # Find what the options for this key would be, just based
            # on inheriting from parent configs.
            options = self.clone_for_module(key)
            # And then update it with its per-module options.
            self._per_module_cache[key] = options.apply_changes(self.per_module_options[key])

        # Add the more structured sections into unused configs, since
        # they only count as used if actually used by a real module.
        self.unused_configs.update(structured_keys)

    def clone_for_module(self, module: str) -> 'Options':
        """Create an Options object that incorporates per-module options.

        NOTE: Once this method is called all Options objects should be
        considered read-only, else the caching might be incorrect.
        """
        if self._per_module_cache is None:
            self.build_per_module_cache()
        assert self._per_module_cache is not None

        # If the module just directly has a config entry, use it.
        if module in self._per_module_cache:
            self.unused_configs.discard(module)
            return self._per_module_cache[module]

        # If not, search for glob paths at all the parents. So if we are looking for
        # options for foo.bar.baz, we search foo.bar.baz.*, foo.bar.*, foo.*,
        # in that order, looking for an entry.
        # This is technically quadratic in the length of the path, but module paths
        # don't actually get all that long.
        options = self
        path = module.split('.')
        for i in range(len(path), 0, -1):
            key = '.'.join(path[:i] + ['*'])
            if key in self._per_module_cache:
                self.unused_configs.discard(key)
                options = self._per_module_cache[key]
                break

        # OK and *now* we need to look for unstructured glob matches.
        # We only do this for concrete modules, not structured wildcards.
        if not module.endswith('.*'):
            for key, pattern in self._glob_options:
                if pattern.match(module):
                    self.unused_configs.discard(key)
                    options = options.apply_changes(self.per_module_options[key])

        # We could update the cache to directly point to modules once
        # they have been looked up, but in testing this made things
        # slower and not faster, so we don't bother.

        return options

    def compile_glob(self, s: str) -> Pattern[str]:
        # Compile one of the glob patterns to a regex so that '.*' can
        # match *zero or more* module sections. This means we compile
        # '.*' into '(\..*)?'.
        parts = s.split('.')
        expr = re.escape(parts[0]) if parts[0] != '*' else '.*'
        for part in parts[1:]:
            expr += re.escape('.' + part) if part != '*' else r'(\..*)?'
        return re.compile(expr + '\\Z')

    def select_options_affecting_cache(self) -> Mapping[str, object]:
        return {opt: getattr(self, opt) for opt in OPTIONS_AFFECTING_CACHE}
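
To see the glob rule compile_glob() implements, note that a '*' section
after a dot compiles to '(\..*)?', so 'foo.*' matches 'foo' itself as well
as arbitrarily deep submodules. The function below just restates the method
above for standalone use:

import re

def compile_glob(s):
    # Same rule as Options.compile_glob() above.
    parts = s.split('.')
    expr = re.escape(parts[0]) if parts[0] != '*' else '.*'
    for part in parts[1:]:
        expr += re.escape('.' + part) if part != '*' else r'(\..*)?'
    return re.compile(expr + '\\Z')

pattern = compile_glob('foo.*')
assert pattern.match('foo')          # '(\..*)?' may match zero sections
assert pattern.match('foo.bar.baz')  # ... or arbitrarily many
assert not pattern.match('food')     # only whole dotted sections count
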
class Errors:
    """Container for compile errors.

    This class generates and keeps track of compile errors and the
    current error context (nested imports).
    """

    # Map from files to generated error messages. Is an OrderedDict so
    # that it can be used to order messages based on the order the
    # files were processed.
    error_info_map = None  # type: Dict[str, List[ErrorInfo]]

    # Files that we have reported the errors for
    flushed_files = None  # type: Set[str]

    # Current error context: nested import context/stack, as a list of (path, line) pairs.
    import_ctx = None  # type: List[Tuple[str, int]]

    # Path name prefix that is removed from all paths, if set.
    ignore_prefix = None  # type: Optional[str]

    # Path to current file.
    file = ''  # type: str

    # Ignore some errors on these lines of each file
    # (path -> line -> error-codes)
    ignored_lines = None  # type: Dict[str, Dict[int, List[str]]]

    # Lines on which an error was actually ignored.
    used_ignored_lines = None  # type: Dict[str, Set[int]]

    # Files where all errors should be ignored.
    ignored_files = None  # type: Set[str]

    # Collection of reported only_once messages.
    only_once_messages = None  # type: Set[str]

    # Set to True to show "In function "foo":" messages.
    show_error_context = False  # type: bool

    # Set to True to show column numbers in error messages.
    show_column_numbers = False  # type: bool

    # Set to True to show absolute file paths in error messages.
    show_absolute_path = False  # type: bool

    # State for keeping track of the current fine-grained incremental mode target.
    # (See mypy.server.update for more about targets.)
    # Current module id.
    target_module = None  # type: Optional[str]
    scope = None  # type: Optional[Scope]

    def __init__(
            self,
            show_error_context: bool = False,
            show_column_numbers: bool = False,
            show_error_codes: bool = False,
            pretty: bool = False,
            read_source: Optional[Callable[[str], Optional[List[str]]]] = None,
            show_absolute_path: bool = False,
            enabled_error_codes: Optional[Set[ErrorCode]] = None,
            disabled_error_codes: Optional[Set[ErrorCode]] = None) -> None:
        self.show_error_context = show_error_context
        self.show_column_numbers = show_column_numbers
        self.show_error_codes = show_error_codes
        self.show_absolute_path = show_absolute_path
        self.pretty = pretty
        # We use fscache to read source code when showing snippets.
        self.read_source = read_source
        self.enabled_error_codes = enabled_error_codes or set()
        self.disabled_error_codes = disabled_error_codes or set()
        self.initialize()

    def initialize(self) -> None:
        self.error_info_map = OrderedDict()
        self.flushed_files = set()
        self.import_ctx = []
        self.function_or_member = [None]
        self.ignored_lines = OrderedDict()
        self.used_ignored_lines = defaultdict(set)
        self.ignored_files = set()
        self.only_once_messages = set()
        self.scope = None
        self.target_module = None

    def reset(self) -> None:
        self.initialize()

    def copy(self) -> 'Errors':
        new = Errors(self.show_error_context, self.show_column_numbers,
                     self.show_error_codes, self.pretty, self.read_source,
                     self.show_absolute_path, self.enabled_error_codes,
                     self.disabled_error_codes)
        new.file = self.file
        new.import_ctx = self.import_ctx[:]
        new.function_or_member = self.function_or_member[:]
        new.target_module = self.target_module
        new.scope = self.scope
        return new

    def total_errors(self) -> int:
        return sum(len(errs) for errs in self.error_info_map.values())

    def set_ignore_prefix(self, prefix: str) -> None:
        """Set path prefix that will be removed from all paths."""
        prefix = os.path.normpath(prefix)
        # Add separator to the end, if not given.
        if os.path.basename(prefix) != '':
            prefix += os.sep
        self.ignore_prefix = prefix

    def simplify_path(self, file: str) -> str:
        if self.show_absolute_path:
            return os.path.abspath(file)
        else:
            file = os.path.normpath(file)
            return remove_path_prefix(file, self.ignore_prefix)

    def set_file(self,
                 file: str,
                 module: Optional[str],
                 scope: Optional[Scope] = None) -> None:
        """Set the path and module id of the current file."""
        # The path will be simplified later, in render_messages. That way
        #  * 'file' is always a key that uniquely identifies a source file
        #    that mypy read (simplified paths might not be unique); and
        #  * we only have to simplify in one place, while still supporting
        #    reporting errors for files other than the one currently being
        #    processed.
        self.file = file
        self.target_module = module
        self.scope = scope

    def set_file_ignored_lines(self,
                               file: str,
                               ignored_lines: Dict[int, List[str]],
                               ignore_all: bool = False) -> None:
        self.ignored_lines[file] = ignored_lines
        if ignore_all:
            self.ignored_files.add(file)

    def current_target(self) -> Optional[str]:
        """Retrieves the current target from the associated scope.

        If there is no associated scope, use the target module."""
        if self.scope is not None:
            return self.scope.current_target()
        return self.target_module

    def current_module(self) -> Optional[str]:
        return self.target_module

    def import_context(self) -> List[Tuple[str, int]]:
        """Return a copy of the import context."""
        return self.import_ctx[:]

    def set_import_context(self, ctx: List[Tuple[str, int]]) -> None:
        """Replace the entire import context with a new value."""
        self.import_ctx = ctx[:]

    def report(self,
               line: int,
               column: Optional[int],
               message: str,
               code: Optional[ErrorCode] = None,
               *,
               blocker: bool = False,
               severity: str = 'error',
               file: Optional[str] = None,
               only_once: bool = False,
               origin_line: Optional[int] = None,
               offset: int = 0,
               end_line: Optional[int] = None) -> None:
        """Report message at the given line using the current error context.

        Args:
            line: line number of error
            column: column number of error
            message: message to report
            code: error code (defaults to 'misc'; not shown for notes)
            blocker: if True, don't continue analysis after this error
            severity: 'error' or 'note'
            file: if non-None, override current file as context
            only_once: if True, only report this exact message once per build
            origin_line: if non-None, override current context as origin
            end_line: if non-None, override current context as end
        """
        if self.scope:
            type = self.scope.current_type_name()
            if self.scope.ignored > 0:
                type = None  # Omit type context if nested function
            function = self.scope.current_function_name()
        else:
            type = None
            function = None

        if column is None:
            column = -1
        if file is None:
            file = self.file
        if offset:
            message = " " * offset + message

        if origin_line is None:
            origin_line = line

        if end_line is None:
            end_line = origin_line

        code = code or (codes.MISC if not blocker else None)

        info = ErrorInfo(self.import_context(),
                         file,
                         self.current_module(),
                         type,
                         function,
                         line,
                         column,
                         severity,
                         message,
                         code,
                         blocker,
                         only_once,
                         origin=(self.file, origin_line, end_line),
                         target=self.current_target())
        self.add_error_info(info)

    def _add_error_info(self, file: str, info: ErrorInfo) -> None:
        assert file not in self.flushed_files
        if file not in self.error_info_map:
            self.error_info_map[file] = []
        self.error_info_map[file].append(info)

    def add_error_info(self, info: ErrorInfo) -> None:
        file, line, end_line = info.origin
        if not info.blocker:  # Blockers cannot be ignored
            if file in self.ignored_lines:
                # It's okay if end_line is *before* line.
                # Function definitions do this, for example, because the correct
                # error reporting line is at the *end* of the ignorable range
                # (for compatibility reasons). If so, just flip 'em!
                if end_line < line:
                    line, end_line = end_line, line
                # Check each line in this context for "type: ignore" comments.
                # line == end_line for most nodes, so we only loop once.
                for scope_line in range(line, end_line + 1):
                    if self.is_ignored_error(scope_line, info,
                                             self.ignored_lines[file]):
                        # Annotation requests us to ignore all errors on this line.
                        self.used_ignored_lines[file].add(scope_line)
                        return
            if file in self.ignored_files:
                return
        if info.only_once:
            if info.message in self.only_once_messages:
                return
            self.only_once_messages.add(info.message)
        self._add_error_info(file, info)

    def is_ignored_error(self, line: int, info: ErrorInfo,
                         ignores: Dict[int, List[str]]) -> bool:
        if info.blocker:
            # Blocking errors can never be ignored
            return False
        if info.code and self.is_error_code_enabled(info.code) is False:
            return True
        if line not in ignores:
            return False
        if not ignores[line]:
            # Empty list means that we ignore all errors
            return True
        if info.code and self.is_error_code_enabled(info.code) is True:
            return info.code.code in ignores[line]
        return False
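
    # A sketch of the matching rules above, with hypothetical values: assume
    # info.code is codes.ARG_TYPE (string 'arg-type') and the code is enabled.
    #
    #     ignores = {10: [], 11: ['arg-type'], 12: ['misc']}
    #     is_ignored_error(10, info, ignores)  # True: bare ignore catches everything
    #     is_ignored_error(11, info, ignores)  # True: the code is listed on the line
    #     is_ignored_error(12, info, ignores)  # False: the code is not listed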

    def is_error_code_enabled(self, error_code: ErrorCode) -> bool:
        if error_code in self.disabled_error_codes:
            return False
        elif error_code in self.enabled_error_codes:
            return True
        else:
            return error_code.default_enabled

    def clear_errors_in_targets(self, path: str, targets: Set[str]) -> None:
        """Remove errors in specific fine-grained targets within a file."""
        if path in self.error_info_map:
            new_errors = []
            for info in self.error_info_map[path]:
                if info.target not in targets:
                    new_errors.append(info)
                elif info.only_once:
                    self.only_once_messages.remove(info.message)
            self.error_info_map[path] = new_errors

    def generate_unused_ignore_errors(self, file: str) -> None:
        ignored_lines = self.ignored_lines[file]
        if not is_typeshed_file(file) and file not in self.ignored_files:
            for line in set(ignored_lines) - self.used_ignored_lines[file]:
                # Don't use report since add_error_info will ignore the error!
                info = ErrorInfo(self.import_context(), file,
                                 self.current_module(), None, None, line, -1,
                                 'error', "unused 'type: ignore' comment",
                                 None, False, False)
                self._add_error_info(file, info)

    def num_messages(self) -> int:
        """Return the number of generated messages."""
        return sum(len(x) for x in self.error_info_map.values())

    def is_errors(self) -> bool:
        """Are there any generated errors?"""
        return bool(self.error_info_map)

    def is_blockers(self) -> bool:
        """Are the any errors that are blockers?"""
        return any(err for errs in self.error_info_map.values() for err in errs
                   if err.blocker)

    def blocker_module(self) -> Optional[str]:
        """Return the module with a blocking error, or None if not possible."""
        for errs in self.error_info_map.values():
            for err in errs:
                if err.blocker:
                    return err.module
        return None

    def is_errors_for_file(self, file: str) -> bool:
        """Are there any errors for the given file?"""
        return file in self.error_info_map

    def most_recent_error_location(self) -> Tuple[int, int]:
        info = self.error_info_map[self.file][-1]
        return info.line, info.column

    def raise_error(self, use_stdout: bool = True) -> None:
        """Raise a CompileError with the generated messages.

        Render the messages suitable for displaying.
        """
        # self.new_messages() will format all messages that haven't already
        # been returned from a file_messages() call.
        raise CompileError(self.new_messages(),
                           use_stdout=use_stdout,
                           module_with_blocker=self.blocker_module())

    def format_messages(self, error_info: List[ErrorInfo],
                        source_lines: Optional[List[str]]) -> List[str]:
        """Return a string list that represents the error messages.

        Use a form suitable for displaying to the user. If self.pretty
        is True also append a relevant trimmed source code line (only for
        severity 'error').
        """
        a = []  # type: List[str]
        errors = self.render_messages(self.sort_messages(error_info))
        errors = self.remove_duplicates(errors)
        for file, line, column, severity, message, code in errors:
            s = ''
            if file is not None:
                if self.show_column_numbers and line >= 0 and column >= 0:
                    srcloc = '{}:{}:{}'.format(file, line, 1 + column)
                elif line >= 0:
                    srcloc = '{}:{}'.format(file, line)
                else:
                    srcloc = file
                s = '{}: {}: {}'.format(srcloc, severity, message)
            else:
                s = message
            if self.show_error_codes and code and severity != 'note':
                # If a note has an error code, it is related to a previous error. Avoid
                # displaying duplicate error codes.
                s = '{}  [{}]'.format(s, code.code)
            a.append(s)
            if self.pretty:
                # Add source code fragment and a location marker.
                if severity == 'error' and source_lines and line > 0:
                    source_line = source_lines[line - 1]
                    source_line_expanded = source_line.expandtabs()
                    if column < 0:
                        # Something went wrong, take first non-empty column.
                        column = len(source_line) - len(source_line.lstrip())

                    # Shifts column after tab expansion
                    column = len(source_line[:column].expandtabs())

                    # Note, currently coloring uses the offset to detect source snippets,
                    # so these offsets should not be arbitrary.
                    a.append(' ' * DEFAULT_SOURCE_OFFSET +
                             source_line_expanded)
                    a.append(' ' * (DEFAULT_SOURCE_OFFSET + column) + '^')
        return a
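
    # For reference, the rendered form built above looks roughly like this
    # (illustrative output, not captured from a real run), assuming
    # show_column_numbers, show_error_codes, and pretty are all enabled:
    #
    #     m.py:10:5: error: Unsupported operand types  [operator]
    #         x + "s"
    #              ^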

    def file_messages(self, path: str) -> List[str]:
        """Return a string list of new error messages from a given file.

        Use a form suitable for displaying to the user.
        """
        if path not in self.error_info_map:
            return []
        self.flushed_files.add(path)
        source_lines = None
        if self.pretty:
            assert self.read_source
            source_lines = self.read_source(path)
        return self.format_messages(self.error_info_map[path], source_lines)

    def new_messages(self) -> List[str]:
        """Return a string list of new error messages.

        Use a form suitable for displaying to the user.
        Errors from different files are ordered based on the order in which
        they first generated an error.
        """
        msgs = []
        for path in self.error_info_map.keys():
            if path not in self.flushed_files:
                msgs.extend(self.file_messages(path))
        return msgs

    def targets(self) -> Set[str]:
        """Return a set of all targets that contain errors."""
        # TODO: Make sure that either target is always defined or that not being defined
        #       is okay for fine-grained incremental checking.
        return set(info.target for errs in self.error_info_map.values()
                   for info in errs if info.target)

    def render_messages(self, errors: List[ErrorInfo]) -> List[ErrorTuple]:
        """Translate the messages into a sequence of tuples.

        Each tuple is of the form (path, line, col, severity, message, code).
        The rendered sequence includes information about error contexts.
        The path item may be None. If the line item is negative, the
        line number is not defined for the tuple.
        """
        result = []  # type: List[ErrorTuple]
        prev_import_context = []  # type: List[Tuple[str, int]]
        prev_function_or_member = None  # type: Optional[str]
        prev_type = None  # type: Optional[str]

        for e in errors:
            # Report module import context, if different from previous message.
            if not self.show_error_context:
                pass
            elif e.import_ctx != prev_import_context:
                last = len(e.import_ctx) - 1
                i = last
                while i >= 0:
                    path, line = e.import_ctx[i]
                    fmt = '{}:{}: note: In module imported here'
                    if i < last:
                        fmt = '{}:{}: note: ... from here'
                    if i > 0:
                        fmt += ','
                    else:
                        fmt += ':'
                    # Remove prefix to ignore from path (if present) to
                    # simplify path.
                    path = remove_path_prefix(path, self.ignore_prefix)
                    result.append(
                        (None, -1, -1, 'note', fmt.format(path, line), None))
                    i -= 1

            file = self.simplify_path(e.file)

            # Report context within a source file.
            if not self.show_error_context:
                pass
            elif (e.function_or_member != prev_function_or_member
                  or e.type != prev_type):
                if e.function_or_member is None:
                    if e.type is None:
                        result.append(
                            (file, -1, -1, 'note', 'At top level:', None))
                    else:
                        result.append((file, -1, -1, 'note',
                                       'In class "{}":'.format(e.type), None))
                else:
                    if e.type is None:
                        result.append(
                            (file, -1, -1, 'note',
                             'In function "{}":'.format(e.function_or_member),
                             None))
                    else:
                        result.append((file, -1, -1, 'note',
                                       'In member "{}" of class "{}":'.format(
                                           e.function_or_member,
                                           e.type), None))
            elif e.type != prev_type:
                if e.type is None:
                    result.append(
                        (file, -1, -1, 'note', 'At top level:', None))
                else:
                    result.append((file, -1, -1, 'note',
                                   'In class "{}":'.format(e.type), None))

            result.append(
                (file, e.line, e.column, e.severity, e.message, e.code))

            prev_import_context = e.import_ctx
            prev_function_or_member = e.function_or_member
            prev_type = e.type

        return result
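
    # With show_error_context enabled, the stream interleaves context notes
    # before each message; an illustrative (not real) slice of the output:
    #
    #     (None, -1, -1, 'note', 'foo.py:1: note: In module imported here:', None)
    #     ('m.py', -1, -1, 'note', 'In function "f":', None)
    #     ('m.py', 10, 4, 'error', 'Name "x" is not defined', None)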

    def sort_messages(self, errors: List[ErrorInfo]) -> List[ErrorInfo]:
        """Sort an array of error messages locally by line number.

        I.e., sort a run of consecutive messages with the same
        context by line number, but otherwise retain the general
        ordering of the messages.
        """
        result = []  # type: List[ErrorInfo]
        i = 0
        while i < len(errors):
            i0 = i
            # Find neighbouring errors with the same context and file.
            while (i + 1 < len(errors)
                   and errors[i + 1].import_ctx == errors[i].import_ctx
                   and errors[i + 1].file == errors[i].file):
                i += 1
            i += 1

            # Sort the errors specific to a file according to line number and column.
            a = sorted(errors[i0:i], key=lambda x: (x.line, x.column))
            result.extend(a)
        return result
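
    # Worked example (hypothetical): given E1(line 5) and E2(line 3) from
    # a.py followed by E3(line 9) and E4(line 2) from b.py, the result is
    # [E2, E1, E4, E3]: each file's run is sorted by position, but a.py's
    # messages still precede b.py's.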

    def remove_duplicates(self, errors: List[ErrorTuple]) -> List[ErrorTuple]:
        """Remove duplicates from a sorted error list."""
        res = []  # type: List[ErrorTuple]
        i = 0
        while i < len(errors):
            dup = False
            # Use slightly special formatting for member conflicts reporting.
            conflicts_notes = False
            j = i - 1
            while j >= 0 and errors[j][0] == errors[i][0]:
                if errors[j][4].strip() == 'Got:':
                    conflicts_notes = True
                j -= 1
            j = i - 1
            while (j >= 0 and errors[j][0] == errors[i][0]
                   and errors[j][1] == errors[i][1]):
                if (errors[j][3] == errors[i][3] and
                        # Allow duplicate notes in overload conflicts reporting.
                        not ((errors[i][3] == 'note' and errors[i][4].strip()
                              in allowed_duplicates) or
                             (errors[i][4].strip().startswith('def ')
                              and conflicts_notes))
                        and errors[j][4] == errors[i][4]):  # ignore column
                    dup = True
                    break
                j -= 1
            if not dup:
                res.append(errors[i])
            i += 1
        return res
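
# A minimal standalone sketch (a simplified illustration, not mypy code) of
# the duplicate test above: entries are (path, line, column, severity,
# message, code) tuples, and two entries that differ only in column count
# as duplicates.
def dedup(errors):
    res = []
    for e in errors:
        if not any(d[0] == e[0] and d[1] == e[1] and d[3] == e[3] and d[4] == e[4]
                   for d in res):
            res.append(e)
    return res

assert len(dedup([
    ('m.py', 10, 4, 'error', 'Unsupported left operand type', None),
    ('m.py', 10, 12, 'error', 'Unsupported left operand type', None),  # dropped
    ('m.py', 11, 0, 'error', 'Name "x" is not defined', None),
])) == 2
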
def _analyze_class(ctx: 'mypy.plugin.ClassDefContext',
                   auto_attribs: Optional[bool],
                   kw_only: bool) -> List[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    auto_attribs=None means we'll detect which mode to use.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs = OrderedDict()  # type: OrderedDict[str, Attribute]
    if auto_attribs is None:
        auto_attribs = _detect_auto_attribs(ctx)

    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
                # When an attr is defined twice in the same body, we want the
                # 2nd definition to take effect at the 2nd location, so remove
                # the 1st from the OrderedDict. With auto_attribs, however, the
                # 2nd definition takes effect at the 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            _cleanup_decorator(stmt, own_attrs)

    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            if isinstance(node, PlaceholderNode):
                # This node is not ready yet.
                continue
            assert isinstance(node, Var)
            node.is_initialized_in_class = False

    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if 'attrs' in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname))

            for data in super_info.metadata['attrs']['attributes']:
                # Only add an attribute if it hasn't been defined before.  This
                # allows for overwriting attribute definitions by subclassing.
                if data['name'] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data, ctx.api)
                    a.expand_typevar_from_subtype(ctx.cls.info)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    attributes = super_attrs + list(own_attrs.values())

    # Check the init args for correct default-ness.  Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False

    for i, attribute in enumerate(attributes):
        if not attribute.init:
            continue

        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            continue

        # If the issue comes from merging different classes, report it
        # at the class definition point.
        context = attribute.context if i >= len(super_attrs) else ctx.cls

        if not attribute.has_default and last_default:
            ctx.api.fail(
                "Non-default attributes not allowed after default attributes.",
                context)
        last_default |= attribute.has_default

    return attributes
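
# For context, a hedged sketch (assumes the attrs package is installed; not
# code from this plugin): the ordering check above mirrors attrs' own runtime
# rule. A class like
#
#     @attr.s(auto_attribs=True)
#     class Bad:
#         x: int = 0
#         y: int  # error: Non-default attributes not allowed after default attributes.
#
# is rejected, while keyword-only attributes are exempt:
import attr

@attr.s(auto_attribs=True, kw_only=True)
class Ok:
    x: int = 0
    y: int  # allowed: kw_only attributes skip the default-ordering check

Ok(y=2)  # x defaults to 0; every argument must be passed by keyword
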
Code example #8
File: ops.py Project: udifuchs/mypy
def __init__(self) -> None:
    self.indexes = OrderedDict()  # type: Dict[Value, int]
    self.symtable = OrderedDict()  # type: OrderedDict[SymbolNode, AssignmentTarget]
    self.vars_needing_init = set()  # type: Set[Value]