def make_function(name, node, code, globs, defaults, kw_defaults, closure, annotations, opcode, ctx):
  """Create a function or closure given the arguments.

  Args:
    node: The current CFG node.
    name: The function name; if empty, taken from the code object's co_name,
        falling back to "<lambda>".
    code: A Variable holding the function's code object constant.
    globs: The globals for the new function.
    defaults: Default values for positional parameters.
    kw_defaults: Default values for keyword-only parameters.
    closure: A Variable holding the closure cells, or a falsy value.
    annotations: The function's parameter/return annotations.
    opcode: The opcode at which the function is defined.
    ctx: The current context.

  Returns:
    A new Variable with the created InterpreterFunction bound at `node`.
  """
  if closure:
    # Unwrap the closure constant into a plain tuple of cells.
    closure = tuple(
        c for c in abstract_utils.get_atomic_python_constant(closure))
    log.info("closure: %r", closure)
  if not name:
    name = abstract_utils.get_atomic_python_constant(code).co_name
  if not name:
    # Code objects for lambdas can have an empty name.
    name = "<lambda>"
  val = abstract.InterpreterFunction.make(
      name,
      def_opcode=opcode,
      code=abstract_utils.get_atomic_python_constant(code),
      f_locals=ctx.vm.frame.f_locals,
      f_globals=globs,
      defaults=defaults,
      kw_defaults=kw_defaults,
      closure=closure,
      annotations=annotations,
      ctx=ctx)
  var = ctx.program.NewVariable()
  var.AddBinding(val, code.bindings, node)
  # Verify that parameter defaults match their annotations.
  _check_defaults(node, val, ctx)
  if val.signature.annotations:
    # Queue the function for a later check of its annotations' type params.
    ctx.vm.functions_type_params_check.append(
        (val, ctx.vm.frame.current_opcode))
  return var
def make_class(self, node, bases, f_locals):
  """Build a NamedTuple class from a class-syntax definition's locals.

  Args:
    node: The current CFG node.
    bases: The base classes of the class being defined.
    f_locals: A Variable holding the class body's locals.

  Returns:
    A (node, Variable) tuple holding the constructed class.
  """
  # If BuildClass.call() hits max depth, f_locals will be [unsolvable]
  # Since we don't support defining NamedTuple subclasses in a nested scope
  # anyway, we can just return unsolvable here to prevent a crash, and let the
  # invalid namedtuple error get raised later.
  if isinstance(f_locals.data[0], abstract.Unsolvable):
    return node, self.ctx.new_unsolvable(node)
  f_locals = abstract_utils.get_atomic_python_constant(f_locals)
  # retrieve __qualname__ to get the name of class
  name = f_locals["__qualname__"]
  nameval = abstract_utils.get_atomic_python_constant(name)
  if "." in nameval:
    # Keep only the last component of a dotted qualname (nested definition).
    nameval = nameval.rsplit(".", 1)[-1]
    name = self.ctx.convert.constant_to_var(nameval)
  # assemble the arguments that are compatible with NamedTupleFuncBuilder.call
  field_list = []
  defaults = []
  cls_locals = classgen.get_class_locals(
      nameval,
      allow_methods=True,
      ordering=classgen.Ordering.FIRST_ANNOTATE,
      ctx=self.ctx)
  for k, local in cls_locals.items():
    assert local.typ
    if k in f_locals:
      # A value assigned in the class body becomes the field's default.
      defaults.append(f_locals[k])
    k = self.ctx.convert.constant_to_var(k, node=node)
    field_list.append(self.ctx.convert.build_tuple(node, (k, local.typ)))
  anno = self.ctx.convert.build_list(node, field_list)
  posargs = (name, anno)
  args = function.Args(posargs=posargs)
  node, cls_var = self.namedtuple.call(node, None, args, bases)
  cls_val = abstract_utils.get_atomic_value(cls_var)
  if not isinstance(cls_val, abstract.Unsolvable):
    # set __new__.__defaults__
    defaults = self.ctx.convert.build_tuple(node, defaults)
    node, new_attr = self.ctx.attribute_handler.get_attribute(
        node, cls_val, "__new__")
    new_attr = abstract_utils.get_atomic_value(new_attr)
    node = self.ctx.attribute_handler.set_attribute(
        node, new_attr, "__defaults__", defaults)
    # set the attribute without overriding special namedtuple attributes
    node, fields = self.ctx.attribute_handler.get_attribute(
        node, cls_val, "_fields")
    fields = abstract_utils.get_atomic_python_constant(fields, tuple)
    fields = [abstract_utils.get_atomic_python_constant(field, str)
              for field in fields]
    for key in f_locals:
      if key in self._prohibited:
        self.ctx.errorlog.not_writable(self.ctx.vm.frames, cls_val, key)
      if key not in abstract_utils.CLASS_LEVEL_IGNORE and key not in fields:
        node = self.ctx.attribute_handler.set_attribute(
            node, cls_val, key, f_locals[key])
  return node, cls_var
def convert_function_annotations(self, node, raw_annotations):
  """Convert raw annotations to a {name: annotation} dict."""
  if not raw_annotations:
    return {}
  # {"i": int, "return": str} is stored as (int, str, ("i", "return")):
  # the final entry is the tuple of names, the rest are the types.
  arg_names = abstract_utils.get_atomic_python_constant(raw_annotations[-1])
  raw_types = raw_annotations[:-1]
  pairs = []
  for raw_name, raw_type in zip(arg_names, raw_types):
    arg_name = abstract_utils.get_atomic_python_constant(raw_name)
    converted = self.convert_function_type_annotation(arg_name, raw_type)
    pairs.append((arg_name, converted))
  return self.convert_annotations_list(node, pairs)
def build_slice(self, node, start, stop, step=None):
  """Build a concrete slice value; fall back to an abstract slice instance.

  Each bound must be a constant int or None (or a falsy placeholder); if any
  bound cannot be converted, an abstract slice instance is returned instead.
  """
  allowed = (int, type(None))
  bounds = []
  try:
    for bound in (start, stop, step):
      # Falsy bounds (e.g. the step default of None) are passed through as-is.
      bounds.append(
          abstract_utils.get_atomic_python_constant(bound, allowed)
          if bound else bound)
  except abstract_utils.ConversionError:
    return self.primitive_class_instances[slice].to_variable(node)
  return abstract.ConcreteValue(
      slice(*bounds), self.primitive_classes[slice],
      self.ctx).to_variable(node)
def _convert_slots(self, slots_var):
  """Convert __slots__ from a Variable to a tuple of mangled names."""
  if slots_var is None:
    return None
  if len(slots_var.bindings) != 1:
    # Ambiguous slots
    return None  # Treat "unknown __slots__" and "no __slots__" the same.
  val = slots_var.data[0]
  if not isinstance(val, mixin.PythonConstant):
    return None  # Happens e.g. for __slots__ = dir(Foo)
  if not isinstance(val.pyval, (list, tuple)):
    return None  # Happens e.g. __slots__ = {"foo", "bar"}. Not an error.
  entries = val.pyval
  try:
    names = [abstract_utils.get_atomic_python_constant(v) for v in entries]
  except abstract_utils.ConversionError:
    return None  # Happens e.g. for __slots__ = ["x" if b else "y"]
  # Slot names should be strings.
  for slot_name in names:
    if not isinstance(slot_name, str):
      self.ctx.errorlog.bad_slots(
          self.ctx.vm.frames, "Invalid __slot__ entry: %r" % str(slot_name))
      return None
  return tuple(self._mangle(slot_name) for slot_name in names)
def call(self, node, _, args, bases=None):
  """Construct a NamedTuple class from the call arguments.

  A falsy `bases` means the functional (call-style) form was used; otherwise
  we are completing a class-syntax definition. On any conversion failure the
  result is Any.
  """
  # Both argument extraction and name extraction bail out to Any on failure.
  try:
    name_var, field_names, field_types = self._getargs(
        node, args, functional=bases is None)
    type_name = abstract_utils.get_atomic_python_constant(name_var)
  except abstract_utils.ConversionError:
    return node, self.ctx.new_unsolvable(node)
  try:
    field_names = self._validate_and_rename_args(type_name, field_names, False)
  except ValueError as e:
    self.ctx.errorlog.invalid_namedtuple_arg(self.ctx.vm.frames,
                                             utils.message(e))
    return node, self.ctx.new_unsolvable(node)
  annots = self.ctx.annotation_utils.convert_annotations_list(
      node, zip(field_names, field_types))
  # Fields with no usable annotation default to Any.
  field_types = [
      annots.get(field_name, self.ctx.convert.unsolvable)
      for field_name in field_names
  ]
  node, cls_var = self._build_namedtuple(
      type_name, field_names, field_types, node, bases)
  self.ctx.vm.trace_classdef(cls_var)
  return node, cls_var
def unpack_iterable(node, var, ctx):
  """Unpack an iterable.

  Args:
    node: The current CFG node.
    var: A Variable holding the value being unpacked.
    ctx: The current context.

  Returns:
    A list of Variables, one per element; indefinite iterables are wrapped
    in an abstract.Splat.
  """
  elements = []
  try:
    itr = abstract_utils.get_atomic_python_constant(
        var, collections.abc.Iterable)
  except abstract_utils.ConversionError:
    if abstract_utils.is_var_indefinite_iterable(var):
      # Iterable of unknown length: represent it as a single splat.
      elements.append(abstract.Splat(ctx, var).to_variable(node))
    elif (all(isinstance(d, abstract.Tuple) for d in var.data) and
          all(d.tuple_length == var.data[0].tuple_length for d in var.data)):
      # If we have a set of bindings to tuples all of the same length, treat
      # them as a definite tuple with union-typed fields.
      vs = _merge_tuple_bindings(var, ctx)
      elements.extend(vs)
    elif (any(isinstance(x, abstract.Unsolvable) for x in var.data) or
          all(isinstance(x, abstract.Unknown) for x in var.data)):
      # If we have an unsolvable or unknown we are unpacking as an iterable,
      # make sure it is treated as a tuple and not a single value.
      v = ctx.convert.tuple_type.instantiate(node)
      elements.append(abstract.Splat(ctx, v).to_variable(node))
    else:
      # If we reach here we have tried to unpack something that wasn't
      # iterable. Wrap it in a splat and let the matcher raise an error.
      elements.append(abstract.Splat(ctx, var).to_variable(node))
  else:
    for v in itr:
      # Some iterable constants (e.g., tuples) already contain variables,
      # whereas others (e.g., strings) need to be wrapped.
      if isinstance(v, cfg.Variable):
        elements.append(v)
      else:
        elements.append(ctx.convert.constant_to_var(v))
  return elements
def _get_constant(self, var, name, arg_type, arg_type_desc=None):
  """Extract a constant of `arg_type` from `var`.

  Raises:
    TypeVarError: If `var` does not hold an atomic constant of `arg_type`.
  """
  try:
    return abstract_utils.get_atomic_python_constant(var, arg_type)
  except abstract_utils.ConversionError as e:
    expected = arg_type_desc if arg_type_desc else (
        f"a constant {arg_type.__name__}")
    raise TypeVarError(f"{name} must be {expected}") from e
def call(self, node, func, args):
  """Implement a call to typing.NewType by synthesizing a subclass.

  Builds a class named after the (constant) name argument, with an __init__
  accepting a single value of the type argument.
  """
  args = args.simplify(node, self.ctx)
  self.match_args(node, args, match_all_views=True)
  # As long as the types match we do not really care about the actual
  # class name. But, if we have a string literal value as the name arg,
  # we will use it.
  name_arg = args.namedargs.get(self._name_arg_name) or args.posargs[0]
  try:
    _ = abstract_utils.get_atomic_python_constant(name_arg, str)
  except abstract_utils.ConversionError:
    # Non-constant name: fall back to a unique internal placeholder name.
    name_arg = self.ctx.convert.constant_to_var(
        f"_NewType_Internal_Class_Name_{self.internal_name_counter}_")
  type_arg = args.namedargs.get(self._type_arg_name) or args.posargs[1]
  try:
    type_value = abstract_utils.get_atomic_value(type_arg)
  except abstract_utils.ConversionError:
    # We need the type arg to be an atomic value. If not, we just
    # silently return unsolvable.
    return node, self.ctx.new_unsolvable(node)
  value_arg_name = "val"
  # The synthesized class's __init__ takes one value of the wrapped type.
  constructor = overlay_utils.make_method(
      self.ctx,
      node,
      name="__init__",
      params=[Param(value_arg_name, type_value)])
  members = abstract.Dict(self.ctx)
  members.set_str_item(node, "__init__", constructor)
  return self.ctx.make_class(node, name_arg, (type_arg, ),
                             members.to_variable(node), None)
def _getargs(self, node, args, functional):
  """Extract the typename, field names and field types from the args.

  Args:
    node: The current CFG node.
    args: A function.Args object for a typing.NamedTuple call.
    functional: True if the functional call syntax was used (as opposed to
        the class syntax, where annotations were already processed).

  Returns:
    A (name Variable, list of field-name strings, list of field types) tuple.

  Raises:
    function.WrongArgTypes: If the fields argument is malformed.
  """
  self.match_args(node, args)
  sig, = self.signatures
  callargs = {name: var for name, var, _ in sig.signature.iter_args(args)}
  # typing.NamedTuple doesn't support rename or verbose
  name_var = callargs["typename"]
  fields_var = callargs["fields"]
  fields = abstract_utils.get_atomic_python_constant(fields_var)
  if isinstance(fields, str):
    # Since str matches Sequence, we have to manually check for it.
    raise function.WrongArgTypes(sig.signature, args, self.ctx,
                                 self._fields_param)
  # The fields is a list of tuples, so we need to deeply unwrap them.
  fields = [abstract_utils.get_atomic_python_constant(t) for t in fields]
  # We need the actual string for the field names and the BaseValue
  # for the field types.
  names = []
  types = []
  for field in fields:
    if isinstance(field, str):
      # Since str matches Sequence, we have to manually check for it.
      raise function.WrongArgTypes(sig.signature, args, self.ctx,
                                   self._fields_param)
    if (len(field) != 2 or
        any(not self._is_str_instance(v) for v in field[0].data)):
      # Note that we don't need to check field[1] because both 'str'
      # (forward reference) and 'type' are valid for it.
      raise function.WrongArgTypes(sig.signature, args, self.ctx,
                                   self._fields_param)
    name, typ = field
    name_py_constant = abstract_utils.get_atomic_python_constant(name)
    names.append(name_py_constant)
    if functional:
      allowed_type_params = (
          self.ctx.annotation_utils.get_callable_type_parameter_names(typ))
      annot = self.ctx.annotation_utils.extract_annotation(
          node,
          typ,
          name_py_constant,
          self.ctx.vm.simple_stack(),
          allowed_type_params=allowed_type_params)
    else:
      # This NamedTuple was constructed with the class syntax. The field
      # annotations were already processed when added to __annotations__.
      annot = abstract_utils.get_atomic_value(typ)
    types.append(annot)
  return name_var, names, types
def getitem_slot(self, node, name_var):
  """Implement `td[key]` for a TypedDict.

  A typed dict getitem should have a concrete string arg. If we have a var
  with multiple bindings just fall back to Any.
  """
  if not self._check_key(name_var):
    return node, self.ctx.new_unsolvable(node)
  key = abstract_utils.get_atomic_python_constant(name_var, str)
  value_type = self.fields[key]
  instances = [value.instantiate(node) for value in value_type.data]
  return node, self.ctx.join_variables(node, instances)
def get_kwarg(self, args, name, default):
  """Return the constant value of keyword argument `name`.

  Returns `default` if the argument was not passed.

  NOTE(review): if the argument is present but not a constant, this logs a
  not-supported-yet error and falls off the end, implicitly returning None
  rather than `default` — callers must tolerate a None result. Confirm this
  is intentional before changing it.
  """
  if name not in args.namedargs:
    return default
  try:
    return abstract_utils.get_atomic_python_constant(
        args.namedargs[name])
  except abstract_utils.ConversionError:
    self.ctx.errorlog.not_supported_yet(
        self.ctx.vm.frames, f"Non-constant argument {name!r}")
def _check_key(self, name_var):
  """Check that key is in the typed dict."""
  try:
    key = abstract_utils.get_atomic_python_constant(name_var, str)
  except abstract_utils.ConversionError:
    # Non-constant key: report it and treat the check as failed.
    self.ctx.errorlog.typed_dict_error(self.ctx.vm.frames, self, name=None)
    return False
  return self._check_str_key(key)
def _get_constant_tuple_prefix(value: abstract.Tuple):
  """Given a tuple, get its longest prefix of constant elements."""
  # The set of acceptable constant types is loop-invariant; compute it once.
  const_classes = tuple(value.ctx.convert.primitive_classes)
  prefix = []
  for element_var in value.pyval:
    try:
      element = abstract_utils.get_atomic_python_constant(
          element_var, const_classes)
    except abstract_utils.ConversionError:
      # First non-constant element ends the prefix.
      break
    prefix.append(element)
  return tuple(prefix)
def contains_slot(self, node, key_var):
  """Implement `key in d`; the result is ambiguous (None) when unknowable."""
  membership = None
  if not self.could_contain_anything:
    try:
      concrete_key = abstract_utils.get_atomic_python_constant(key_var, str)
    except abstract_utils.ConversionError:
      # Non-constant key: leave the answer ambiguous.
      pass
    else:
      membership = concrete_key in self.pyval
  return node, self.ctx.convert.build_bool(node, membership)
def starargs_as_tuple(self, node, ctx):
  """Return *args as a tuple of Variables, or a falsy value if not constant."""
  try:
    if self.starargs:
      extracted = abstract_utils.get_atomic_python_constant(
          self.starargs, tuple)
    else:
      # Preserve the original falsy value (e.g. None).
      extracted = self.starargs
  except abstract_utils.ConversionError:
    extracted = None
  if not extracted:
    return extracted
  # Replace unbound variables with an "empty" placeholder.
  result = []
  for var in extracted:
    result.append(var if var.bindings else ctx.convert.empty.to_variable(node))
  return tuple(result)
def make_class(self, node, bases, f_locals, total):
  """Build a TypedDict class from a class-syntax definition's locals.

  Args:
    node: The current CFG node.
    bases: The base classes of the class being defined.
    f_locals: A Variable holding the class body's locals.
    total: A Variable holding the `total` keyword, or None if not supplied.

  Returns:
    A (node, Variable) tuple holding the constructed TypedDictClass.
  """
  # If BuildClass.call() hits max depth, f_locals will be [unsolvable]
  # See comment in NamedTupleClassBuilder.make_class(); equivalent logic
  # applies here.
  if isinstance(f_locals.data[0], abstract.Unsolvable):
    return node, self.ctx.new_unsolvable(node)
  f_locals = abstract_utils.get_atomic_python_constant(f_locals)
  # retrieve __qualname__ to get the name of class
  name_var = f_locals["__qualname__"]
  cls_name = abstract_utils.get_atomic_python_constant(name_var)
  if "." in cls_name:
    # Keep only the last component of a dotted qualname.
    cls_name = cls_name.rsplit(".", 1)[-1]
  if total is None:
    # TypedDict's `total` defaults to True.
    total = True
  else:
    total = abstract_utils.get_atomic_python_constant(total, bool)
  props = TypedDictProperties(
      name=cls_name, fields={}, required=set(), total=total)
  # Collect the key types defined in the current class.
  cls_locals = classgen.get_class_locals(
      cls_name,
      allow_methods=False,
      ordering=classgen.Ordering.FIRST_ANNOTATE,
      ctx=self.ctx)
  for k, local in cls_locals.items():
    assert local.typ
    props.add(k, local.typ, total)
  # Process base classes and generate the __init__ signature.
  self._validate_bases(cls_name, bases)
  self._merge_base_class_fields(bases, props)
  cls = TypedDictClass(props, self, self.ctx)
  cls_var = cls.to_variable(node)
  return node, cls_var
def update_kwargs(self, args):
  """Update current_args with the Args passed to the decorator."""
  self._current_args = self.get_initial_args()
  for key, value_var in args.namedargs.items():
    if key not in self._current_args:
      # Unknown keyword: ignore it here.
      continue
    try:
      self._current_args[key] = abstract_utils.get_atomic_python_constant(
          value_var)
    except abstract_utils.ConversionError:
      self.ctx.errorlog.not_supported_yet(
          self.ctx.vm.frames, "Non-constant argument to decorator: %r" % key)
def call(self, node, func, args):
  """Implement a call to the type() builtin.

  Handles the three-argument class-creation form, the type(self) idiom in
  __init__, and type(Protocol); otherwise defers to the parent implementation
  and strips TypeVars from the result.
  """
  if len(args.posargs) == 4:
    # Four posargs: a metaclass call (cls, name, bases, class dict).
    self.match_args(node, args)  # May raise FailedFunctionCall.
    cls, name_var, bases_var, class_dict_var = args.posargs
    try:
      bases = list(
          abstract_utils.get_atomic_python_constant(bases_var))
      if not bases:
        bases = [
            self.ctx.convert.object_type.to_variable(self.ctx.root_node)
        ]
      node, variable = self.ctx.make_class(node, name_var, bases,
                                           class_dict_var, cls)
    except abstract_utils.ConversionError:
      pass
    else:
      return node, variable
  elif (args.posargs and self.ctx.callself_stack and
        args.posargs[-1].data == self.ctx.callself_stack[-1].data):
    # We're calling type(self) in an __init__ method. A common pattern for
    # making a class non-instantiable is:
    #   class Foo:
    #     def __init__(self):
    #       if type(self) is Foo:
    #         raise ...
    # If we were to return 'Foo', pytype would think that this constructor
    # can never return. The correct return type is something like
    # TypeVar(bound=Foo), but we can't introduce a type parameter that isn't
    # bound to a class or function, so we'll go with Any.
    self.match_args(node, args)  # May raise FailedFunctionCall.
    return node, self.ctx.new_unsolvable(node)
  elif args.posargs and all(
      v.full_name == "typing.Protocol" for v in args.posargs[-1].data):
    # type(Protocol) is a _ProtocolMeta class that inherits from abc.ABCMeta.
    # Changing the definition of Protocol in typing.pytd to include this
    # metaclass causes a bunch of weird breakages, so we instead return the
    # metaclass when type() or __class__ is accessed on Protocol. For
    # simplicity, we pretend the metaclass is ABCMeta rather than a subclass.
    self.match_args(node, args)  # May raise FailedFunctionCall.
    abc = self.ctx.vm.import_module("abc", "abc", 0).get_module("ABCMeta")
    abc.load_lazy_attribute("ABCMeta")
    return node, abc.members["ABCMeta"].AssignToNewVariable(node)
  node, raw_ret = super().call(node, func, args)
  # Removes TypeVars from the return value.
  ret = self.ctx.program.NewVariable()
  for b in raw_ret.bindings:
    value = self.ctx.annotation_utils.deformalize(b.data)
    ret.AddBinding(value, {b}, node)
  return node, ret
def pop_slot(self, node, key_var, default_var=None):
  """Implement dict.pop for a concrete dict value.

  Raises:
    function.DictKeyMissing: If a concrete key is absent and no default
        was supplied.
  """
  try:
    str_key = abstract_utils.get_atomic_python_constant(key_var, str)
  except abstract_utils.ConversionError:
    # A non-constant key means we can no longer track the dict's contents;
    # note that this flag is permanently set on the instance, then the
    # fall-through below dispatches to the pytd signature.
    self.could_contain_anything = True
  if self.could_contain_anything:
    if default_var:
      return self.call_pytd(node, "pop", key_var, default_var)
    else:
      return self.call_pytd(node, "pop", key_var)
  if default_var:
    return node, self.pyval.pop(str_key, default_var)
  else:
    try:
      return node, self.pyval.pop(str_key)
    except KeyError as e:
      raise function.DictKeyMissing(str_key) from e
def make_class(node, name_var, bases, class_dict_var, cls_var, new_class_var, is_decorated, class_type, ctx):
  """Create a class with the name, bases and methods given.

  Args:
    node: The current CFG node.
    name_var: Class name.
    bases: Base classes.
    class_dict_var: Members of the class, as a Variable containing an
        abstract.Dict value.
    cls_var: The class's metaclass, if any.
    new_class_var: If not None, make_class() will return new_class_var with
        the newly constructed class added as a binding. Otherwise, a new
        variable if returned.
    is_decorated: True if the class definition has a decorator.
    class_type: The internal type to build an instance of. Defaults to
        abstract.InterpreterClass. If set, must be a subclass of
        abstract.InterpreterClass.
    ctx: The current context.

  Returns:
    A node and an instance of class_type.
  """
  name = abstract_utils.get_atomic_python_constant(name_var)
  log.info("Declaring class %s", name)
  try:
    class_dict = abstract_utils.get_atomic_value(class_dict_var)
  except abstract_utils.ConversionError:
    log.error("Error initializing class %r", name)
    return ctx.convert.create_new_unknown(node)
  # Handle six.with_metaclass.
  metacls, bases = _filter_out_metaclasses(bases, ctx)
  if metacls:
    cls_var = metacls
  # Flatten Unions in the bases
  bases = [_process_base_class(node, base, ctx) for base in bases]
  # Expand Protocol[T, ...] to Protocol, Generic[T, ...]
  bases = _expand_generic_protocols(node, bases, ctx)
  if not bases:
    # A parent-less class inherits from classobj in Python 2 and from object
    # in Python 3.
    base = ctx.convert.object_type
    bases = [base.to_variable(ctx.root_node)]
  if (isinstance(class_dict, abstract.Unsolvable) or
      not isinstance(class_dict, mixin.PythonConstant)):
    # An unsolvable appears here if the vm hit maximum depth and gave up on
    # analyzing the class we're now building. Otherwise, if class_dict isn't
    # a constant, then it's an abstract dictionary, and we don't have enough
    # information to continue building the class.
    var = ctx.new_unsolvable(node)
  else:
    if cls_var is None:
      cls_var = class_dict.members.get("__metaclass__")
      if cls_var:
        # This way of declaring metaclasses no longer works in Python 3.
        ctx.errorlog.ignored_metaclass(
            ctx.vm.frames, name,
            cls_var.data[0].full_name if cls_var.bindings else "Any")
    if cls_var and all(v.data.full_name == "builtins.type"
                       for v in cls_var.bindings):
      # An explicit metaclass of plain `type` is the same as no metaclass.
      cls_var = None
    # pylint: disable=g-long-ternary
    cls = abstract_utils.get_atomic_value(
        cls_var, default=ctx.convert.unsolvable) if cls_var else None
    if ("__annotations__" not in class_dict.members and
        name in ctx.vm.annotated_locals):
      # Stores type comments in an __annotations__ member as if they were
      # PEP 526-style variable annotations, so that we can type-check
      # attribute assignments.
      annotations_dict = ctx.vm.annotated_locals[name]
      if any(local.typ for local in annotations_dict.values()):
        annotations_member = abstract.AnnotationsDict(
            annotations_dict, ctx).to_variable(node)
        class_dict.members["__annotations__"] = annotations_member
        class_dict.pyval["__annotations__"] = annotations_member
    try:
      if not class_type:
        class_type = abstract.InterpreterClass
      elif class_type is not abstract.InterpreterClass:
        assert issubclass(class_type, abstract.InterpreterClass)
      val = class_type(name, bases, class_dict.pyval, cls, ctx)
      _check_final_members(val, class_dict.pyval, ctx)
      val.is_decorated = is_decorated
    except mro.MROError as e:
      ctx.errorlog.mro_error(ctx.vm.frames, name, e.mro_seqs)
      var = ctx.new_unsolvable(node)
    except abstract_utils.GenericTypeError as e:
      ctx.errorlog.invalid_annotation(ctx.vm.frames, e.annot, e.error)
      var = ctx.new_unsolvable(node)
    else:
      if new_class_var:
        var = new_class_var
      else:
        var = ctx.program.NewVariable()
      var.AddBinding(val, class_dict_var.bindings, node)
      node = val.call_metaclass_init(node)
      node = val.call_init_subclass(node)
      if not val.is_abstract:
        # Since a class decorator could have made the class inherit from
        # ABCMeta, we have to mark concrete classes now and check for
        # abstract methods at postprocessing time.
        ctx.vm.concrete_classes.append((val, ctx.vm.simple_stack()))
  ctx.vm.trace_opcode(None, name, var)
  return node, var
def _getargs(self, node, args):
  """Extracts the typename, field_names and rename arguments.

  collections.namedtuple takes potentially 4 arguments, but we only care
  about three of them. This function checks the argument count and ensures
  multiple values aren't passed for 'verbose' and 'rename'.

  Args:
    node: The current CFG node. Used by match_args.
    args: A function.Args object

  Returns:
    A tuple containing the typename, field_names, defaults, and rename
    arguments passed to this call to collections.namedtuple. defaults is
    postprocessed from a sequence of defaults to a sequence of bools
    describing whether each field has a default (e.g., for
    collections.namedtuple('X', field_names=['a', 'b'], defaults=[0]) this
    method will return [False, True] for defaults to indicate that 'a' does
    not have a default while 'b' does).

  Raises:
    function.FailedFunctionCall: The arguments do not match those needed by
      the function call. See also: abstract.PyTDFunction.match_args().
    abstract_utils.ConversionError: One of the args could not be extracted.
      Typically occurs if typename or one of the field names is in unicode.
  """
  # abstract.PyTDFunction.match_args checks the args for this call.
  self.match_args(node, args)

  # namedtuple only has one signature
  sig, = self.signatures
  callargs = {name: var for name, var, _ in sig.signature.iter_args(args)}

  # The name of the namedtuple class is the first arg (a Variable)
  # We need the actual Variable later, so we'll just return name_var and
  # extract the name itself later.
  name_var = callargs["typename"]

  # The fields are also a Variable, which stores the field names as Variables.
  # Extract the list itself, we don't need the wrapper.
  fields_var = callargs["field_names"]
  fields = abstract_utils.get_atomic_python_constant(fields_var)
  # namedtuple fields can be given as a single string, e.g. "a, b, c" or as a
  # list [Variable('a'), Variable('b'), Variable('c')].
  # We just want a list of strings.
  if isinstance(fields, (bytes, str)):
    fields = utils.native_str(fields)
    field_names = fields.replace(",", " ").split()
  else:
    field_names = [abstract_utils.get_atomic_python_constant(f)
                   for f in fields]
    field_names = [utils.native_str(f) for f in field_names]

  if "defaults" in callargs:
    default_vars = abstract_utils.get_atomic_python_constant(
        callargs["defaults"])
    num_defaults = len(default_vars)
    # BUGFIX: the default flags must be sized by the number of parsed field
    # names, not len(fields) — when the fields are passed as a single
    # comma-separated string, len(fields) is the string's character count,
    # which produced a defaults list of the wrong length.
    defaults = ([False] * (len(field_names) - num_defaults) +
                [True] * num_defaults)
  else:
    defaults = [False] * len(field_names)

  # namedtuple also takes a "verbose" argument, but we don't care about that.

  # rename will take any problematic field names and give them a new name.
  # Like the other args, it's stored as a Variable, but we want just a bool.
  if "rename" in callargs:
    rename = abstract_utils.get_atomic_python_constant(callargs["rename"])
  else:
    rename = False

  return name_var, field_names, defaults, rename
def call(self, node, _, args):
  """Creates a namedtuple class definition.

  Performs the same argument checking as collections.namedtuple, e.g. making
  sure field names don't start with _ or digits, making sure no keywords are
  used for the typename or field names, and so on. Because the methods of the
  class have to be changed to match the number and names of the fields, we
  construct pytd.Function and pytd.Constant instances for each member of the
  class. Finally, the pytd.Class is wrapped in an abstract.PyTDClass.

  If incorrect arguments are passed, a subclass of
  function.FailedFunctionCall will be raised. Other cases may raise
  abstract_utils.ConversionError exceptions, such as when the arguments are
  in unicode or any of the arguments have multiple bindings, but these are
  caught and return Any. This also occurs if an argument to namedtuple is
  invalid, e.g. a keyword is used as a field name and rename is False.

  Arguments:
    node: the current CFG node
    _: the func binding, ignored.
    args: a function.Args instance

  Returns:
    a tuple of the given CFG node and an abstract.PyTDClass instance (wrapped
    in a Variable) representing the constructed namedtuple class.
    If an abstract_utils.ConversionError occurs or if field names are
    invalid, this function returns Unsolvable (in a Variable) instead of a
    PyTDClass.

  Raises:
    function.FailedFunctionCall: Raised by _getargs if any of the arguments
      do not match the function signature.
  """
  # If we can't extract the arguments, we take the easy way out and return Any
  try:
    name_var, field_names, defaults, rename = self._getargs(node, args)
  except abstract_utils.ConversionError:
    return node, self.ctx.new_unsolvable(node)

  # We need the bare name for a few things, so pull that out now.
  # The same unicode issue can strike here, so again return Any.
  try:
    name = abstract_utils.get_atomic_python_constant(name_var)
  except abstract_utils.ConversionError:
    return node, self.ctx.new_unsolvable(node)

  # namedtuple does some checking and optionally renaming of field names,
  # so we do too.
  try:
    field_names = self._validate_and_rename_args(name, field_names, rename)
  except ValueError as e:
    self.ctx.errorlog.invalid_namedtuple_arg(self.ctx.vm.frames,
                                             utils.message(e))
    return node, self.ctx.new_unsolvable(node)

  name = escape.pack_namedtuple(name, field_names)
  ast = namedtuple_ast(name, field_names, defaults, options=self.ctx.options)
  mapping = self._get_known_types_mapping()

  # A truly well-formed pyi for the namedtuple will have references to the new
  # namedtuple class itself for all `self` params and as the return type for
  # methods like __new__, _replace and _make. In order to do that, we need a
  # ClassType.
  cls_type = pytd.ClassType(name)
  mapping[name] = cls_type

  cls = ast.Lookup(name).Visit(visitors.ReplaceTypes(mapping))
  cls_type.cls = cls

  # Make sure all NamedType nodes have been replaced by ClassType nodes with
  # filled cls pointers.
  cls.Visit(visitors.VerifyLookup())

  # We can't build the PyTDClass directly, and instead must run it through
  # convert.constant_to_value first, for caching.
  instance = self.ctx.convert.constant_to_value(cls, {}, self.ctx.root_node)
  self.ctx.vm.trace_namedtuple(instance)
  return node, instance.to_variable(node)
def get_file_mode(sig, args):
  """Return the constant 'mode' argument of a file-open call, or ""."""
  callargs = {name: var for name, var, _ in sig.signature.iter_args(args)}
  if "mode" not in callargs:
    return ""
  return abstract_utils.get_atomic_python_constant(callargs["mode"])
def _check_value(self, node, name_var, value_var):
  """Check that value has the right type."""
  # We have already called check_key so name is in fields and the constant
  # extraction cannot fail here.
  key = abstract_utils.get_atomic_python_constant(name_var, str)
  self._check_str_key_value(node, key, value_var)