Example #1
def _load_from_file_system(grammar, path, p_time):
    cache_path = _get_hashed_path(grammar, path)
    try:
        try:
            if p_time > os.path.getmtime(cache_path):
                # Cache is outdated
                return None
        except OSError as e:
            if e.errno == errno.ENOENT:
                # In Python 2 we get an OSError here instead of an IOError.
                raise FileNotFoundError
            else:
                raise

        with open(cache_path, 'rb') as f:
            gc.disable()
            try:
                module_cache_item = pickle.load(f)
            finally:
                gc.enable()
    except FileNotFoundError:
        return None
    else:
        parser_cache[path] = module_cache_item
        debug.dbg('pickle loaded: %s', path)
        return module_cache_item.node
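The helper `_get_hashed_path` is not shown above. A minimal sketch of what such a helper might look like; the hash scheme and cache directory here are assumptions for illustration, not parso's actual implementation:

import hashlib
import os

def _get_hashed_path(grammar, path, cache_dir='~/.cache/parser'):
    # Hypothetical: derive a stable cache file name from a grammar
    # identifier and the source path.
    file_hash = hashlib.sha256(path.encode('utf-8')).hexdigest()
    return os.path.join(os.path.expanduser(cache_dir),
                        '%s-%s.pkl' % (grammar, file_hash))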
Example #2
    def get_return_types(self, check_yields=False):
        func = self.base

        if func.isinstance(LambdaWrapper):
            return self._evaluator.eval_element(self.children[-1])

        if func.listeners:
            # Feed the listeners, with the params.
            for listener in func.listeners:
                listener.execute(self._get_params())
            # If we do have listeners, that means that there's not a regular
            # execution ongoing. In this case Jedi is interested in the
            # inserted params, not in the actual execution of the function.
            return []

        if check_yields:
            types = []
            returns = self.yields
        else:
            returns = self.returns
            types = list(docstrings.find_return_types(self._evaluator, func))

        for r in returns:
            check = flow_analysis.break_check(self._evaluator, self, r)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                types += self._evaluator.eval_element(r.children[1])
            if check is flow_analysis.REACHABLE:
                debug.dbg('Return reachable: %s', r)
                break
        return types
Example #3
def eval_trailer(context, base_contexts, trailer):
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children

        # TODO It's kind of stupid to cast this from a context set to a set.
        foo = set(base_contexts)
        # special case: PEP0484 typing module, see
        # https://github.com/davidhalter/jedi/issues/663
        result = ContextSet()
        for typ in list(foo):
            if isinstance(typ, (ClassContext, TreeInstance)):
                typing_module_types = pep0484.py__getitem__(context, typ, node)
                if typing_module_types is not None:
                    foo.remove(typ)
                    result |= typing_module_types

        return result | base_contexts.get_item(
            eval_subscript_list(context.evaluator, context, node),
            ContextualizedNode(context, trailer)
        )
    else:
        debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
        if trailer_op == '.':
            return base_contexts.py__getattribute__(
                name_context=context,
                name_or_str=node
            )
        else:
            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
            args = arguments.TreeArguments(context.evaluator, context, node, trailer)
            return base_contexts.execute(args)
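The unpacking of `trailer.children` above relies on the shape of parso's trailer nodes. A quick standalone check of that shape, assuming parso is installed:

import parso

module = parso.parse('foo[0]\n')
atom_expr = module.children[0].children[0]  # the node for `foo[0]`
trailer = atom_expr.children[1]
assert trailer.type == 'trailer'
# The children are '[', the subscript node and ']', matching the
# three-way unpacking above.
print([getattr(c, 'value', c.type) for c in trailer.children])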
Example #4
    def find(self, scopes, attribute_lookup):
        """
        :param bool attribute_lookup: Tells the logic whether we're accessing
            the attribute or the contents of e.g. a function.
        """
        # TODO rename scopes to names_dicts

        names = self.filter_name(scopes)
        if self._found_predefined_if_name is not None:
            return self._found_predefined_if_name

        types = self._names_to_types(names, attribute_lookup)

        if (
            not names
            and not types
            and not (isinstance(self.name_str, tree.Name) and isinstance(self.name_str.parent.parent, tree.Param))
        ):
            if not isinstance(self.name_str, (str, unicode)):  # TODO Remove?
                if attribute_lookup:
                    analysis.add_attribute_error(self._evaluator, self.scope, self.name_str)
                else:
                    message = "NameError: name '%s' is not defined." % self.name_str
                    analysis.add(self._evaluator, "name-error", self.name_str, message)

        debug.dbg("finder._names_to_types: %s -> %s", names, types)
        return types
Example #5
    def get_return_values(self, check_yields=False):
        funcdef = self.tree_node
        if funcdef.type == 'lambdef':
            return self.eval_node(funcdef.children[-1])

        if check_yields:
            context_set = NO_CONTEXTS
            returns = get_yield_exprs(self.evaluator, funcdef)
        else:
            returns = funcdef.iter_return_stmts()
            context_set = docstrings.infer_return_types(self.function_context)
            context_set |= pep0484.infer_return_types(self.function_context)

        for r in returns:
            check = flow_analysis.reachability_check(self, funcdef, r)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                if check_yields:
                    context_set |= ContextSet.from_sets(
                        lazy_context.infer()
                        for lazy_context in self._get_yield_lazy_context(r)
                    )
                else:
                    try:
                        children = r.children
                    except AttributeError:
                        ctx = compiled.builtin_from_name(self.evaluator, u'None')
                        context_set |= ContextSet(ctx)
                    else:
                        context_set |= self.eval_node(children[1])
            if check is flow_analysis.REACHABLE:
                debug.dbg('Return reachable: %s', r)
                break
        return context_set
Example #6
    def _copy_from_old_parser(self, line_offset, until_line_old, until_line_new):
        copied_nodes = [None]

        while until_line_new > self._nodes_stack.parsed_until_line:
            parsed_until_line_old = self._nodes_stack.parsed_until_line - line_offset
            line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1)
            if line_stmt is None:
                # Parse at least one line. We don't need more, because we just
                # want to get into a state where the old parser has statements
                # again that can be copied (e.g. not lines within parentheses).
                self._parse(self._nodes_stack.parsed_until_line + 1)
            elif not copied_nodes:
                # We have copied as much as possible (but definitely not too
                # much). Therefore we just parse the rest.
                # We might not reach the end, because there's a statement
                # that is not finished.
                self._parse(until_line_new)
            else:
                p_children = line_stmt.parent.children
                index = p_children.index(line_stmt)

                copied_nodes = self._nodes_stack.copy_nodes(
                    p_children[index:],
                    until_line_old,
                    line_offset
                )
                # Match all the nodes that are in the wanted range.
                if copied_nodes:
                    self._copy_count += 1

                    from_ = copied_nodes[0].get_start_pos_of_prefix()[0] + line_offset
                    to = self._nodes_stack.parsed_until_line
                    self._copied_ranges.append((from_, to))

                    debug.dbg('diff actually copy %s to %s', from_, to)
Example #7
def _apply_decorators(context, node):
    """
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_context = ClassContext(
            context.evaluator,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_context = FunctionContext.from_context(context, node)
    initial = values = ContextSet(decoratee_context)
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values)
        dec_values = context.eval_node(dec.children[1])
        trailer_nodes = dec.children[2:-1]
        if trailer_nodes:
            # Create a trailer and evaluate it.
            trailer = tree.PythonNode('trailer', trailer_nodes)
            trailer.parent = dec
            dec_values = eval_trailer(context, dec_values, trailer)

        if not len(dec_values):
            debug.warning('decorator not found: %s on %s', dec, node)
            return initial

        values = dec_values.execute(arguments.ValuesArguments([values]))
        if not len(values):
            debug.warning('not possible to resolve wrappers found %s', node)
            return initial

        debug.dbg('decorator end %s', values)
    return values
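The `reversed()` call mirrors plain Python semantics: the decorator closest to the function is applied first. A minimal self-contained check:

def deco_a(f):
    return ('a', f)

def deco_b(f):
    return ('b', f)

@deco_a
@deco_b
def func():
    pass

# func == deco_a(deco_b(func)): the innermost decorator wraps first.
assert func[0] == 'a' and func[1][0] == 'b'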
Example #8
    def get_return_values(self, check_yields=False):
        funcdef = self.tree_node
        if funcdef.type == 'lambdef':
            return self.evaluator.eval_element(self, funcdef.children[-1])

        if check_yields:
            types = set()
            returns = funcdef.iter_yield_exprs()
        else:
            returns = funcdef.iter_return_stmts()
            types = set(docstrings.infer_return_types(self.function_context))
            types |= set(pep0484.infer_return_types(self.function_context))

        for r in returns:
            check = flow_analysis.reachability_check(self, funcdef, r)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                if check_yields:
                    types |= set(self._eval_yield(r))
                else:
                    types |= self.eval_node(r.children[1])
            if check is flow_analysis.REACHABLE:
                debug.dbg('Return reachable: %s', r)
                break
        return types
Example #9
File: __init__.py Project: ABob/vim
    def _simple_complete(self, path, dot, like):
        if not path and not dot:
            scope = self._parser.user_scope()
            if not scope.is_scope():  # Might be a flow (if/while/etc).
                scope = scope.get_parent_scope()
            names_dicts = global_names_dict_generator(
                self._evaluator,
                self._evaluator.wrap(scope),
                self._pos
            )
            completion_names = []
            for names_dict, pos in names_dicts:
                names = list(chain.from_iterable(names_dict.values()))
                if not names:
                    continue
                completion_names += filter_definition_names(names, self._parser.user_stmt(), pos)
        elif self._get_under_cursor_stmt(path) is None:
            return []
        else:
            scopes = list(self._prepare_goto(path, True))
            completion_names = []
            debug.dbg('possible completion scopes: %s', scopes)
            for s in scopes:
                names = []
                for names_dict in s.names_dicts(search_global=False):
                    names += chain.from_iterable(names_dict.values())

                completion_names += filter_definition_names(names, self._parser.user_stmt())
        return completion_names
Example #10
    def filter_name(self, filters):
        """
        Searches names that are defined in a scope (the different
        ``filters``), until a name fits.
        """
        names = []
        if self._context.predefined_names:
            # TODO is this ok? node might not always be a tree.Name
            node = self._name
            while node is not None and not is_scope(node):
                node = node.parent
                if node.type in ("if_stmt", "for_stmt", "comp_for"):
                    try:
                        name_dict = self._context.predefined_names[node]
                        types = name_dict[self._string_name]
                    except KeyError:
                        continue
                    else:
                        self._found_predefined_types = types
                        break

        for filter in filters:
            names = filter.get(self._string_name)
            if names:
                break
        debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name,
                  self._context, names, self._position)
        return list(names)
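The loop over `filters` is a first-match scope lookup: the innermost filter that yields any names wins. A toy illustration with plain dicts standing in for Jedi's filter objects (hypothetical data):

def first_match(string_name, filters):
    for flt in filters:
        names = flt.get(string_name, [])
        if names:
            return names
    return []

filters = [{'x': ['x_local']}, {'x': ['x_global'], 'y': ['y_global']}]
assert first_match('x', filters) == ['x_local']  # inner scope shadows outer
assert first_match('y', filters) == ['y_global']
assert first_match('z', filters) == []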
Example #11
    def __next__(self):
        """ Generate the next tokenize pattern. """
        try:
            typ, tok, start_pos, end_pos, self.parserline = next(self._gen)
            # dedents shouldn't change positions
            if typ != tokenize.DEDENT:
                self.start_pos, self.end_pos = start_pos, end_pos
        except (StopIteration, common.MultiLevelStopIteration):
            # on finish, set end_pos correctly
            s = self.scope
            while s is not None:
                if isinstance(s, pr.Module) \
                        and not isinstance(s, pr.SubModule):
                    self.module.end_pos = self.end_pos
                    break
                s.end_pos = self.end_pos
                s = s.parent
            raise

        if self.user_position and (self.start_pos[0] == self.user_position[0]
                            or self.user_scope is None
                            and self.start_pos[0] >= self.user_position[0]):
            debug.dbg('user scope found [%s] = %s' %
                    (self.parserline.replace('\n', ''), repr(self.scope)))
            self.user_scope = self.scope
        self.last_token = self.current
        self.current = (typ, tok)
        return self.current
Example #12
    def _names_to_types(self, names, attribute_lookup):
        types = unite(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, types)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not types and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                return types
            while True:
                flow_scope = get_parent_scope(flow_scope, include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return types
Example #13
def search_params(evaluator, param):
    """
    A dynamic search for param values. If you try to complete a type:

    >>> def func(foo):
    ...     foo
    >>> func(1)
    >>> func("")

    It is not known what type ``foo`` is without analysing the whole code. You
    have to look for all calls to ``func`` to find out what ``foo`` possibly
    is.
    """
    if not settings.dynamic_params:
        return []

    func = param.get_parent_until(tree.Function)
    debug.dbg('Dynamic param search for %s in %s.', param, str(func.name))
    # Compare the param names.
    names = [n for n in search_function_call(evaluator, func)
             if n.value == param.name.value]
    # Evaluate the ExecutedParams to types.
    result = list(chain.from_iterable(n.parent.eval(evaluator) for n in names))
    debug.dbg('Dynamic param result %s', result)
    return result
Example #14
    def _parse(self, source):
        """ :type source: str """
        added_newline = False
        if not source or source[-1] != '\n':
            # To be compatible with Python's grammar, we need a newline at the
            # end. The parser would handle it, but since the fast parser abuses
            # the normal parser in various ways, we need to care for this
            # ourselves.
            source += '\n'
            added_newline = True

        next_line_offset = line_offset = 0
        start = 0
        nodes = list(self.current_node.all_sub_nodes())
        # Now we can reset the node, because we have all the old nodes.
        self.current_node.reset_node()
        last_end_line = 1

        for code_part in self._split_parts(source):
            next_line_offset += code_part.count('\n')
            # If the last code part parsed isn't equal to the current end_pos,
            # we know that the parser went further (`def` start in a
            # docstring). So just parse the next part.
            if line_offset + 1 == last_end_line:
                self.current_node = self._get_node(code_part, source[start:],
                                                   line_offset, nodes)
            else:
                # Means that some lines were not fully parsed. Parse them now.
                # This is a very rare case. It should only happen with very
                # strange code bits.
                self.number_of_misses += 1
                while last_end_line < next_line_offset + 1:
                    line_offset = last_end_line - 1
                    # We could calculate the src in a more complicated way to
                    # make caching here possible as well. However, this is
                    # complicated and error-prone. Since this is not very often
                    # called - just ignore it.
                    src = ''.join(self._lines[line_offset:])
                    self.current_node = self._get_node(code_part, src,
                                                       line_offset, nodes)
                    last_end_line = self.current_node.parser.module.end_pos[0]

                debug.dbg('While parsing %s, line %s slowed down the fast parser.',
                          self.module_path, line_offset + 1)

            line_offset = next_line_offset
            start += len(code_part)

            last_end_line = self.current_node.parser.module.end_pos[0]

        if added_newline:
            self.current_node.remove_last_newline()

        # Now that the for loop is finished, we still want to close all nodes.
        self.current_node = self.current_node.parent_until_indent()
        self.current_node.close()

        debug.dbg('Parsed %s, with %s parsers in %s splits.'
                  % (self.module_path, self.number_parsers_used,
                     self.number_of_splits))
Example #15
    def execute(self, obj, params=(), evaluate_generator=False):
        if obj.isinstance(er.Function):
            obj = obj.get_decorated_func()

        debug.dbg('execute: %s %s', obj, params)
        try:
            return stdlib.execute(self, obj, params)
        except stdlib.NotInStdLib:
            pass

        if isinstance(obj, iterable.GeneratorMethod):
            return obj.execute()
        elif obj.isinstance(compiled.CompiledObject):
            if obj.is_executable_class():
                return [er.Instance(self, obj, params)]
            else:
                return list(obj.execute_function(self, params))
        elif obj.isinstance(er.Class):
            # There may be executions of executions.
            return [er.Instance(self, obj, params)]
        else:
            stmts = []
            if obj.isinstance(er.Function):
                stmts = er.FunctionExecution(self, obj, params).get_return_types(evaluate_generator)
            else:
                if hasattr(obj, 'execute_subscope_by_name'):
                    try:
                        stmts = obj.execute_subscope_by_name('__call__', params)
                    except KeyError:
                        debug.warning("no __call__ func available %s", obj)
                else:
                    debug.warning("no execution possible %s", obj)

            debug.dbg('execute result: %s in %s', stmts, obj)
            return imports.strip_imports(self, stmts)
Example #16
    def _simple_complete(self, path, like):
        try:
            scopes = list(self._prepare_goto(path, True))
        except NotFoundError:
            scopes = []
            scope_generator = evaluate.get_names_of_scope(
                self._parser.user_scope, self._pos)
            completions = []
            for scope, name_list in scope_generator:
                for c in name_list:
                    completions.append((c, scope))
        else:
            completions = []
            debug.dbg('possible scopes', scopes)
            for s in scopes:
                if s.isinstance(er.Function):
                    names = s.get_magic_method_names()
                else:
                    if isinstance(s, imports.ImportPath):
                        under = like + self._module.get_path_after_cursor()
                        if under == 'import':
                            current_line = self._module.get_position_line()
                            if not current_line.endswith('import import'):
                                continue
                        a = s.import_stmt.alias
                        if a and a.start_pos <= self._pos <= a.end_pos:
                            continue
                        names = s.get_defined_names(on_import_stmt=True)
                    else:
                        names = s.get_defined_names()

                for c in names:
                    completions.append((c, s))
        return completions
Example #17
    def _prepare_goto(self, goto_path, is_completion=False):
        """
        Base for completions/goto. Basically it returns the resolved scopes
        under the cursor.
        """
        debug.dbg('start: %s in %s', goto_path, self._parser.user_scope())

        user_stmt = self._parser.user_stmt_with_whitespace()
        if not user_stmt and len(goto_path.split('\n')) > 1:
            # If the user_stmt is not defined and the goto_path is multi line,
            # something's strange. Most probably the backwards tokenizer
            # matched too much.
            return []

        if isinstance(user_stmt, pr.Import):
            i, _ = helpers.get_on_import_stmt(self._evaluator, self._user_context,
                                              user_stmt, is_completion)
            if i is None:
                return []
            scopes = [i]
        else:
            # just parse one statement, take it and evaluate it
            eval_stmt = self._get_under_cursor_stmt(goto_path)
            if eval_stmt is None:
                return []

            module = self._parser.module()
            names, level, _, _ = helpers.check_error_statements(module, self._pos)
            if names:
                i = imports.get_importer(self._evaluator, names, module, level)
                return i.follow(self._evaluator)

            scopes = self._evaluator.eval_element(eval_stmt)

        return scopes
Example #18
    def execute(self, obj, arguments=(), trailer=None):
        if not isinstance(arguments, param.Arguments):
            arguments = param.Arguments(self, arguments, trailer)

        if self.is_analysis:
            arguments.eval_all()

        if obj.isinstance(er.Function):
            obj = obj.get_decorated_func()

        debug.dbg('execute: %s %s', obj, arguments)
        try:
            # Some stdlib functions like super(), namedtuple(), etc. have been
            # hard-coded in Jedi to support them.
            return stdlib.execute(self, obj, arguments)
        except stdlib.NotInStdLib:
            pass

        try:
            func = obj.py__call__
        except AttributeError:
            debug.warning("no execution possible %s", obj)
            return set()
        else:
            types = func(arguments)
            debug.dbg('execute result: %s in %s', types, obj)
            return types
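The AttributeError fallback above is plain EAFP duck typing: anything exposing `py__call__` counts as executable, everything else produces a warning and an empty result. The same dispatch in a self-contained sketch (hypothetical objects, not Jedi's classes):

class FakeCallable(object):
    def py__call__(self, arguments):
        return {'result'}

def execute(obj, arguments=()):
    try:
        func = obj.py__call__
    except AttributeError:
        return set()  # nothing executable here
    return func(arguments)

assert execute(FakeCallable()) == {'result'}
assert execute(object()) == set()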
Example #19
 def _process(self):
     debug.dbg('Start environment subprocess %s', self._executable)
     parso_path = sys.modules['parso'].__file__
     args = (
         self._executable,
         _MAIN_PATH,
         os.path.dirname(os.path.dirname(parso_path)),
         '.'.join(str(x) for x in sys.version_info[:3]),
     )
     process = GeneralizedPopen(
         args,
         stdin=subprocess.PIPE,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         # Use system default buffering on Python 2 to improve performance
         # (this is already the case on Python 3).
         bufsize=-1
     )
     self._stderr_queue = Queue()
     self._stderr_thread = t = Thread(
         target=_enqueue_output,
         args=(process.stderr, self._stderr_queue)
     )
     t.daemon = True
     t.start()
     return process
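`_enqueue_output` is not shown above. Draining stderr on a daemon thread is the standard way to keep the pipe buffer from filling up and deadlocking the child process; one plausible implementation:

def _enqueue_output(out, queue):
    # Read the pipe line by line and hand the lines to the main thread.
    for line in iter(out.readline, b''):
        queue.put(line)
    out.close()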
Example #20
    def execute(self, arguments):
        """
        In contrast to py__call__ this function is always available.

        `hasattr(x, py__call__)` can also be checked to see if a context is
        executable.
        """
        if self.evaluator.is_analysis:
            arguments.eval_all()

        debug.dbg('execute: %s %s', self, arguments)
        from jedi.evaluate import stdlib
        try:
            # Some stdlib functions like super(), namedtuple(), etc. have been
            # hard-coded in Jedi to support them.
            return stdlib.execute(self.evaluator, self, arguments)
        except stdlib.NotInStdLib:
            pass

        try:
            func = self.py__call__
        except AttributeError:
            debug.warning("no execution possible %s", self)
            return NO_CONTEXTS
        else:
            context_set = func(arguments)
            debug.dbg('execute result: %s in %s', context_set, self)
            return context_set

Example #21
        def follow_str(ns_path, string):
            debug.dbg('follow_module', ns_path, string)
            path = None
            if ns_path:
                path = ns_path
            elif self.import_stmt.relative_count:
                path = self.get_relative_path()

            global imports_processed
            imports_processed += 1
            importing = None
            if path is not None:
                importing = find_module(string, [path])
            else:
                debug.dbg('search_module', string, self.file_path)
                # Override the sys.path. It only works well that way.
                # Injecting the path directly into `find_module` did not work.
                sys.path, temp = sys_path_mod, sys.path
                try:
                    importing = find_module(string)
                except ImportError:
                    sys.path = temp
                    raise
                sys.path = temp

            return importing
Example #22
def calculate_children(evaluator, children):
    """
    Calculate a list of children with operators.
    """
    iterator = iter(children)
    types = evaluator.eval_element(next(iterator))
    for operator in iterator:
        right = next(iterator)
        if tree.is_node(operator, 'comp_op'):  # not in / is not
            operator = ' '.join(str(c.value) for c in operator.children)

        # handle lazy evaluation of and/or here.
        if operator in ('and', 'or'):
            left_bools = set([left.py__bool__() for left in types])
            if left_bools == set([True]):
                if operator == 'and':
                    types = evaluator.eval_element(right)
            elif left_bools == set([False]):
                if operator != 'and':
                    types = evaluator.eval_element(right)
            # Otherwise continue, because of uncertainty.
        else:
            types = calculate(evaluator, types, operator,
                              evaluator.eval_element(right))
    debug.dbg('calculate_children types %s', types)
    return types
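The and/or branch encodes Python's short-circuit rule: the right operand only matters when `and` sees a truthy left side or `or` sees a falsy one; with mixed truth values the result stays uncertain. A plain-Python reminder of the rule being modelled:

assert (True and 'right') == 'right'  # truthy left: `and` yields the right side
assert (False or 'right') == 'right'  # falsy left: `or` yields the right side
assert (False and 'right') is False   # falsy left: `and` never needs the right
assert (True or 'right') is True      # truthy left: `or` never needs the right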
Example #23
    def eval_statement(self, stmt, seek_name=None):
        """
        The starting point of the completion. A statement always owns a call
        list, which contains the calls that the statement makes. In case
        multiple names are defined in the statement, `seek_name` returns the
        result for this name.

        :param stmt: A `pr.ExprStmt`.
        """
        debug.dbg('eval_statement %s (%s)', stmt, seek_name)
        types = self.eval_element(stmt.get_rhs())

        if seek_name:
            types = finder.check_tuple_assignments(types, seek_name)

        first_operation = stmt.first_operation()
        if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement):  # TODO don't check for this.
            # `=` is always the last character in aug assignments -> -1
            operator = copy.copy(first_operation)
            operator.value = operator.value[:-1]
            name = str(stmt.get_defined_names()[0])
            parent = er.wrap(self, stmt.get_parent_scope())
            left = self.find_types(parent, name, stmt.start_pos, search_global=True)
            if isinstance(stmt.get_parent_until(pr.ForStmt), pr.ForStmt):
                # Iterate through the result and add the values; that's
                # possible only in for loops without clutter, because they
                # are predictable.
                for r in types:
                    left = precedence.calculate(self, left, operator, [r])
                types = left
            else:
                types = precedence.calculate(self, left, operator, types)
        debug.dbg('eval_statement result %s', types)
        return types
Example #24
    def _prepare_goto(self, goto_path, is_completion=False):
        """
        Base for completions/goto. Basically it returns the resolved scopes
        under the cursor.
        """
        debug.dbg('start: %s in %s', goto_path, self._parser.user_scope())

        user_stmt = self._parser.user_stmt_with_whitespace()
        if not user_stmt and len(goto_path.split('\n')) > 1:
            # If the user_stmt is not defined and the goto_path is multi line,
            # something's strange. Most probably the backwards tokenizer
            # matched too much.
            return []

        if isinstance(user_stmt, pr.Import):
            scopes = [helpers.get_on_import_stmt(self._evaluator, self._user_context,
                                                 user_stmt, is_completion)[0]]
        else:
            # just parse one statement, take it and evaluate it
            eval_stmt = self._get_under_cursor_stmt(goto_path)

            if not is_completion:
                # goto_definition returns definitions of its statements if the
                # cursor is on the assignee. By changing the start_pos of our
                # "pseudo" statement, the Jedi evaluator can find the assignees.
                if user_stmt is not None:
                    eval_stmt.start_pos = user_stmt.end_pos
            scopes = self._evaluator.eval_statement(eval_stmt)
        return scopes
Example #25
    def get_return_types(self):
        func = self.base

        if func.listeners:
            # Feed the listeners, with the params.
            for listener in func.listeners:
                listener.execute(self._get_params())
            # If we do have listeners, that means that there's not a regular
            # execution ongoing. In this case Jedi is interested in the
            # inserted params, not in the actual execution of the function.
            return []

        types = list(docstrings.find_return_types(self._evaluator, func))
        for r in self.returns:
            if isinstance(r, pr.KeywordStatement):
                stmt = r.stmt
            else:
                stmt = r  # Lambdas

            if stmt is None:
                continue

            check = flow_analysis.break_check(self._evaluator, self, r.parent)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                types += self._evaluator.eval_statement(stmt)
            if check is flow_analysis.REACHABLE:
                debug.dbg('Return reachable: %s', r)
                break
        return types
Example #26
def _parse_function_doc(doc):
    """
    Takes a function's docstring and returns the params and return value as a
    tuple. This is nothing more than a docstring parser.

    TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
    TODO docstrings like 'tuple of integers'
    """
    doc = force_unicode(doc)
    # parse round parentheses: def func(a, (b,c))
    try:
        count = 0
        start = doc.index('(')
        for i, s in enumerate(doc[start:]):
            if s == '(':
                count += 1
            elif s == ')':
                count -= 1
            if count == 0:
                end = start + i
                break
        param_str = doc[start + 1:end]
    except (ValueError, UnboundLocalError):
        # ValueError for doc.index
        # UnboundLocalError for undefined end in last line
        debug.dbg('no brackets found - no param')
        end = 0
        param_str = u''
    else:
        # remove square brackets that mark an optional param (= None)
        def change_options(m):
            args = m.group(1).split(',')
            for i, a in enumerate(args):
                if a and '=' not in a:
                    args[i] += '=None'
            return ','.join(args)

        while True:
            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
                                         change_options, param_str)
            if changes == 0:
                break
    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__

    # parse return value
    r = re.search(u'-[>-]* ', doc[end:end + 7])
    if r is None:
        ret = u''
    else:
        index = end + r.end()
        # get result type, which can contain newlines
        pattern = re.compile(r'(,\n|[^\n-])+')
        ret_str = pattern.match(doc, index).group(0).strip()
        # New object -> object()
        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)

        ret = docstr_defaults.get(ret_str, ret_str)

    return param_str, ret
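A worked call, assuming `docstr_defaults` leaves 'bool' unmapped (the input docstring is made up for illustration):

params, ret = _parse_function_doc(u'isinstance(obj, class_or_tuple) -> bool')
assert params == u'obj, class_or_tuple'
assert ret == u'bool'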
Example #27
 def _eval_element_not_cached(self, context, element):
     debug.dbg('eval_element %s@%s', element, element.start_pos)
     types = set()
     typ = element.type
     if typ in ('name', 'number', 'string', 'atom'):
         types = self.eval_atom(context, element)
     elif typ == 'keyword':
         # For False/True/None
         if element.value in ('False', 'True', 'None'):
             types.add(compiled.builtin_from_name(self, element.value))
         # else: print e.g. could be evaluated like this in Python 2.7
     elif typ == 'lambdef':
         types = set([er.FunctionContext(self, context, element)])
     elif typ == 'expr_stmt':
         types = self.eval_statement(context, element)
     elif typ in ('power', 'atom_expr'):
         first_child = element.children[0]
         if not (first_child.type == 'keyword' and first_child.value == 'await'):
             types = self.eval_atom(context, first_child)
             for trailer in element.children[1:]:
                 if trailer == '**':  # has a power operation.
                     right = self.eval_element(context, element.children[2])
                     types = set(precedence.calculate(self, context, types, trailer, right))
                     break
                 types = self.eval_trailer(context, types, trailer)
     elif typ in ('testlist_star_expr', 'testlist',):
         # The implicit tuple in statements.
         types = set([iterable.SequenceLiteralContext(self, context, element)])
     elif typ in ('not_test', 'factor'):
         types = self.eval_element(context, element.children[-1])
         for operator in element.children[:-1]:
             types = set(precedence.factor_calculate(self, types, operator))
     elif typ == 'test':
         # `x if foo else y` case.
         types = (self.eval_element(context, element.children[0]) |
                  self.eval_element(context, element.children[-1]))
     elif typ == 'operator':
         # Must be an ellipsis, other operators are not evaluated.
          # In Python 2 ellipsis is coded as three single dot tokens, not
          # as one 3-dot token.
         assert element.value in ('.', '...')
         types = set([compiled.create(self, Ellipsis)])
     elif typ == 'dotted_name':
         types = self.eval_atom(context, element.children[0])
         for next_name in element.children[2::2]:
             # TODO add search_global=True?
             types = unite(
                 typ.py__getattribute__(next_name, name_context=context)
                 for typ in types
             )
         types = types
     elif typ == 'eval_input':
         types = self._eval_element_not_cached(context, element.children[0])
     elif typ == 'annassign':
         types = pep0484._evaluate_for_annotation(context, element.children[1])
     else:
         types = precedence.calculate_children(self, context, element.children)
     debug.dbg('eval_element result %s', types)
     return types
Example #28
 def __call__(self, execution):
     debug.dbg('Execution recursions: %s', execution, self.recursion_level,
               self.execution_count, len(self.execution_funcs))
     if self.check_recursion(execution):
         result = set()
     else:
         result = self.func(execution)
     self.pop_execution()
     return result
Example #29
    def follow(self, is_goto=False):
        if self._evaluator.recursion_detector.push_stmt(self.import_stmt):
            # check recursion
            return []

        try:
            if self.import_path:
                try:
                    module, rest = self._importer.follow_file_system()
                except ModuleNotFound as e:
                    analysis.add(self._evaluator, 'import-error', e.name_part)
                    return []

                if module is None:
                    return []

                if self.import_stmt.is_nested() and not self.nested_resolve:
                    scopes = [NestedImportModule(module, self.import_stmt)]
                else:
                    scopes = [module]

                star_imports = remove_star_imports(self._evaluator, module)
                if star_imports:
                    scopes = [StarImportModule(scopes[0], star_imports)]

                # goto only accepts `Name`
                if is_goto and not rest:
                    scopes = [s.name for s in scopes]

                # follow the rest of the import (not FS -> classes, functions)
                if len(rest) > 1 or rest and self.is_like_search:
                    scopes = []
                    if ('os', 'path') == self.import_path[:2] \
                            and not self._is_relative_import():
                        # This is a huge exception, we follow a nested import
                        # ``os.path``, because it's a very important one in Python
                        # that is being achieved by messing with ``sys.modules`` in
                        # ``os``.
                        scopes = self._evaluator.follow_path(iter(rest), [module], module)
                elif rest:
                    if is_goto:
                        scopes = list(chain.from_iterable(
                            self._evaluator.find_types(s, rest[0], is_goto=True)
                            for s in scopes))
                    else:
                        scopes = list(chain.from_iterable(
                            self._evaluator.follow_path(iter(rest), [s], s)
                            for s in scopes))
            else:
                scopes = [ImportWrapper.GlobalNamespace]
            debug.dbg('after import: %s', scopes)
            if not scopes:
                analysis.add(self._evaluator, 'import-error',
                             self._importer.import_path[-1])
        finally:
            self._evaluator.recursion_detector.pop_stmt()
        return scopes
Example #30
 def __call__(self, execution, evaluate_generator=False):
     debug.dbg('Execution recursions: %s' % execution, self.recursion_level,
                self.execution_count, len(self.execution_funcs))
     if self.check_recursion(execution, evaluate_generator):
         result = []
     else:
         result = self.func(execution, evaluate_generator)
     self.cleanup()
     return result
Example #31
def _infer_node(context, element):
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword',
               'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(context, value_set, trailer,
                                              right)
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in (
            'testlist_star_expr',
            'testlist',
    ):
        # The implicit tuple in statements.
        return ValueSet(
            [iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        # In Python 2 ellipsis is coded as three single dot tokens, not
        # as one 3-dot token.
        if element.value not in ('.', '...'):
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " %
                                 (repr(element.value), origin))
        return ValueSet(
            [compiled.builtin_from_name(inference_state, u'Ellipsis')])
    elif typ == 'dotted_name':
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name,
                                                     name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)
Example #32
 def _complete_trailer(self, previous_leaf):
     inferred_context = self._module_context.create_context(previous_leaf)
     values = infer_call_of_leaf(inferred_context, previous_leaf)
     debug.dbg('trailer completion values: %s', values, color='MAGENTA')
     return self._complete_trailer_for_values(values)
Example #33
def test_simple():
    jedi.set_debug_function()
    debug.speed('foo')
    debug.dbg('bar')
    debug.warning('baz')
    jedi.set_debug_function(None, False, False, False)
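`set_debug_function` also accepts a custom callback. A minimal sketch, assuming the callback takes the message color and the formatted string like `jedi.debug.print_to_stdout` does (check `jedi.debug` for the exact signature):

import jedi

collected = []

def log_to_list(color, str_out):
    # Collect debug output instead of printing it.
    collected.append(str_out)

jedi.set_debug_function(log_to_list)
# ... run some jedi calls here ...
jedi.set_debug_function(None)  # disable debugging again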
Example #34
 def _eval_element_not_cached(self, context, element):
     debug.dbg('eval_element %s@%s', element, element.start_pos)
     types = set()
     typ = element.type
     if typ in ('name', 'number', 'string', 'atom'):
         types = self.eval_atom(context, element)
     elif typ == 'keyword':
         # For False/True/None
         if element.value in ('False', 'True', 'None'):
             types.add(compiled.builtin_from_name(self, element.value))
         # else: print e.g. could be evaluated like this in Python 2.7
      elif typ == 'lambdef':
         types = set([er.FunctionContext(self, context, element)])
     elif typ == 'expr_stmt':
         types = self.eval_statement(context, element)
     elif typ in ('power', 'atom_expr'):
         first_child = element.children[0]
         if not (first_child.type == 'keyword'
                 and first_child.value == 'await'):
             types = self.eval_atom(context, first_child)
             for trailer in element.children[1:]:
                 if trailer == '**':  # has a power operation.
                     right = self.eval_element(context, element.children[2])
                     types = set(
                         precedence.calculate(self, context, types, trailer,
                                              right))
                     break
                 types = self.eval_trailer(context, types, trailer)
     elif typ in (
             'testlist_star_expr',
             'testlist',
     ):
         # The implicit tuple in statements.
         types = set(
             [iterable.SequenceLiteralContext(self, context, element)])
     elif typ in ('not_test', 'factor'):
         types = self.eval_element(context, element.children[-1])
         for operator in element.children[:-1]:
             types = set(precedence.factor_calculate(self, types, operator))
     elif typ == 'test':
         # `x if foo else y` case.
         types = (self.eval_element(context, element.children[0])
                  | self.eval_element(context, element.children[-1]))
     elif typ == 'operator':
         # Must be an ellipsis, other operators are not evaluated.
         assert element.value == '...'
         types = set([compiled.create(self, Ellipsis)])
     elif typ == 'dotted_name':
         types = self.eval_atom(context, element.children[0])
         for next_name in element.children[2::2]:
             # TODO add search_global=True?
             types = unite(
                 typ.py__getattribute__(next_name, name_context=context)
                 for typ in types)
         types = types
     elif typ == 'eval_input':
         types = self._eval_element_not_cached(context, element.children[0])
     elif typ == 'annassign':
         types = pep0484._evaluate_for_annotation(context,
                                                  element.children[1])
     else:
         types = precedence.calculate_children(self, context,
                                               element.children)
     debug.dbg('eval_element result %s', types)
     return types
Example #35
    def update(self, old_lines, new_lines):
        '''
        The algorithm works as follows:

        Equal:
            - Assure that the start is a newline, otherwise parse until we get
              one.
            - Copy from parsed_until_line + 1 to max(i2 + 1)
            - Make sure that the indentation is correct (e.g. add DEDENT)
            - Add old and change positions
        Insert:
            - Parse from parsed_until_line + 1 to min(j2 + 1), hopefully not
              much more.

        Returns the new module node.
        '''
        debug.speed('diff parser start')
        # Reset the used names cache so they get regenerated.
        self._module._used_names = None

        self._parser_lines_new = new_lines
        self._added_newline = False
        if new_lines[-1] != '':
            # The Python grammar needs a newline at the end of a file, but for
            # everything else we keep working with new_lines here.
            self._parser_lines_new = list(new_lines)
            self._parser_lines_new[-1] += '\n'
            self._parser_lines_new.append('')
            self._added_newline = True

        self._reset()

        line_length = len(new_lines)
        sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new)
        opcodes = sm.get_opcodes()
        debug.speed('diff parser calculated')
        debug.dbg('diff: line_lengths old: %s, new: %s' %
                  (len(old_lines), line_length))

        for operation, i1, i2, j1, j2 in opcodes:
            debug.dbg('diff %s old[%s:%s] new[%s:%s]', operation, i1 + 1, i2,
                      j1 + 1, j2)

            if j2 == line_length + int(self._added_newline):
                # The empty part after the last newline is not relevant.
                j2 -= 1

            if operation == 'equal':
                line_offset = j1 - i1
                self._copy_from_old_parser(line_offset, i2, j2)
            elif operation == 'replace':
                self._parse(until_line=j2)
            elif operation == 'insert':
                self._parse(until_line=j2)
            else:
                assert operation == 'delete'

        # With this action all changes are finally applied and we have a
        # changed module.
        self._nodes_stack.close()

        if self._added_newline:
            _remove_last_newline(self._module)

        # Good for debugging.
        if debug.debug_function:
            self._enabled_debugging(old_lines, new_lines)
        last_pos = self._module.end_pos[0]
        if last_pos != line_length:
            current_lines = splitlines(self._module.get_code(), keepends=True)
            diff = difflib.unified_diff(current_lines, new_lines)
            raise Exception(
                "There's an issue (%s != %s) with the diff parser. Please report:\n%s"
                % (last_pos, line_length, ''.join(diff)))

        debug.speed('diff parser end')
        return self._module
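The opcode tuples consumed by the loop above come straight from difflib; a quick standalone demonstration:

import difflib

old = ['a\n', 'b\n', 'c\n']
new = ['a\n', 'x\n', 'c\n']
sm = difflib.SequenceMatcher(None, old, new)
# Each opcode is (operation, i1, i2, j1, j2), mapping old[i1:i2] to new[j1:j2].
print(sm.get_opcodes())
# [('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 2), ('equal', 2, 3, 2, 3)]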
Example #36
def import_module(evaluator, import_names, parent_module_context, sys_path):
    """
    This method is very similar to importlib's `_gcd_import`.
    """
    if import_names[0] in settings.auto_import_modules:
        module = _load_builtin_module(evaluator, import_names, sys_path)
        if module is None:
            return NO_CONTEXTS
        return ContextSet([module])

    module_name = '.'.join(import_names)
    if parent_module_context is None:
        # Override the sys.path. It only works well that way.
        # Injecting the path directly into `find_module` did not work.
        file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
            string=import_names[-1],
            full_name=module_name,
            sys_path=sys_path,
            is_global_search=True,
        )
        if is_pkg is None:
            return NO_CONTEXTS
    else:
        try:
            method = parent_module_context.py__path__
        except AttributeError:
            # The module is not a package.
            return NO_CONTEXTS
        else:
            paths = method()
            for path in paths:
                # At the moment we are only using one path, so it is not
                # that important for this to be correct.
                if not isinstance(path, list):
                    path = [path]
                file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
                    string=import_names[-1],
                    path=path,
                    full_name=module_name,
                    is_global_search=False,
                )
                if is_pkg is not None:
                    break
            else:
                return NO_CONTEXTS

    if isinstance(file_io_or_ns, ImplicitNSInfo):
        from jedi.evaluate.context.namespace import ImplicitNamespaceContext
        module = ImplicitNamespaceContext(
            evaluator,
            fullname=file_io_or_ns.name,
            paths=file_io_or_ns.paths,
        )
    elif file_io_or_ns is None:
        module = _load_builtin_module(evaluator, import_names, sys_path)
        if module is None:
            return NO_CONTEXTS
    else:
        module = _load_python_module(
            evaluator, file_io_or_ns, sys_path,
            import_names=import_names,
            is_package=is_pkg,
        )

    if parent_module_context is None:
        debug.dbg('global search_module %s: %s', import_names[-1], module)
    else:
        debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
    return ContextSet([module])
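The two lookup modes above (a global sys.path search versus a search restricted to the parent package's `__path__`) mirror the stdlib import machinery. A rough analogue using importlib, not Jedi's actual code:

import importlib.util

# Global search: consults sys.path, like the parent_module_context is None branch.
spec = importlib.util.find_spec('email')

# Submodule search: resolved through the parent package, like the __path__ branch.
sub_spec = importlib.util.find_spec('email.mime')
print(spec.name, sub_spec.submodule_search_locations is not None)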
Example #37
    def _parse(self, source):
        """ :type source: str """
        added_newline = False
        if not source or source[-1] != '\n':
            # To be compatible with Python's grammar, we need a newline at the
            # end. The parser would handle it, but since the fast parser abuses
            # the normal parser in various ways, we need to care for this
            # ourselves.
            source += '\n'
            added_newline = True

        next_line_offset = line_offset = 0
        start = 0
        nodes = list(self.current_node.all_sub_nodes())
        # Now we can reset the node, because we have all the old nodes.
        self.current_node.reset_node()
        last_end_line = 1

        for code_part in self._split_parts(source):
            next_line_offset += code_part.count('\n')
            # If the last code part parsed isn't equal to the current end_pos,
            # we know that the parser went further (`def` start in a
            # docstring). So just parse the next part.
            if line_offset + 1 == last_end_line:
                self.current_node = self._get_node(code_part, source[start:],
                                                   line_offset, nodes)
            else:
                # Means that some lines were not fully parsed. Parse them now.
                # This is a very rare case. It should only happen with very
                # strange code bits.
                self.number_of_misses += 1
                while last_end_line < next_line_offset + 1:
                    line_offset = last_end_line - 1
                    # We could calculate the src in a more complicated way to
                    # make caching here possible as well. However, this is
                    # complicated and error-prone. Since this is not very often
                    # called - just ignore it.
                    src = ''.join(self._lines[line_offset:])
                    self.current_node = self._get_node(code_part, src,
                                                       line_offset, nodes)
                    last_end_line = self.current_node.parser.module.end_pos[0]

                debug.dbg(
                    'While parsing %s, line %s slowed down the fast parser.',
                    self.module_path, line_offset + 1)

            line_offset = next_line_offset
            start += len(code_part)

            last_end_line = self.current_node.parser.module.end_pos[0]

        if added_newline:
            self.current_node.remove_last_newline()

        # Now that the for loop is finished, we still want to close all nodes.
        self.current_node = self.current_node.parent_until_indent()
        self.current_node.close()

        debug.dbg('Parsed %s, with %s parsers in %s splits.' %
                  (self.module_path, self.number_parsers_used,
                   self.number_of_splits))
Example #38
    def goto(self, name_or_str, name_context=None, analysis_errors=True):
        from jedi.inference import finder
        filters = self._get_value_filters(name_or_str)
        names = finder.filter_name(filters, name_or_str)
        debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
        return names
Example #39
0
    def find(self, scopes, resolve_decorator=True):
        names = self.filter_name(scopes)
        types = self._names_to_types(names, resolve_decorator)
        debug.dbg('finder._names_to_types: %s, old: %s', names, types)
        return self._resolve_descriptors(types)
Example #40
0
def eval_node(context, element):
    debug.dbg('eval_node %s@%s', element, element.start_pos)
    evaluator = context.evaluator
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
        return eval_atom(context, element)
    elif typ == 'lambdef':
        return ContextSet(FunctionContext(evaluator, context, element))
    elif typ == 'expr_stmt':
        return eval_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        context_set = eval_atom(context, first_child)
        for trailer in children:
            if trailer == '**':  # has a power operation.
                right = context.eval_node(children[1])
                context_set = _eval_comparison(evaluator, context, context_set,
                                               trailer, right)
                break
            context_set = eval_trailer(context, context_set, trailer)

        if had_await:
            await_context_set = context_set.py__getattribute__(u"__await__")
            if not await_context_set:
                debug.warning('Tried to run py__await__ on context %s',
                              context)
            context_set = ContextSet()
            return _py__stop_iteration_returns(
                await_context_set.execute_evaluated())
        return context_set
    elif typ in (
            'testlist_star_expr',
            'testlist',
    ):
        # The implicit tuple in statements.
        return ContextSet(
            iterable.SequenceLiteralContext(evaluator, context, element))
    elif typ in ('not_test', 'factor'):
        context_set = context.eval_node(element.children[-1])
        for operator in element.children[:-1]:
            context_set = eval_factor(context_set, operator)
        return context_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.eval_node(element.children[0])
                | context.eval_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not evaluated.
        # In Python 2, an ellipsis is coded as three single-dot tokens, not
        # as one three-dot token.
        if element.value not in ('.', '...'):
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " %
                                 (repr(element.value), origin))
        return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis'))
    elif typ == 'dotted_name':
        context_set = eval_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            # TODO add search_global=True?
            context_set = context_set.py__getattribute__(next_name,
                                                         name_context=context)
        return context_set
    elif typ == 'eval_input':
        return eval_node(context, element.children[0])
    elif typ == 'annassign':
        return pep0484._evaluate_for_annotation(context, element.children[1])
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.eval_node(element)
            return _py__stop_iteration_returns(generators)

        # Generator.send() is not implemented.
        return NO_CONTEXTS
    else:
        return eval_or_test(context, element)
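Note the calling convention used throughout: debug.dbg('eval_node %s@%s', element, element.start_pos) passes a %-format string plus arguments rather than a pre-formatted string, so interpolation (and any expensive repr) only happens when debugging is on. A minimal stand-alone sketch of that pattern, with _debug_function as a stand-in for jedi's installed callback:

_debug_function = None  # stand-in for the callback installed via set_debug_function

def dbg(message, *args, color='GREEN'):
    # Interpolate lazily: when no callback is installed, the %-args are
    # never formatted, keeping disabled debug calls cheap.
    if _debug_function is not None:
        _debug_function(color, 'dbg: ' + message % args)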
Example #41
0
    def get_metaclass_filters(self, metaclass):
        debug.dbg('Unprocessed metaclass %s', metaclass)
        return []
Example #42
0
    def eval_element(self, context, element):
        if isinstance(context, CompForContext):
            return eval_node(context, element)

        if_stmt = element
        while if_stmt is not None:
            if_stmt = if_stmt.parent
            if if_stmt.type in ('if_stmt', 'for_stmt'):
                break
            if parser_utils.is_scope(if_stmt):
                if_stmt = None
                break
        predefined_if_name_dict = context.predefined_names.get(if_stmt)
        # TODO there's a lot of issues with this one. We actually should do
        # this in a different way. Caching should only be active in certain
        # cases and this all sucks.
        if predefined_if_name_dict is None and if_stmt \
                and if_stmt.type == 'if_stmt' and self.is_analysis:
            if_stmt_test = if_stmt.children[1]
            name_dicts = [{}]
            # If we already did a check, we don't want to do it again -> If
            # context.predefined_names is filled, we stop.
            # We don't want to check the if stmt itself, it's just about
            # the content.
            if element.start_pos > if_stmt_test.end_pos:
                # Now we need to check if the names in the if_stmt match the
                # names in the suite.
                if_names = helpers.get_names_of_node(if_stmt_test)
                element_names = helpers.get_names_of_node(element)
                str_element_names = [e.value for e in element_names]
                if any(i.value in str_element_names for i in if_names):
                    for if_name in if_names:
                        definitions = self.goto_definitions(context, if_name)
                        # Every name that has multiple different definitions
                        # causes the complexity to rise. The complexity should
                        # never fall below 1.
                        if len(definitions) > 1:
                            if len(name_dicts) * len(definitions) > 16:
                                debug.dbg(
                                    'Too many options for if branch evaluation %s.',
                                    if_stmt)
                                # There's only a certain number of branches
                                # Jedi can evaluate, otherwise it would take
                                # too long.
                                name_dicts = [{}]
                                break

                            original_name_dicts = list(name_dicts)
                            name_dicts = []
                            for definition in definitions:
                                new_name_dicts = list(original_name_dicts)
                                for i, name_dict in enumerate(new_name_dicts):
                                    new_name_dicts[i] = name_dict.copy()
                                    new_name_dicts[i][
                                        if_name.value] = ContextSet(definition)

                                name_dicts += new_name_dicts
                        else:
                            for name_dict in name_dicts:
                                name_dict[if_name.value] = definitions
            if len(name_dicts) > 1:
                result = ContextSet()
                for name_dict in name_dicts:
                    with helpers.predefine_names(context, if_stmt, name_dict):
                        result |= eval_node(context, element)
                return result
            else:
                return self._eval_element_if_evaluated(context, element)
        else:
            if predefined_if_name_dict:
                return eval_node(context, element)
            else:
                return self._eval_element_if_evaluated(context, element)
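The branch multiplication in eval_element is easier to see in isolation: every name with N candidate definitions forks each existing name dict N ways, and the > 16 guard caps that growth. A toy illustration with hypothetical data (plain Python, no jedi types):

name_dicts = [{}]
definitions_per_name = {'x': ['int', 'str'], 'y': ['list', 'dict', 'None']}

for name, definitions in definitions_per_name.items():
    if len(name_dicts) * len(definitions) > 16:
        name_dicts = [{}]  # too many branches: give up, as above
        break
    # Fork every existing branch once per candidate definition.
    name_dicts = [dict(d, **{name: defn})
                  for d in name_dicts
                  for defn in definitions]

print(len(name_dicts))  # 2 * 3 = 6 combined branches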
Example #43
0
def eval_node(context, element):
    debug.dbg('eval_element %s@%s', element, element.start_pos)
    evaluator = context.evaluator
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom'):
        return eval_atom(context, element)
    elif typ == 'keyword':
        # For False/True/None
        if element.value in ('False', 'True', 'None'):
            return ContextSet(
                compiled.builtin_from_name(evaluator, element.value))
        # else: e.g. `print` could be evaluated like this in Python 2.7
        return NO_CONTEXTS
    elif typ == 'lambdef':
        return ContextSet(FunctionContext(evaluator, context, element))
    elif typ == 'expr_stmt':
        return eval_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        if not (first_child.type == 'keyword'
                and first_child.value == 'await'):
            context_set = eval_atom(context, first_child)
            for trailer in element.children[1:]:
                if trailer == '**':  # has a power operation.
                    right = evaluator.eval_element(context,
                                                   element.children[2])
                    context_set = _eval_comparison(evaluator, context,
                                                   context_set, trailer, right)
                    break
                context_set = eval_trailer(context, context_set, trailer)
            return context_set
        return NO_CONTEXTS
    elif typ in (
            'testlist_star_expr',
            'testlist',
    ):
        # The implicit tuple in statements.
        return ContextSet(
            iterable.SequenceLiteralContext(evaluator, context, element))
    elif typ in ('not_test', 'factor'):
        context_set = context.eval_node(element.children[-1])
        for operator in element.children[:-1]:
            context_set = eval_factor(context_set, operator)
        return context_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.eval_node(element.children[0])
                | context.eval_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not evaluated.
        # In Python 2, an ellipsis is coded as three single-dot tokens, not
        # as one three-dot token.
        assert element.value in ('.',
                                 '...'), 'value is actually ' + element.value
        return ContextSet(compiled.create(evaluator, Ellipsis))
    elif typ == 'dotted_name':
        context_set = eval_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            # TODO add search_global=True?
            context_set = context_set.py__getattribute__(next_name,
                                                         name_context=context)
        return context_set
    elif typ == 'eval_input':
        return eval_node(context, element.children[0])
    elif typ == 'annassign':
        return pep0484._evaluate_for_annotation(context, element.children[1])
    else:
        return eval_or_test(context, element)
Example #44
0
def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) \
                or _is_tuple(left) and _is_tuple(right):
            return ValueSet(
                [iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False can we continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[
                        operator](inference_state.environment.version_info,
                                  tuple(version_info))
                    return ValueSet(
                        [_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result
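The tail of _infer_comparison_part mirrors Python's own binary-operator protocol: try the left operand's magic method, then fall back to the right operand's reflected method. The same dance in plain Python (an illustrative sketch, not jedi code):

OPERATOR_TO_MAGIC = {'+': '__add__'}
REVERSE_OPERATOR_TO_MAGIC = {'+': '__radd__'}

def binary_op(left, right, op='+'):
    # left.__add__(right) first, right.__radd__(left) second,
    # just like the magic_methods lookup above.
    method = getattr(type(left), OPERATOR_TO_MAGIC[op], None)
    if method is not None:
        result = method(left, right)
        if result is not NotImplemented:
            return result
    rmethod = getattr(type(right), REVERSE_OPERATOR_TO_MAGIC[op], None)
    if rmethod is not None:
        result = rmethod(right, left)
        if result is not NotImplemented:
            return result
    raise TypeError('unsupported operand type(s) for %s' % op)

print(binary_op(1, 2.5))  # 3.5: int.__add__ declines, float.__radd__ answers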
Example #45
0
    def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
        from jedi import Script
        s = Script('', project=self)
        inference_state = s._inference_state
        empty_module_context = s._get_module_context()

        debug.dbg('Search for string %s, complete=%s', string, complete)
        wanted_type, wanted_names = split_search_string(string)
        name = wanted_names[0]
        stub_folder_name = name + '-stubs'

        ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
        file_ios = []

        # 1. Search for modules in the current project
        for folder_io, file_io in ios:
            if file_io is None:
                file_name = folder_io.get_base_name()
                if file_name == name or file_name == stub_folder_name:
                    f = folder_io.get_file_io('__init__.py')
                    try:
                        m = load_module_from_path(inference_state,
                                                  f).as_context()
                    except FileNotFoundError:
                        f = folder_io.get_file_io('__init__.pyi')
                        try:
                            m = load_module_from_path(inference_state,
                                                      f).as_context()
                        except FileNotFoundError:
                            m = load_namespace_from_path(
                                inference_state, folder_io).as_context()
                else:
                    continue
            else:
                file_ios.append(file_io)
                if Path(file_io.path).name in (name + '.py', name + '.pyi'):
                    m = load_module_from_path(inference_state,
                                              file_io).as_context()
                else:
                    continue

            debug.dbg('Search of a specific module %s', m)
            yield from search_in_module(
                inference_state,
                m,
                names=[m.name],
                wanted_type=wanted_type,
                wanted_names=wanted_names,
                complete=complete,
                convert=True,
                ignore_imports=True,
            )

        # 2. Search for identifiers in the project.
        for module_context in search_in_file_ios(inference_state, file_ios,
                                                 name):
            names = get_module_names(module_context.tree_node,
                                     all_scopes=all_scopes)
            names = [module_context.create_name(n) for n in names]
            names = _remove_imports(names)
            yield from search_in_module(
                inference_state,
                module_context,
                names=names,
                wanted_type=wanted_type,
                wanted_names=wanted_names,
                complete=complete,
                ignore_imports=True,
            )

        # 3. Search for modules on sys.path
        sys_path = [
            p for p in self._get_sys_path(inference_state)
            # Exclude folders that are handled by recursing of the Python
            # folders.
            if not p.startswith(str(self._path))
        ]
        names = list(
            iter_module_names(inference_state, empty_module_context, sys_path))
        yield from search_in_module(
            inference_state,
            empty_module_context,
            names=names,
            wanted_type=wanted_type,
            wanted_names=wanted_names,
            complete=complete,
            convert=True,
        )
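_search_func is the engine behind jedi's project-wide search. Assuming jedi 0.18+, the public entry point looks roughly like this; treat the exact signature and attributes as assumptions:

import jedi

project = jedi.Project('/path/to/repo')  # hypothetical project root
for name in project.search('class Script', all_scopes=True):
    print(name.module_path, name.line, name.description)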
Example #46
0
    def get_return_types(self, evaluate_generator=False):
        """ Get the return types of a function. """
        base = self.decorated
        stmts = []
        if base.parent == builtin.Builtin.scope \
                and not isinstance(base, (Generator, Array)):
            func_name = str(base.name)

            # some implementations of builtins:
            if func_name == 'getattr':
                # follow the first param
                objects = self.follow_var_arg(0)
                names = self.follow_var_arg(1)
                for obj in objects:
                    if not isinstance(obj, (Instance, Class, pr.Module)):
                        debug.warning('getattr called without instance')
                        continue

                    for arr_name in names:
                        if not isinstance(arr_name, Instance):
                            debug.warning('getattr called without str')
                            continue
                        if len(arr_name.var_args) != 1:
                            debug.warning('jedi getattr is too simple')
                        key = arr_name.var_args[0]
                        stmts += evaluate.follow_path(iter([key]), obj, base)
                return stmts
            elif func_name == 'type':
                # otherwise it would be a metaclass
                if len(self.var_args) == 1:
                    objects = self.follow_var_arg(0)
                    return [o.base for o in objects if isinstance(o, Instance)]
            elif func_name == 'super':
                # TODO make this able to detect multiple inheritance supers
                accept = (pr.Function, )
                func = self.var_args.get_parent_until(accept)
                if func.isinstance(*accept):
                    cls = func.get_parent_until(accept + (pr.Class, ),
                                                include_current=False)
                    if isinstance(cls, pr.Class):
                        cls = Class(cls)
                        su = cls.get_super_classes()
                        if su:
                            return [Instance(su[0])]
                return []

        if base.isinstance(Class):
            # There may be executions of executions.
            return [Instance(base, self.var_args)]
        elif isinstance(base, Generator):
            return base.iter_content()
        else:
            try:
                base.returns  # Test if it is a function
            except AttributeError:
                if hasattr(base, 'execute_subscope_by_name'):
                    try:
                        stmts = base.execute_subscope_by_name(
                            '__call__', self.var_args)
                    except KeyError:
                        debug.warning("no __call__ func available", base)
                else:
                    debug.warning("no execution possible", base)
            else:
                stmts = self._get_function_returns(base, evaluate_generator)

        debug.dbg('exec result: %s in %s' % (stmts, self))

        return imports.strip_imports(stmts)
Example #47
0
def execute(value, arguments):
    debug.dbg('execute: %s %s', value, arguments)
    with debug.increase_indent_cm():
        value_set = value.py__call__(arguments=arguments)
    debug.dbg('execute result: %s in %s', value_set, value)
    return value_set
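debug.increase_indent_cm is what nests the trace so a call's 'execute result' line appears under its 'execute' line. A self-contained sketch of such an indent-tracking context manager (an assumed structure, not jedi's actual module):

from contextlib import contextmanager

_indent = 0

@contextmanager
def increase_indent_cm():
    # Everything logged inside the `with` block is indented one level.
    global _indent
    _indent += 1
    try:
        yield
    finally:
        _indent -= 1

def dbg(message, *args):
    print('    ' * _indent + 'dbg: ' + message % args)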
Example #48
0
    def eval_element(self, element):
        if isinstance(element, iterable.AlreadyEvaluated):
            return set(element)
        elif isinstance(element, iterable.MergedNodes):
            return iterable.unite(self.eval_element(e) for e in element)

        if_stmt = element.get_parent_until(
            (tree.IfStmt, tree.ForStmt, tree.IsScope))
        predefined_if_name_dict = self.predefined_if_name_dict_dict.get(
            if_stmt)
        if predefined_if_name_dict is None and isinstance(
                if_stmt, tree.IfStmt):
            if_stmt_test = if_stmt.children[1]
            name_dicts = [{}]
            # If we already did a check, we don't want to do it again -> If
            # predefined_if_name_dict_dict is filled, we stop.
            # We don't want to check the if stmt itself, it's just about
            # the content.
            if element.start_pos > if_stmt_test.end_pos:
                # Now we need to check if the names in the if_stmt match the
                # names in the suite.
                if_names = helpers.get_names_of_node(if_stmt_test)
                element_names = helpers.get_names_of_node(element)
                str_element_names = [str(e) for e in element_names]
                if any(str(i) in str_element_names for i in if_names):
                    for if_name in if_names:
                        definitions = self.goto_definitions(if_name)
                        # Every name that has multiple different definitions
                        # causes the complexity to rise. The complexity should
                        # never fall below 1.
                        if len(definitions) > 1:
                            if len(name_dicts) * len(definitions) > 16:
                                debug.dbg(
                                    'Too many options for if branch evaluation %s.',
                                    if_stmt)
                                # There's only a certain number of branches
                                # Jedi can evaluate, otherwise it would take
                                # too long.
                                name_dicts = [{}]
                                break

                            original_name_dicts = list(name_dicts)
                            name_dicts = []
                            for definition in definitions:
                                new_name_dicts = list(original_name_dicts)
                                for i, name_dict in enumerate(new_name_dicts):
                                    new_name_dicts[i] = name_dict.copy()
                                    new_name_dicts[i][str(if_name)] = [
                                        definition
                                    ]

                                name_dicts += new_name_dicts
                        else:
                            for name_dict in name_dicts:
                                name_dict[str(if_name)] = definitions
            if len(name_dicts) > 1:
                result = set()
                for name_dict in name_dicts:
                    self.predefined_if_name_dict_dict[if_stmt] = name_dict
                    try:
                        result |= self._eval_element_not_cached(element)
                    finally:
                        del self.predefined_if_name_dict_dict[if_stmt]
                return result
            else:
                return self._eval_element_if_evaluated(element)
        else:
            if predefined_if_name_dict:
                return self._eval_element_not_cached(element)
            else:
                return self._eval_element_if_evaluated(element)
Example #49
0
def _execute(context, arguments):
    debug.dbg('execute: %s %s', context, arguments)
    with debug.increase_indent_cm():
        context_set = context.py__call__(arguments=arguments)
    debug.dbg('execute result: %s in %s', context_set, context)
    return context_set
Example #50
0
def _check_array_additions(evaluator, compare_array, module, is_list):
    """
    Checks if an `Array` has "add" (append, insert, extend) statements:

    >>> a = [""]
    >>> a.append(1)
    """
    debug.dbg('Dynamic array search for %s' % compare_array, color='MAGENTA')
    if not settings.dynamic_array_additions or isinstance(
            module, compiled.CompiledObject):
        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
        return set()

    def check_additions(arglist, add_name):
        params = list(param.Arguments(evaluator, arglist).unpack())
        result = set()
        if add_name in ['insert']:
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, nodes in params:
                result |= unite(evaluator.eval_element(node) for node in nodes)
        elif add_name in ['extend', 'update']:
            for key, nodes in params:
                for node in nodes:
                    types = evaluator.eval_element(node)
                    result |= py__iter__types(evaluator, types, node)
        return result

    from jedi.evaluate import representation as er, param

    def get_execution_parent(element):
        """ Used to get an Instance/FunctionExecution parent """
        if isinstance(element, Array):
            node = element.atom
        else:
            # Is an Instance with an
            # Arguments([AlreadyEvaluated([_ArrayInstance])]) inside
            # Yeah... I know... It's complicated ;-)
            node = list(element.var_args.argument_node[0])[0].var_args.trailer
        if isinstance(node, er.InstanceElement) or node is None:
            return node
        return node.get_parent_until(er.FunctionExecution)

    temp_param_add, settings.dynamic_params_for_other_modules = \
        settings.dynamic_params_for_other_modules, False

    search_names = (['append', 'extend', 'insert']
                    if is_list else ['add', 'update'])
    comp_arr_parent = get_execution_parent(compare_array)

    added_types = set()
    for add_name in search_names:
        try:
            possible_names = module.used_names[add_name]
        except KeyError:
            continue
        else:
            for name in possible_names:
                # Check if the original scope is an execution. If it is, one
                # can search for the same statement, that is in the module
                # dict. Executions are somewhat special in jedi, since they
                # literally copy the contents of a function.
                if isinstance(comp_arr_parent, er.FunctionExecution):
                    if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos:
                        name = comp_arr_parent.name_for_position(
                            name.start_pos)
                    else:
                        # Don't check definitions that are not defined in the
                        # same function. This is not "proper" anyway. It also
                        # improves Jedi's speed for array lookups, since we
                        # don't have to check the whole source tree anymore.
                        continue
                trailer = name.parent
                power = trailer.parent
                trailer_pos = power.children.index(trailer)
                try:
                    execution_trailer = power.children[trailer_pos + 1]
                except IndexError:
                    continue
                else:
                    if execution_trailer.type != 'trailer' \
                            or execution_trailer.children[0] != '(' \
                            or execution_trailer.children[1] == ')':
                        continue
                power = helpers.call_of_leaf(name, cut_own_trailer=True)
                # InstanceElements are special, because they don't get copied,
                # but have this wrapper around them.
                if isinstance(comp_arr_parent, er.InstanceElement):
                    power = er.get_instance_el(evaluator,
                                               comp_arr_parent.instance, power)

                if evaluator.recursion_detector.push_stmt(power):
                    # Check for recursion. Possible by using 'extend' in
                    # combination with function calls.
                    continue
                try:
                    if compare_array in evaluator.eval_element(power):
                        # The arrays match. Now add the results
                        added_types |= check_additions(
                            execution_trailer.children[1], add_name)
                finally:
                    evaluator.recursion_detector.pop_stmt()
    # reset settings
    settings.dynamic_params_for_other_modules = temp_param_add
    debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
    return added_types
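The push_stmt/pop_stmt pair is a re-entrancy guard: without it, `a.extend(f())` where `f` in turn reads `a` would recurse forever. A minimal sketch of such a detector (names follow the snippet; the implementation is assumed):

class RecursionDetector:
    def __init__(self):
        self._stack = []

    def push_stmt(self, node):
        # True means the node is already being evaluated, so the
        # caller should skip it (the `continue` above).
        if node in self._stack:
            return True
        self._stack.append(node)
        return False

    def pop_stmt(self):
        self._stack.pop()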
Example #51
0
def _parse_function_doc(func):
    """
    Takes a function and returns the params and return value as a tuple.
    This is nothing more than a docstring parser.
    """
    # TODO: things like utime(path, (atime, mtime)) and a(b [, b]) -> None
    doc = inspect.getdoc(func)

    if doc is None:
        return '', 'pass'

    # get full string, parse round parentheses: def func(a, (b,c))
    try:
        count = 0
        debug.dbg(func, func.__name__, doc)
        start = doc.index('(')
        for i, s in enumerate(doc[start:]):
            if s == '(':
                count += 1
            elif s == ')':
                count -= 1
            if count == 0:
                end = start + i
                break
        param_str = doc[start + 1:end]
    except (ValueError, UnboundLocalError, AttributeError):
        # ValueError for doc.index
        # UnboundLocalError for undefined end in last line
        debug.dbg('no brackets found - no param')
        end = 0
        param_str = ''
    else:
        # remove square brackets that mark an optional param (= None)
        def change_options(m):
            args = m.group(1).split(',')
            for i, a in enumerate(args):
                if a and '=' not in a:
                    args[i] += '=None'
            return ','.join(args)

        while True:
            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options,
                                         param_str)
            if changes == 0:
                break
    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__

    # parse return value
    r = re.search('-[>-]* ', doc[end:end + 7])
    if r is None:
        ret = ''
    else:
        index = end + r.end()
        # get result type, which can contain newlines
        pattern = re.compile(r'(,\n|[^\n-])+')
        ret_str = pattern.match(doc, index).group(0).strip()
        # New object -> object()
        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)

        ret = BuiltinModule.map_types.get(ret_str, ret_str)
        if ret == ret_str and ret not in ['None', 'object', 'tuple', 'set']:
            debug.dbg('not working', ret_str)

        ret = ('return ' if 'return' not in ret else '') + ret
    return param_str, ret
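The heart of _parse_function_doc is the balanced-parenthesis scan that slices the parameter list out of the signature line. Isolated into a hypothetical helper:

def extract_param_str(doc):
    """Return the text inside the first balanced pair of parentheses."""
    start = doc.index('(')  # ValueError if there is no signature
    count = 0
    for i, ch in enumerate(doc[start:]):
        if ch == '(':
            count += 1
        elif ch == ')':
            count -= 1
        if count == 0:
            return doc[start + 1:start + i]
    raise ValueError('unbalanced parentheses')

print(extract_param_str('isinstance(object, class_or_tuple) -> bool'))
# prints: object, class_or_tuple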
Example #52
0
    def _do_import(self, import_path, sys_path):
        """
        This method is very similar to importlib's `_gcd_import`.
        """
        import_parts = [str(i) for i in import_path]

        # Handle "magic" Flask extension imports:
        # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
        if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
            # New style.
            ipath = ('flask_' + str(import_parts[2]), ) + import_path[3:]
            modules = self._do_import(ipath, sys_path)
            if modules:
                return modules
            else:
                # Old style
                return self._do_import(('flaskext', ) + import_path[2:],
                                       sys_path)

        module_name = '.'.join(import_parts)
        try:
            return set([self._evaluator.modules[module_name]])
        except KeyError:
            pass

        if len(import_path) > 1:
            # This is a recursive way of importing that works great with
            # the module cache.
            bases = self._do_import(import_path[:-1], sys_path)
            if not bases:
                return set()
            # We can take the first element, because only the os special
            # case yields multiple modules, which is not important for
            # further imports.
            parent_module = list(bases)[0]

            # This is a huge exception, we follow a nested import
            # ``os.path``, because it's a very important one in Python
            # that is being achieved by messing with ``sys.modules`` in
            # ``os``.
            if [str(i) for i in import_path] == ['os', 'path']:
                return parent_module.py__getattribute__('path')

            try:
                method = parent_module.py__path__
            except AttributeError:
                # The module is not a package.
                _add_error(parent_module, import_path[-1])
                return set()
            else:
                paths = method()
                debug.dbg('search_module %s in paths %s', module_name, paths)
                for path in paths:
                    # At the moment we are only using one path, so being
                    # fully correct is not important here.
                    try:
                        if not isinstance(path, list):
                            path = [path]
                        module_file, module_path, is_pkg = \
                            find_module(import_parts[-1], path, fullname=module_name)
                        break
                    except ImportError:
                        module_path = None
                if module_path is None:
                    _add_error(parent_module, import_path[-1])
                    return set()
        else:
            parent_module = None
            try:
                debug.dbg('search_module %s in %s', import_parts[-1],
                          self.file_path)
                # Override the sys.path. It only works well that way.
                # Injecting the path directly into `find_module` did not work.
                sys.path, temp = sys_path, sys.path
                try:
                    module_file, module_path, is_pkg = \
                        find_module(import_parts[-1], fullname=module_name)
                finally:
                    sys.path = temp
            except ImportError:
                # The module is not a package.
                _add_error(self.module_context, import_path[-1])
                return set()

        source = None
        if is_pkg:
            # In this case, we don't have a file yet. Search for the
            # __init__ file.
            if module_path.endswith(('.zip', '.egg')):
                source = module_file.loader.get_source(module_name)
            else:
                module_path = get_init_path(module_path)
        elif module_file:
            source = module_file.read()
            module_file.close()

        if isinstance(module_path, ImplicitNSInfo):
            from jedi.evaluate.representation import ImplicitNamespaceContext
            fullname, paths = module_path.name, module_path.paths
            module = ImplicitNamespaceContext(self._evaluator,
                                              fullname=fullname)
            module.paths = paths
        elif module_file is None and not module_path.endswith(
            ('.py', '.zip', '.egg')):
            module = compiled.load_module(self._evaluator, module_path)
        else:
            module = _load_module(self._evaluator, module_path, source,
                                  sys_path, parent_module)

        if module is None:
            # The file might raise an ImportError e.g. and therefore not be
            # importable.
            return set()

        self._evaluator.modules[module_name] = module
        return set([module])
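Both _do_import variants special-case `flask.ext.foo`, retrying it under its two historical spellings before giving up. The rewrite, extracted into a hypothetical helper:

def flask_ext_candidates(import_parts):
    # flask.ext.foo -> flask_foo (new style), then flaskext.foo (old style).
    if len(import_parts) > 2 and import_parts[:2] == ['flask', 'ext']:
        rest = import_parts[3:]
        yield ['flask_' + import_parts[2]] + rest
        yield ['flaskext'] + import_parts[2:]

print(list(flask_ext_candidates(['flask', 'ext', 'sqlalchemy'])))
# [['flask_sqlalchemy'], ['flaskext', 'sqlalchemy']]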
Example #53
0
    def _do_import(self, import_path, sys_path):
        """
        This method is very similar to importlib's `_gcd_import`.
        """
        import_parts = [str(i) for i in import_path]

        # Handle "magic" Flask extension imports:
        # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
        if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
            # New style.
            ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
            modules = self._do_import(ipath, sys_path)
            if modules:
                return modules
            else:
                # Old style
                return self._do_import(('flaskext',) + import_path[2:], sys_path)

        module_name = '.'.join(import_parts)
        try:
            return set([self._evaluator.modules[module_name]])
        except KeyError:
            pass

        if len(import_path) > 1:
            # This is a recursive way of importing that works great with
            # the module cache.
            bases = self._do_import(import_path[:-1], sys_path)
            if not bases:
                return set()
            # We can take the first element, because only the os special
            # case yields multiple modules, which is not important for
            # further imports.
            base = list(bases)[0]

            # This is a huge exception, we follow a nested import
            # ``os.path``, because it's a very important one in Python
            # that is being achieved by messing with ``sys.modules`` in
            # ``os``.
            if [str(i) for i in import_path] == ['os', 'path']:
                return self._evaluator.find_types(base, 'path')

            try:
                # It's possible that by always giving it the sys path (and not
                # the __path__ attribute of the parent), we get wrong results
                # and nested namespace packages don't work. But I'm not sure.
                paths = base.py__path__(sys_path)
            except AttributeError:
                # The module is not a package.
                _add_error(self._evaluator, import_path[-1])
                return set()
            else:
                debug.dbg('search_module %s in paths %s', module_name, paths)
                for path in paths:
                    # At the moment we are only using one path, so being
                    # fully correct is not important here.
                    try:
                        module_file, module_path, is_pkg = \
                            find_module(import_parts[-1], [path])
                        break
                    except ImportError:
                        module_path = None
                if module_path is None:
                    _add_error(self._evaluator, import_path[-1])
                    return set()
        else:
            try:
                debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
                # Override the sys.path. It only works well that way.
                # Injecting the path directly into `find_module` did not work.
                sys.path, temp = sys_path, sys.path
                try:
                    module_file, module_path, is_pkg = \
                        find_module(import_parts[-1])
                finally:
                    sys.path = temp
            except ImportError:
                # The module is not a package.
                _add_error(self._evaluator, import_path[-1])
                return set()

        source = None
        if is_pkg:
            # In this case, we don't have a file yet. Search for the
            # __init__ file.
            module_path = get_init_path(module_path)
        elif module_file:
            source = module_file.read()
            module_file.close()

        if module_file is None and not module_path.endswith('.py'):
            module = compiled.load_module(self._evaluator, module_path)
        else:
            module = _load_module(self._evaluator, module_path, source, sys_path)

        self._evaluator.modules[module_name] = module
        return set([module])
Example #54
0
def import_module(inference_state, import_names, parent_module_value,
                  sys_path):
    """
    This method is very similar to importlib's `_gcd_import`.
    """
    if import_names[0] in settings.auto_import_modules:
        module = _load_builtin_module(inference_state, import_names, sys_path)
        if module is None:
            return NO_VALUES
        return ValueSet([module])

    module_name = '.'.join(import_names)
    if parent_module_value is None:
        # Override the sys.path. It only works well that way.
        # Injecting the path directly into `find_module` did not work.
        file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
            string=import_names[-1],
            full_name=module_name,
            sys_path=sys_path,
            is_global_search=True,
        )
        if is_pkg is None:
            return NO_VALUES
    else:
        paths = parent_module_value.py__path__()
        if paths is None:
            # The module might not be a package.
            return NO_VALUES

        for path in paths:
            # At the moment we are only using one path, so being
            # fully correct is not important here.
            if not isinstance(path, list):
                path = [path]
            file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
                string=import_names[-1],
                path=path,
                full_name=module_name,
                is_global_search=False,
            )
            if is_pkg is not None:
                break
        else:
            return NO_VALUES

    if isinstance(file_io_or_ns, ImplicitNSInfo):
        from jedi.inference.value.namespace import ImplicitNamespaceValue
        module = ImplicitNamespaceValue(
            inference_state,
            string_names=tuple(file_io_or_ns.name.split('.')),
            paths=file_io_or_ns.paths,
        )
    elif file_io_or_ns is None:
        module = _load_builtin_module(inference_state, import_names, sys_path)
        if module is None:
            return NO_VALUES
    else:
        module = _load_python_module(
            inference_state,
            file_io_or_ns,
            import_names=import_names,
            is_package=is_pkg,
        )

    if parent_module_value is None:
        debug.dbg('global search_module %s: %s', import_names[-1], module)
    else:
        debug.dbg('search_module %s in paths %s: %s', module_name, paths,
                  module)
    return ValueSet([module])
Example #55
0
    def initialize(self, code):
        debug.dbg('differ: initialize', color='YELLOW')
        self.lines = splitlines(code, keepends=True)
        parser_cache.pop(None, None)
        self.module = parse(code, diff_cache=True, cache=True)
        return self.module
Example #56
0
def _check_array_additions(context, sequence):
    """
    Checks if an `Array` has "add" (append, insert, extend) statements:

    >>> a = [""]
    >>> a.append(1)
    """
    from jedi.evaluate import arguments

    debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
    module_context = context.get_root_context()
    if not settings.dynamic_array_additions or isinstance(
            module_context, compiled.CompiledObject):
        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
        return ContextSet()

    def find_additions(context, arglist, add_name):
        params = list(
            arguments.TreeArguments(context.evaluator, context,
                                    arglist).unpack())
        result = set()
        if add_name in ['insert']:
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, whatever in params:
                result.add(whatever)
        elif add_name in ['extend', 'update']:
            for key, lazy_context in params:
                result |= set(lazy_context.infer().iterate())
        return result

    temp_param_add, settings.dynamic_params_for_other_modules = \
        settings.dynamic_params_for_other_modules, False

    is_list = sequence.name.string_name == 'list'
    search_names = (['append', 'extend', 'insert']
                    if is_list else ['add', 'update'])

    added_types = set()
    for add_name in search_names:
        try:
            possible_names = module_context.tree_node.get_used_names()[add_name]
        except KeyError:
            continue
        else:
            for name in possible_names:
                context_node = context.tree_node
                if not (context_node.start_pos < name.start_pos <
                        context_node.end_pos):
                    continue
                trailer = name.parent
                power = trailer.parent
                trailer_pos = power.children.index(trailer)
                try:
                    execution_trailer = power.children[trailer_pos + 1]
                except IndexError:
                    continue
                else:
                    if execution_trailer.type != 'trailer' \
                            or execution_trailer.children[0] != '(' \
                            or execution_trailer.children[1] == ')':
                        continue

                random_context = context.create_context(name)

                with recursion.execution_allowed(context.evaluator,
                                                 power) as allowed:
                    if allowed:
                        found = evaluate_call_of_leaf(random_context,
                                                      name,
                                                      cut_own_trailer=True)
                        if sequence in found:
                            # The arrays match. Now add the results
                            added_types |= find_additions(
                                random_context, execution_trailer.children[1],
                                add_name)

    # reset settings
    settings.dynamic_params_for_other_modules = temp_param_add
    debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
    return added_types
Example #57
0
def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, unicode):
        str_operator = operator
    else:
        str_operator = force_unicode(str(operator.value))

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return ValueSet([left.execute_operation(right, str_operator)])
        elif _is_tuple(left) and _is_tuple(right) \
                or _is_list(left) and _is_list(right):
            return ValueSet(
                [iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return ValueSet([left.execute_operation(right, str_operator)])
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            try:
                return ValueSet([left.execute_operation(right, str_operator)])
            except TypeError:
                # Could be True or False.
                pass
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[
                        operator](inference_state.environment.version_info,
                                  tuple(version_info))
                    return ValueSet(
                        [_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator == 'in':
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result
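COMPARISON_OPERATORS maps operator strings to real comparison functions so checks like `sys.version_info >= (3, 6)` can be decided statically. A stand-in built on the stdlib operator module (the real table lives in jedi and may differ):

import operator as op

COMPARISON_OPERATORS = {
    '==': op.eq, '!=': op.ne,
    'is': op.is_, 'is not': op.is_not,
    '<': op.lt, '<=': op.le, '>': op.gt, '>=': op.ge,
}

# Deciding a version check statically, as the VersionInfo branch does:
print(COMPARISON_OPERATORS['>=']((3, 8, 0), (3, 6)))  # True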
Example #58
0
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls, that a statement does. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    is_annassign = first_operator not in (
        '=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:

            def to_mod(v):
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(
                            context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator,
                                              value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set
Example #59
0
    def _do_import(self, import_path, sys_path):
        """
        This method is very similar to importlib's `_gcd_import`.
        """
        import_parts = [
            force_unicode(i.value if isinstance(i, tree.Name) else i)
            for i in import_path
        ]

        # Handle "magic" Flask extension imports:
        # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
        if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
            # New style.
            ipath = ('flask_' + str(import_parts[2]), ) + import_path[3:]
            modules = self._do_import(ipath, sys_path)
            if modules:
                return modules
            else:
                # Old style
                return self._do_import(('flaskext', ) + import_path[2:],
                                       sys_path)

        module_name = '.'.join(import_parts)
        try:
            return ContextSet(self._evaluator.module_cache.get(module_name))
        except KeyError:
            pass

        if len(import_path) > 1:
            # This is a recursive way of importing that works great with
            # the module cache.
            bases = self._do_import(import_path[:-1], sys_path)
            if not bases:
                return NO_CONTEXTS
            # We can take the first element, because only the ``os`` special
            # case yields multiple modules, and that does not matter for
            # further imports.
            parent_module = list(bases)[0]

            # This is a huge exception, we follow a nested import
            # ``os.path``, because it's a very important one in Python
            # that is being achieved by messing with ``sys.modules`` in
            # ``os``.
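            # For example ``import os.path`` resolves to whatever ``os``
            # assigned to its ``path`` attribute at runtime (``posixpath``
            # or ``ntpath``), not to a literal ``os/path`` package.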
            if import_parts == ['os', 'path']:
                return parent_module.py__getattribute__('path')

            try:
                method = parent_module.py__path__
            except AttributeError:
                # The module is not a package.
                _add_error(self.module_context, import_path[-1])
                return NO_CONTEXTS
            else:
                paths = method()
                debug.dbg('search_module %s in paths %s', module_name, paths)
                for path in paths:
                    # At the moment we only use one path, so correctness
                    # here is not critical.
                    if not isinstance(path, list):
                        path = [path]
                    code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
                        string=import_parts[-1],
                        path=path,
                        full_name=module_name)
                    if module_path is not None:
                        break
                else:
                    _add_error(self.module_context, import_path[-1])
                    return NO_CONTEXTS
        else:
            debug.dbg('search_module %s in %s', import_parts[-1],
                      self.file_path)
            # Override sys.path; that is the only approach that works
            # reliably here. Injecting the path directly into `find_module`
            # did not work.
            code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
                string=import_parts[-1],
                full_name=module_name,
                sys_path=sys_path,
            )
            if module_path is None:
                # The module is not a package.
                _add_error(self.module_context, import_path[-1])
                return NO_CONTEXTS

        module = _load_module(
            self._evaluator,
            module_path,
            code,
            sys_path,
            module_name=module_name,
            safe_module_name=True,
        )

        if module is None:
            # The file might raise an ImportError e.g. and therefore not be
            # importable.
            return NO_CONTEXTS

        return ContextSet(module)
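
The parent-first recursion above is the heart of this method. A minimal
standalone sketch of the same scheme (hypothetical names, no jedi
dependencies): resolve the parent package first, cache it, then look the
child up relative to it.

    def resolve(parts, cache, find_child):
        """Resolve a dotted import path such as ('os', 'path'), parent first."""
        name = '.'.join(parts)
        if name in cache:
            return cache[name]
        parent = None
        if len(parts) > 1:
            parent = resolve(parts[:-1], cache, find_child)
            if parent is None:
                return None  # the parent failed, so the child cannot be found
        module = find_child(parent, parts[-1])  # user-supplied lookup
        if module is not None:
            cache[name] = module
        return module
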
Example #60
0
    def completions(self):
        """
        Return :class:`api_classes.Completion` objects. Those objects contain
        more information about the completions than just their names.

        :return: Completion objects, sorted by name; names starting with
            ``__`` come last.
        :rtype: list of :class:`api_classes.Completion`
        """
        debug.speed('completions start')
        path = self._module.get_path_until_cursor()
        if re.search(r'^\.|\.\.$', path):
            return []
        path, dot, like = self._get_completion_parts(path)
        completion_line = self._module.get_line(self.pos[0])[:self.pos[1]]

        try:
            scopes = list(self._prepare_goto(path, True))
        except NotFoundError:
            scopes = []
            scope_generator = evaluate.get_names_of_scope(
                self._parser.user_scope, self.pos)
            completions = []
            for scope, name_list in scope_generator:
                for c in name_list:
                    completions.append((c, scope))
        else:
            completions = []
            debug.dbg('possible scopes', scopes)
            for s in scopes:
                if s.isinstance(er.Function):
                    names = s.get_magic_method_names()
                else:
                    if isinstance(s, imports.ImportPath):
                        if like == 'import':
                            if not completion_line.endswith('import import'):
                                continue
                        a = s.import_stmt.alias
                        if a and a.start_pos <= self.pos <= a.end_pos:
                            continue
                        names = s.get_defined_names(on_import_stmt=True)
                    else:
                        names = s.get_defined_names()

                for c in names:
                    completions.append((c, s))

        if not dot:  # named params have no dots
            for call_def in self.call_signatures():
                if not call_def.module.is_builtin():
                    for p in call_def.params:
                        completions.append((p.get_name(), p))

            # Do the completion if there is no path before the cursor and no
            # import stmt.
            u = self._parser.user_stmt
            bs = builtin.Builtin.scope
            if isinstance(u, pr.Import):
                if (u.relative_count > 0 or u.from_ns) and not re.search(
                        r'(,|from)\s*$|import\s+$', completion_line):
                    completions += ((k, bs)
                                    for k in keywords.get_keywords('import'))

            if not path and not isinstance(u, pr.Import):
                # add keywords
                completions += ((k, bs)
                                for k in keywords.get_keywords(all=True))

        needs_dot = not dot and path

        comps = []
        comp_dct = {}
        for c, s in set(completions):
            n = c.names[-1]
            if (settings.case_insensitive_completion
                    and n.lower().startswith(like.lower())) \
                    or n.startswith(like):
                if not evaluate.filter_private_variable(
                        s, self._parser.user_stmt, n):
                    new = api_classes.Completion(c, needs_dot, len(like), s)
                    k = (new.name, new.complete)  # key
                    if k in comp_dct and settings.no_completion_duplicates:
                        comp_dct[k]._same_name_completions.append(new)
                    else:
                        comp_dct[k] = new
                        comps.append(new)

        debug.speed('completions end')

        return sorted(comps, key=lambda x: (x.name.startswith('__'),
                                            x.name.startswith('_'),
                                            x.name.lower()))
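
The final sort key can be checked in isolation: dunder names sort last,
single-underscore names come just before them, and everything else sorts
alphabetically (hypothetical names):

    names = ['__init__', 'value', '_cache', 'append']
    print(sorted(names, key=lambda x: (x.startswith('__'),
                                       x.startswith('_'),
                                       x.lower())))
    # -> ['append', 'value', '_cache', '__init__']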