def docstring(self, fast=True, raw=True):
    """
    The docstring ``__doc__`` for any object.

    See :attr:`doc` for example.
    """
    separator = '\n' + '-' * 30 + '\n'
    full_doc = ''
    for context in self._get_contexts(fast=fast):
        if full_doc:
            # Multiple contexts: return all docstrings, separated by a
            # dashed line (only inserted once something was collected).
            full_doc += separator

        doc = context.py__doc__()

        signature_text = ''
        if self._name.is_context_name:
            if not raw:
                signature_text = _format_signatures(context)
            if not doc and context.is_stub():
                # Stubs often carry no docstring; try the converted
                # (non-stub) counterparts instead.
                for converted in convert_contexts(ContextSet({context}),
                                                  ignore_compiled=False):
                    doc = converted.py__doc__()
                    if doc:
                        break

        # Insert a blank line between signature and docstring only when
        # both are present.
        joiner = '\n\n' if signature_text and doc else ''
        full_doc += signature_text + joiner + doc
    return full_doc
def definition(correct, correct_start, path):
    """
    Resolve every whitespace-separated expression in ``correct`` and
    return the set of comparison keys for the expected definitions.

    :param correct: Space-separated expressions that must be inferable
        at the current line (``self.line_nr`` from the enclosing scope).
    :param correct_start: Unused here; kept for interface compatibility.
    :param path: Unused here; kept for interface compatibility.
    :raises Exception: If one of the expressions cannot be resolved.
    """
    should_be = set()
    for match in re.finditer('(?:[^ ]+)', correct):
        string = match.group(0)

        # Parse the expression stand-alone and move it to the current
        # line so that name resolution happens in the right scope.
        parser = grammar36.parse(string, start_symbol='eval_input',
                                 error_recovery=False)
        parser_utils.move(parser.get_root_node(), self.line_nr)
        element = parser.get_root_node()
        module_context = script._get_module()
        # The context shouldn't matter for the test results.
        user_context = get_user_scope(module_context, (self.line_nr, 0))
        if user_context.api_type == 'function':
            user_context = user_context.get_function_execution()
        element.parent = user_context.tree_node
        results = convert_contexts(
            evaluator.eval_element(user_context, element),
        )
        if not results:
            # Bug fix: report the expression that actually failed
            # (``string``); ``match.string`` is the *whole* input line.
            raise Exception('Could not resolve %s on line %s'
                            % (string, self.line_nr - 1))

        should_be |= set(Definition(evaluator, r.name) for r in results)
    debug.dbg('Finished getting types', color='YELLOW')

    # Because the objects have different ids, `repr`, then compare.
    should = set(comparison(r) for r in should_be)
    return should
def _get_context_filters(self, origin_scope):
    """Yield the context's own filters, then stub-conversion filters."""
    yield from self._context.get_filters(
        False, self._position, origin_scope=origin_scope)

    # Stub files can be incomplete, so also look at the converted
    # (non-stub) contexts for names the stub does not define.
    if self._context.is_stub():
        for converted in convert_contexts(ContextSet({self._context})):
            yield from converted.get_filters()
def _infer(self, only_stubs=False, prefer_stubs=False):
    """
    Infer the contexts behind this name, optionally converting towards
    or away from stubs, and wrap each resulting name in a Definition.
    """
    # The two flags are mutually exclusive.
    assert not (only_stubs and prefer_stubs)
    if not self._name.is_context_name:
        return []

    converted = convert_contexts(
        self._name.infer(),
        only_stubs=only_stubs,
        prefer_stubs=prefer_stubs,
    )
    definitions = []
    for context in converted:
        name = context.name
        if name == self._name:
            # Avoid creating a duplicate Definition for ourselves.
            definitions.append(self)
        else:
            definitions.append(Definition(self._evaluator, name))
    return definitions
def completion_names(self, evaluator, only_modules=False):
    """
    Return the names that can complete this import statement.

    :param evaluator: The evaluator used for inference.
    :param only_modules: Indicates whether it's possible to import a
        definition that is not defined in a module.
    """
    if not self._inference_possible:
        return []

    names = []
    if self.import_path:
        # flask
        if self._str_import_path == ('flask', 'ext'):
            # List Flask extensions like ``flask_foo``
            for mod in self._get_module_names():
                modname = mod.string_name
                if modname.startswith('flask_'):
                    extname = modname[len('flask_'):]
                    names.append(ImportName(self.module_context, extname))
            # Now the old style: ``flaskext.foo``
            # (renamed local from ``dir`` to avoid shadowing the builtin)
            for directory in self._sys_path_with_modifications():
                flaskext = os.path.join(directory, 'flaskext')
                if os.path.isdir(flaskext):
                    names += self._get_module_names([flaskext])

        contexts = self.follow()
        for context in contexts:
            # Non-modules are not completable.
            if context.api_type != 'module':  # not a module
                continue
            names += context.sub_modules_dict().values()

        if not only_modules:
            from jedi.evaluate.gradual.conversion import convert_contexts

            both_contexts = contexts | convert_contexts(contexts)
            for c in both_contexts:
                # (renamed local from ``filter`` to avoid shadowing
                # the builtin)
                for name_filter in c.get_filters(search_global=False):
                    names += name_filter.values()
    else:
        if self.level:
            # We only get here if the level cannot be properly calculated.
            names += self._get_module_names(self._fixed_sys_path)
        else:
            # This is just the list of global imports.
            names += self._get_module_names()
    return names
def _infer(self, only_stubs=False, prefer_stubs=False):
    """
    Infer the contexts behind this name, going through stub names first
    so stub information is not lost, and wrap each result.
    """
    assert not (only_stubs and prefer_stubs)
    if not self._name.is_context_name:
        return []

    # First we need to make sure that we have stub names (if possible)
    # that we can follow. If we don't do that, we can end up with the
    # inferred results of Python objects instead of stubs.
    stub_names = convert_names([self._name], prefer_stubs=True)
    inferred = ContextSet.from_sets(name.infer() for name in stub_names)
    contexts = convert_contexts(
        inferred,
        only_stubs=only_stubs,
        prefer_stubs=prefer_stubs,
    )
    result_names = (context.name for context in contexts)
    return [
        self if name == self._name else Definition(self._evaluator, name)
        for name in result_names
    ]
def _trailer_completions(self, previous_leaf):
    """
    Return completion names for an attribute access (``foo.<cursor>``).

    Evaluates the expression before the trailing dot and collects names
    from the filters of every resulting context, plus the filters of
    their converted (stub/python) counterparts that were not already
    covered.
    """
    user_context = get_user_scope(self._module_context, self._position)
    evaluation_context = self._evaluator.create_context(
        self._module_context, previous_leaf)
    contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf)
    completion_names = []
    debug.dbg('trailer completion contexts: %s', contexts, color='MAGENTA')
    for context in contexts:
        # (renamed local from ``filter`` to avoid shadowing the builtin)
        for name_filter in context.get_filters(
                search_global=False,
                origin_scope=user_context.tree_node):
            completion_names += name_filter.values()

    python_contexts = convert_contexts(contexts)
    for c in python_contexts:
        # Skip contexts that were already handled above.
        if c not in contexts:
            for name_filter in c.get_filters(
                    search_global=False,
                    origin_scope=user_context.tree_node):
                completion_names += name_filter.values()
    return completion_names
def _goto_definitions(self, only_stubs=False, prefer_stubs=False):
    """
    Infer the definitions under the cursor position and return them as
    sorted :class:`classes.Definition` objects.
    """
    leaf = self._module_node.get_name_of_position(self._pos)
    if leaf is None:
        # Not on a name; fall back to whatever leaf is at the position.
        leaf = self._module_node.get_leaf_for_position(self._pos)
        if leaf is None:
            return []

    context = self._evaluator.create_context(self._get_module(), leaf)
    inferred = helpers.evaluate_goto_definition(
        self._evaluator, context, leaf)
    inferred = convert_contexts(
        inferred,
        only_stubs=only_stubs,
        prefer_stubs=prefer_stubs,
    )
    # The set here allows the definitions to become unique in an API
    # sense. In the internals we want to separate more things than in
    # the API.
    unique_defs = {classes.Definition(self._evaluator, c.name)
                   for c in inferred}
    return helpers.sorted_definitions(unique_defs)