def _load_module(self):
    """Parse this module's source and record the result in the class cache.

    The cache key is the file path when available, otherwise the module
    name; the entry pairs the file's mtime (``None`` for path-less
    modules) with the parser.
    """
    key = self.path or self.name
    code = self._get_source()
    self._parser = parsing.PyFuzzyParser(code, key)
    # A modification time only exists for real files on disk.
    mtime = os.path.getmtime(self.path) if self.path else None
    if key:
        self.cache[key] = mtime, self._parser
def _get_under_cursor_stmt(self, cursor_txt):
    """Parse *cursor_txt* and return its first statement, re-anchored.

    The statement's position is moved to the current cursor position and
    its parent set to the user scope, so later evaluation sees it in
    context.  Raises ``NotFoundError`` when nothing could be parsed.
    """
    parsed = parsing.PyFuzzyParser(cursor_txt, self.source_path, no_docstr=True)
    statements = parsed.module.statements
    if not statements:
        raise NotFoundError()
    stmt = statements[0]
    stmt.start_pos = self.pos
    stmt.parent = self.parser.user_scope
    return stmt
def follow_param(param):
    """Infer a parameter's types from its enclosing function's docstring.

    Looks the parameter name up in the docstring; when a type expression
    is found it is parsed as a tiny module and evaluated.  Returns an
    empty list when the docstring gives no hint.
    """
    func = param.parent_function
    param_str = search_param_in_docstr(func.docstr, str(param.get_name()))
    if param_str is None:
        return []
    # Parse the docstring's type expression as a one-statement module.
    parser = parsing.PyFuzzyParser(param_str, None, (1, 0), no_docstr=True)
    parser.user_stmt.parent = func
    return evaluate.follow_statement(parser.user_stmt)
def __call__(self, code, module_path=None, user_position=None):
    """Return a parser for *code*, reusing a cached fast parser if any.

    With ``settings.fast_parser`` disabled a plain parser is always
    built; otherwise cached parsers are updated in place and new ones
    are stored under *module_path* (including ``None``).
    """
    if not settings.fast_parser:
        # Fast parsing disabled: always build a plain parser.
        return parsing.PyFuzzyParser(code, module_path, user_position)

    if module_path is not None and module_path in parser_cache:
        # Cache hit: refresh the existing parser with the new source.
        parser = parser_cache[module_path]
        parser.update(code, user_position)
    else:
        parser = super(CachedFastParser, self).__call__(code, module_path,
                                                        user_position)
        parser_cache[module_path] = parser
    return parser
def __call__(self, source, module_path=None, user_position=None):
    """Return a parser for *source*, reusing a cached fast parser if any.

    With ``settings.fast_parser`` disabled a plain parser is always
    built.  A cached entry is reused only when its parser is a fast
    parser (a cached plain ``PyFuzzyParser`` forces a rebuild).
    """
    if not settings.fast_parser:
        # Fast parsing disabled: always build a plain parser.
        return parsing.PyFuzzyParser(source, module_path, user_position)

    cached = cache.parser_cache.get(module_path, None)
    if cached is not None and not isinstance(cached.parser,
                                             parsing.PyFuzzyParser):
        # cached is a `cache.ParserCacheItem`; update its parser in place.
        parser = cached.parser
        parser.update(source, user_position)
        return parser
    return super(CachedFastParser, self).__call__(source, module_path,
                                                  user_position)
def find_return_types(func):
    """Infer a function's return types from its docstring.

    Unwraps instance elements and function wrappers first, then searches
    the docstring for a return-type expression, parses it as a tiny
    module and evaluates it.  Returns an empty list when no type hint is
    found.
    """
    # Unwrap to the underlying function object before reading the docstring.
    if isinstance(func, evaluate.InstanceElement):
        func = func.var
    if isinstance(func, evaluate.Function):
        func = func.base_func

    type_str = search_return_in_docstr(func.docstr)
    if not type_str:
        return []
    parser = parsing.PyFuzzyParser(type_str, None, (1, 0), no_docstr=True)
    parser.user_stmt.parent = func
    return list(evaluate.follow_statement(parser.user_stmt))
def magic_function_scope(self):
    """Return a scope that defines ``FunctionType``, building it lazily.

    A tiny container class exposing ``types.FunctionType`` is rendered
    to source via ``_generate_code``, parsed, wired into ``self.scope``,
    and the resolved scope is cached on the instance as
    ``_magic_function_scope``.
    """
    try:
        return self._magic_function_scope
    except AttributeError:
        # depth = 1 because this is not a module
        # NOTE(review): the comment above says depth = 1, but the call
        # below passes depth=0 — confirm which depth is intended.
        class Container(object):
            FunctionType = types.FunctionType
        source = _generate_code(Container, depth=0)
        parser = parsing.PyFuzzyParser(source, None)
        module = parser.module
        module.parent = self.scope
        # Resolve the name `FunctionType` inside the freshly parsed module.
        typ = evaluate.follow_path(iter(['FunctionType']), module, module)
        # Cache the single resulting scope on the instance, then return it.
        s = self._magic_function_scope = typ.pop()
        return s
def get_part_parser(self):
    """Return a parser over only a window of the source code.

    Parses just ``settings.part_line_length`` lines starting near the
    current position.  This exists purely for performance; the result is
    cached on the instance.
    """
    if self._part_parser:
        return self._part_parser

    # TODO check for docstrings
    window = settings.part_line_length
    offset = max(self.position[0] - window, 0)
    source_lines = self.source.split('\n')
    snippet = '\n'.join(source_lines[offset:offset + window])
    self._part_parser = parsing.PyFuzzyParser(snippet, self.path,
                                              self.position,
                                              line_offset=offset)
    return self._part_parser
def parser(self):
    """Lazily build, cache and return the parser for this module."""
    if self._parser:
        return self._parser

    try:
        # Evict any stale cached parse of the same file before reparsing,
        # invalidating star imports derived from the old module.
        _ts, old_parser = builtin.CachedModule.cache[self.path]
        imports.invalidate_star_import_cache(old_parser.module)
        del builtin.CachedModule.cache[self.path]
    except KeyError:
        pass
    # Parse eagerly: the parser will be needed anyway, and the position
    # argument (not used by default) must be recorded in the cache now.
    self._parser = parsing.PyFuzzyParser(self.source, self.path,
                                         self.position)
    if self.path is not None:
        builtin.CachedModule.cache[self.path] = (time.time(), self._parser)
    return self._parser
def _parse(self, code):
    """Incrementally parse *code*.

    The source is split into top-level ``def``/``class`` chunks; a chunk
    whose hash and exact text match an existing sub-parser is reused
    (only its line offsets are shifted), everything else is reparsed.

    :type code: str
    """
    # One part per top-level def/class (a decorator line directly above a
    # def/class stays with it); `^` also captures a leading non-def part.
    r = r'(?:\n(?:def|class|@.*?\n(?:def|class))|^).*?' \
        r'(?=\n(?:def|class|@)|$)'
    parts = re.findall(r, code, re.DOTALL)
    if len(parts) > 1 and not re.match('def|class|@', parts[0]):
        # Merge the first two because `common.NoErrorTokenizer` is not able
        # to know if there's a class/func or not.
        # Therefore every part has its own class/func. Exactly one.
        parts[0] += parts[1]
        parts.pop(1)

    if settings.fast_parser_always_reparse:
        # Debug/compatibility switch: drop all cached sub-parsers.
        self.parsers[:] = []

    # Map hash -> existing sub-parser for reuse lookups.
    # (dict comprehensions are not available in py2.5/2.6 :-( )
    hashes = dict((p.hash, p) for p in self.parsers)

    line_offset = 0   # lines consumed so far
    start = 0         # characters consumed so far
    p = None          # sub-parser handling the current region
    parser_order = 0  # insertion index into self.parsers
    for code_part in parts:
        lines = code_part.count('\n')
        # The parser is using additional newlines, therefore subtract 2
        # when checking whether the previous sub-parser already covers
        # this part.
        if p is None or line_offset >= p.end_pos[0] - 2:
            # Check if code_part has already been parsed.
            h = hash(code_part)
            if h in hashes and hashes[h].code == code_part:
                # Reuse the cached sub-parser; only shift its line offset.
                p = hashes[h]
                del hashes[h]
                m = p.module
                m.line_offset += line_offset + 1 - m.start_pos[0]

                if self.user_position is not None and \
                        m.start_pos <= self.user_position <= m.end_pos:
                    # It's important to take care of the whole user
                    # positioning stuff, if no reparsing is being done.
                    p.user_stmt = m.get_statement_for_position(
                        self.user_position, include_imports=True)
                    if p.user_stmt:
                        p.user_scope = p.user_stmt.parent
                    else:
                        p.user_scope = self.scan_user_scope(m) \
                            or self.module
            else:
                # No cached match: reparse from this offset to the end
                # (stop_on_scope presumably halts at the next top-level
                # scope — confirm in parsing.PyFuzzyParser).
                p = parsing.PyFuzzyParser(code[start:], self.module_path,
                                          self.user_position,
                                          line_offset=line_offset,
                                          stop_on_scope=True,
                                          top_module=self.module)
                p.hash = h
                p.code = code_part
                p.module.parent = self.module
            self.parsers.insert(parser_order, p)
            parser_order += 1
        line_offset += lines
        start += len(code_part)

    # Drop stale sub-parsers past the last one (re)used this round.
    self.parsers[parser_order + 1:] = []