Example #1
    def _calc_path_until_cursor(self, start_pos=None):
        """
        Something like a reverse tokenizer that tokenizes the reversed strings.
        """
        def fetch_line():
            if self._is_first:
                self._is_first = False
                self._line_length = self._column_temp
                line = first_line
            else:
                line = self.get_line(self._line_temp)
                self._line_length = len(line)
            line = '\n' + line

            # prepend preceding lines that end with a backslash (line continuations)
            while True:
                self._line_temp -= 1
                last_line = self.get_line(self._line_temp)
                if last_line and last_line[-1] == '\\':
                    line = last_line[:-1] + ' ' + line
                    self._line_length = len(last_line)
                else:
                    break
            return line[::-1]

        self._is_first = True
        self._line_temp, self._column_temp = start_cursor = start_pos
        first_line = self.get_line(self._line_temp)[:self._column_temp]

        open_brackets = ['(', '[', '{']
        close_brackets = [')', ']', '}']

        gen = PushBackIterator(tokenize.generate_tokens(fetch_line))
        string = u('')
        level = 0
        force_point = False
        last_type = None
        is_first = True
        for tok in gen:
            tok_type = tok.type
            tok_str = tok.string
            end = tok.end_pos
            self._column_temp = self._line_length - end[1]
            if is_first:
                if tok.start_pos != (1, 0):  # whitespace is not a path
                    return u(''), start_cursor
                is_first = False

            # print('tok', tok_type, tok_str, force_point)
            if last_type == tok_type == tokenize.NAME:
                string += ' '

            if level > 0:
                if tok_str in close_brackets:
                    level += 1
                elif tok_str in open_brackets:
                    level -= 1
            elif tok_str == '.':
                force_point = False
            elif force_point:
                # the text is tokenized in reverse, so the '.' of an attribute
                # access on a number gets fused into the NUMBER token
                # (e.g. '1.real' reversed tokenizes '.1' as one number)
                if tok_type == tokenize.NUMBER and tok_str[0] == '.':
                    force_point = False
                else:
                    break
            elif tok_str in close_brackets:
                level += 1
            elif tok_type in [tokenize.NAME, tokenize.STRING]:
                force_point = True
            elif tok_type == tokenize.NUMBER:
                pass
            else:
                if tok_str == '-':
                    next_tok = next(gen)
                    if next_tok.string == 'e':
                        gen.push_back(next_tok)
                    else:
                        break
                else:
                    break

            x = start_pos[0] - end[0] + 1
            line = first_line if x == start_pos[0] else self.get_line(x)
            start_cursor = x, len(line) - end[1]
            string += tok_str
            last_type = tok_type

        # string can still contain spaces at the end
        return string[::-1].strip(), start_cursor
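
A note on the docstring's "reverse tokenizer": fetch_line() hands the physical lines to the standard library tokenizer last-to-first, each one reversed character-wise, so tokenization effectively walks backwards from the cursor. Below is a minimal standalone sketch of that trick; the helper name and the sample input are illustrative assumptions, not part of jedi's API.

import tokenize

def reverse_tokens(text_before_cursor):
    # Reverse each physical line and feed them last-to-first, which is
    # essentially what fetch_line() above does.
    lines = [line[::-1] + '\n'
             for line in reversed(text_before_cursor.splitlines())]
    lines_iter = iter(lines)
    readline = lambda: next(lines_iter, '')
    for tok in tokenize.generate_tokens(readline):
        # Token strings come out mirrored; flip them back for display.
        print(tokenize.tok_name[tok.type], repr(tok.string[::-1]))

reverse_tokens("self.get_line")
# prints (one token per line): NAME 'get_line', OP '.', NAME 'self',
# NEWLINE '\n', ENDMARKER ''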
Example #2
    def _calc_path_until_cursor(self, start_pos):
        """
        Something like a reverse tokenizer that tokenizes the reversed strings.
        """
        open_brackets = ['(', '[', '{']
        close_brackets = [')', ']', '}']

        start_cursor = start_pos
        gen = PushBackIterator(self._get_backwards_tokenizer(start_pos))
        string = u('')
        level = 0
        force_point = False
        last_type = None
        is_first = True
        for tok_type, tok_str, tok_start_pos, prefix in gen:
            if is_first:
                if prefix:  # whitespace is not a path
                    return u(''), start_cursor
                is_first = False

            if last_type == tok_type == tokenize.NAME:
                string = ' ' + string

            if level:
                if tok_str in close_brackets:
                    level += 1
                elif tok_str in open_brackets:
                    level -= 1
            elif tok_str == '.':
                force_point = False
            elif force_point:
                # Reversed tokenizing, therefore a number is recognized as a
                # floating point number.
                # The same is true for string prefixes -> represented as a
                # combination of string and name.
                if tok_type == tokenize.NUMBER and tok_str[-1] == '.' \
                        or tok_type == tokenize.NAME and last_type == tokenize.STRING \
                        and tok_str.lower() in ('b', 'u', 'r', 'br', 'ur'):
                    force_point = False
                else:
                    break
            elif tok_str in close_brackets:
                level += 1
            elif tok_type in [tokenize.NAME, tokenize.STRING]:
                if keyword.iskeyword(tok_str) and string:
                    # If there's already something in the string, a keyword
                    # never adds any meaning to the current statement.
                    break
                force_point = True
            elif tok_type == tokenize.NUMBER:
                pass
            else:
                if tok_str == '-':
                    next_tok = next(gen)
                    if next_tok[1] == 'e':
                        gen.push_back(next_tok)
                    else:
                        break
                else:
                    break

            start_cursor = tok_start_pos
            string = tok_str + prefix + string
            last_type = tok_type

        # Don't need whitespace around a statement.
        return string.strip(), start_cursor
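
To make the dotted-path state machine easier to follow, here is a deliberately simplified, single-line sketch of the same idea: reverse the text before the cursor, collect NAME/STRING/NUMBER tokens joined by dots, and stop at keywords. The helper name is hypothetical and the bracket level, prefix handling, backslash continuations and the 'e-' exponent case from the real method are omitted.

import keyword
import tokenize

def path_until_cursor(line_text, column):
    # Tokenize the reversed text before the cursor and keep collecting
    # NAME/STRING/NUMBER tokens as long as they are joined by dots.
    text = line_text[:column]
    lines_iter = iter([text[::-1] + '\n'])
    readline = lambda: next(lines_iter, '')

    string = ''
    force_point = False
    for tok in tokenize.generate_tokens(readline):
        if tok.string == '.':
            force_point = False
        elif force_point:
            break  # two path parts without a separating '.' -> stop
        elif tok.type in (tokenize.NAME, tokenize.STRING):
            # Token strings are mirrored here, so un-reverse before the check.
            if keyword.iskeyword(tok.string[::-1]) and string:
                break  # a keyword never extends the dotted path
            force_point = True
        elif tok.type != tokenize.NUMBER:
            break
        string += tok.string
    return string[::-1]

src = "import os; os.path.join"
print(path_until_cursor(src, len(src)))  # -> os.path.join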