def neighbors(atok, t):
    # Collect the two tokens after and the two tokens before t.
    x = next_token(atok, t)
    # Token type 0 is ENDMARKER; treat it as "no next token".
    x = x if x and x.type != 0 else None
    y = next_token(atok, x) if x else None
    z = previous_token(atok, t)
    w = previous_token(atok, z) if z else None
    return [t, x, y, w, z]
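
All of these snippets navigate the token stream of an asttokens.ASTTokens object (atok). The next_token/previous_token helpers are project-specific and not shown here; a minimal sketch of what they might wrap, assuming they defer to asttokens' own next_token/prev_token methods and return None at the stream boundaries:

def next_token(atok, token):
    # Hypothetical helper: step to the following token, None past the end.
    try:
        return atok.next_token(token, include_extra=True)
    except IndexError:
        return None

def previous_token(atok, token):
    # Hypothetical helper: step to the preceding token, None before the start.
    if token.index == 0:
        return None
    return atok.prev_token(token, include_extra=True)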
Example #2
def expand_to_line_or_statement(atok, origin, l=None, b=None):
    l = LineInfo(atok) if not l else l
    b = BracketMatcher(atok) if not b else b
    # A trailing ";" belongs to the statement before it.
    origin = previous_token(atok, origin) if origin.string == ";" else origin
    left, right = b.find_enclosing(origin)
    left = left if left else origin
    right = right if right else origin
    left, move_left = l.get_first(left)
    right, move_right = l.get_last(right)
    # Force at least one pass of each expansion loop.
    move_left = True
    move_right = True
    i = 0
    while move_left:
        new_left = left
        while new_left:
            left, move_left = l.get_first(new_left)
            new_left, _ = b.find_enclosing(left, False)
            i = i + 1
            if i > 5:
                return None, None
        left = l.get_last_up(left) if move_left else left
    while move_right:
        new_right = right
        while new_right:
            right, move_right = l.get_last(new_right)
            _, new_right = b.find_enclosing(right, False)
            # Guard against an infinite loop when the last token is a
            # closing bracket or parenthesis.
            i = i + 1
            if i > 5:
                return None, None
        right = l.get_first_down(right) if move_right else right
    return left, right
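
A hedged usage sketch: LineInfo and BracketMatcher are classes from this project, so the call only runs alongside them, but atok.get_token_from_offset and the token startpos/endpos attributes are real asttokens API:

import asttokens

source = "value = compute(a,\n                b)\n"
atok = asttokens.ASTTokens(source, parse=True)
# Expand the token under a cursor offset to the whole statement.
token = atok.get_token_from_offset(source.index("a,"))
left, right = expand_to_line_or_statement(atok, token)
if left is not None:
    print(source[left.startpos:right.endpos])  # the full two-line call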
Example #3
def fix_exception_handler(root, atok):
    if already_fixed(root):
        return True
    # Handlers without a type or a bound name need no fake node.
    if not root.type or not root.name:
        mark_fixed(root)
        return True

    # Locate the NAME token after "except <type> as": the handler's name.
    token = root.type.last_token
    token = atok.find_token(next_token(atok, token), tokenize.NAME, root.name)
    f = root.type.first_token
    f = atok.find_token(previous_token(atok, f),
                        tokenize.NAME,
                        "except",
                        reverse=True)
    # ExceptHandler.name is a bare string in Python 3, so synthesize a
    # fake ast.Name node carrying the real token.
    fake_name_node = create_fake(root,
                                 ast.Name,
                                 real_tokens=token,
                                 id=token.string,
                                 ctx=ast.Load(),
                                 parent=root,
                                 parent_field="name")
    set_fake(root, "name", fake_name_node)
    mark_fixed(root)
    return True
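
For context on why the fake node is needed: in Python 3, ast.ExceptHandler stores the bound name as a plain string, so there is no AST node (and no token range) for it. A self-contained check with only the standard library:

import ast

tree = ast.parse("try:\n    pass\nexcept ValueError as err:\n    pass\n")
handler = tree.body[0].handlers[0]
print(type(handler.type).__name__)  # Name -- a real node with tokens
print(repr(handler.name))           # 'err' -- just a string, no node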
Example #4
def handle_error(self, atok, token):
    if token in self.already_checked:
        return True
    # Walk forward over the run of ERRORTOKENs, marking each as checked.
    final_error = token
    while final_error and final_error.type == tokenize.ERRORTOKEN:
        self.already_checked.add(final_error)
        final_error = next_token(atok, final_error)
    # Step back to the last token that was part of the error run.
    final_error = previous_token(atok, final_error)
    return False
Example #5
def filter_await(atok, m=None, timestamp=0):
    m = m if m else ModificationHandler(atok.text)
    candidates = [x for x in atok.tokens if x.string == "await"]
    for c in candidates:
        y = previous_token(atok, c)
        z = next_token(atok, c)
        if y and not y.string.isspace():
            # Previous token is a non-whitespace token: drop the word itself.
            m.modify_from(timestamp, (c.startpos, c.endpos), "", "await")
        else:
            if z:
                # Rewrite "await " (up to the next token) as "yield from ".
                m.modify_from(timestamp, (c.startpos, z.startpos),
                              "yield from ", "await")
            else:
                m.modify_from(timestamp, (c.startpos, c.endpos), "", "await")
    return m
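
ModificationHandler is project-specific, but the candidate scan is plain asttokens; a self-contained check of the token attributes it relies on:

import asttokens

source = "async def f():\n    return await g()\n"
atok = asttokens.ASTTokens(source, parse=True)
candidates = [t for t in atok.tokens if t.string == "await"]
print([(t.startpos, t.endpos) for t in candidates])  # one span, the "await"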
Example #6
def nearest_node_from_offset(root, atok, offset, special=False):
    converter = atok._line_numbers
    original_token = atok.get_token_from_offset(offset)
    token = original_token
    # Walk backwards past empty or whitespace-only tokens.
    while token and (not token.string or token.string.isspace()):
        token = previous_token(atok, token)
    # If that left the line the offset is on, search forward instead.
    if not token or converter.offset_to_line(
            offset)[0] != converter.offset_to_line(token.startpos)[0]:
        following = next_token(atok, original_token)
        while following and following.string.isspace():
            token = following
            following = next_token(atok, token)
        if following:
            token = following
    s = token.startpos
    r = node_from_range(root, atok, (s, s), special=special, lenient=True)
    return r
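
The two asttokens pieces this leans on can be checked on their own: get_token_from_offset is a real ASTTokens method, and the internal _line_numbers converter maps offsets to (line, column) pairs:

import asttokens

source = "x = 1\ny = some_call(x)\n"
atok = asttokens.ASTTokens(source, parse=True)
offset = source.index("some_call") + 2
print(atok.get_token_from_offset(offset).string)  # some_call
print(atok._line_numbers.offset_to_line(offset))  # (2, 6): line 2, column 6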
Example #7
def handled_consecutive_names(self, atok, token):
    if token in self.already_checked:
        return True
    if token.type != tokenize.NAME or token.string in KEYWORDS:
        return False
    # Scan forward over the run of consecutive NAME/ERRORTOKEN tokens.
    final_token = next_token(atok, token)
    while (final_token.type in [tokenize.NAME, tokenize.ERRORTOKEN]
           and final_token.string not in KEYWORDS):
        final_token = next_token(atok, final_token)
    final_token = previous_token(atok, final_token)
    if final_token != token:
        # Mark the whole run as handled and replace it in one modification.
        for t in atok.token_range(token, final_token):
            self.already_checked.add(t)
        self.m.modify_from(self.start_time,
                           (token.startpos, final_token.endpos), self.d)
        return True
    return False
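
atok.token_range and atok.next_token are real asttokens API. A self-contained illustration of the run scan on its own, assuming (as the method suggests) that input sometimes contains several adjacent names:

import asttokens
import tokenize

source = "foo bar baz = 1\n"  # three consecutive NAME tokens
atok = asttokens.ASTTokens(source)  # tokenize only: this is not valid Python
first = atok.tokens[0]
run_end = first
while atok.next_token(run_end).type == tokenize.NAME:
    run_end = atok.next_token(run_end)
print(source[first.startpos:run_end.endpos])  # foo bar baz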
Example #8
def get_last(self, t):
    bp = self.find_breakpoint(t, False)
    if bp:
        return previous_token(self.atok, bp), False
    # t.end[0] is the 1-based line the token ends on.
    x = t.end[0]
    return self.last[x], self.continuation[x - 1]
Example #9
def correct_start_of_line(self, atok, token, before_information,
                          after_information):
    # Note: tokenize.INDENT, not token.INDENT -- the token parameter
    # shadows the stdlib token module here.
    if before_information[0] and previous_token(
            atok, token).type == tokenize.INDENT:
        p = previous_token(atok, token)
        n = next_token(atok, token)
Example #10
def neighbors(atok, t):
    # Variant of the first snippet, without the ENDMARKER filter.
    x = next_token(atok, t)
    y = next_token(atok, x) if x else None
    z = previous_token(atok, t)
    w = previous_token(atok, z) if z else None
    return [t, x, y, w, z]
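
With the helper sketch from the top of this page, neighbors can be exercised directly; note the return order is [token, next, next-next, prev-prev, prev]:

import asttokens

source = "a = b + c\n"
atok = asttokens.ASTTokens(source, parse=True)
plus = next(t for t in atok.tokens if t.string == "+")
print([t.string if t else None for t in neighbors(atok, plus)])
# ['+', 'c', '\n', '=', 'b'] with the include_extra helpers sketched above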