Пример #1
0
    def _handle_token(self, token: TokenInfo):
        """Splice a LAST_STMT sentinel into the buffered final statement.

        Generator. Starts buffering at a STMT_END sentinel; when a closing
        bracket (BODY_RSQB / CLS_BODY_RSQB) or the next STMT_START arrives,
        flushes the buffer with a LAST_STMT_* sentinel inserted at
        ``self.insert_last_stmt_at`` (or right after the first buffered
        token when that position is unset).
        """
        if token.annotation == A.STMT_END and not self.buffering:
            self.action = actions.StartBuffer()
        elif self.buffering and token.annotation in (
                A.BODY_RSQB,
                A.CLS_BODY_RSQB,
                A.STMT_START,
        ):

            if token.annotation != A.STMT_START:
                # A closing bracket ends the body: the buffered statement is
                # the last one.  Whether a trailing comma was recorded picks
                # the sentinel annotation.
                pos = self.insert_last_stmt_at
                if pos is None:
                    annotation = A.LAST_STMT_WITHOUT_COMMA
                else:
                    annotation = A.LAST_STMT_WITH_COMMA
                pos = pos or 1  # default: right after the first buffered token

                yield from self.buffer[:pos]
                yield TokenInfo.new_sentinel_after(self.buffer[pos - 1],
                                                   annotation)
                yield from self.buffer[pos:]
            else:
                # Another statement follows, so this one was not the last —
                # replay the buffer unchanged.
                yield from self.buffer

            self.insert_last_stmt_at = None
            self.action = actions.StopBuffer(dont_yield_buffer=True)
        elif self.buffering and token == A.STMT_START:
            # NOTE(review): compares the token itself (not ``.annotation``)
            # against A.STMT_START — presumably TokenInfo.__eq__ matches on
            # annotation; confirm this branch is reachable, since the
            # annotation-based elif above already covers A.STMT_START.
            self.insert_last_stmt_at = None
            self.action = actions.StopBuffer()
Пример #2
0
    def _handle_token(self, token: TokenInfo):
        """Normalize the whitespace run preceding certain annotated tokens.

        Generator. Tracks declaration parentheses on ``self.scope_stack``;
        inside a declaration scope, buffered whitespace/newlines before a
        NORMALIZE_WHITESPACE_BEFORE token are swallowed and replaced with
        that annotation's canonical whitespace string.
        """
        annotation = token.annotation
        if annotation == A.DECL_LPAR:
            self.scope_stack.append(token)
        elif annotation == A.DECL_RPAR:
            self.scope_stack.pop()
        elif not self.scope_stack:
            return

        if token.is_WS_NL:
            # Collect the whitespace run; a later token decides its fate.
            if not self.buffering:
                self.action = actions.StartBuffer()
            return

        if annotation not in NORMALIZE_WHITESPACE_BEFORE:
            if self.buffering:
                self.action = actions.StopBuffer()
        else:
            replacement = NORMALIZE_WHITESPACE_BEFORE[annotation]
            # A class-body bracket not directly following a class-head
            # bracket gets no separating whitespace at all.
            if (annotation == A.CLS_BODY_LSQB
                    and self.prev_non_ws_token.annotation != A.CLS_HEAD_RSQB):
                replacement = ""
            yield TokenInfo(
                type=tk.WHITESPACE,
                string=replacement,
            )

            if self.buffering:
                self.action = actions.StopBuffer(dont_yield_buffer=True)

        self.prev_non_ws_token = token
    def _handle_token(self, token: TokenInfo):
        """Re-attach trailing comments with a canonical two-space gap.

        Generator. Inside declaration parentheses, whitespace before a
        comment is buffered; when the comment arrives, the buffered run is
        dropped and replaced with exactly two spaces — unless the comment
        starts its own line (``self.leading``) or the buffered whitespace
        contains a backslash continuation, in which case the original
        spacing is kept.  ``self.newlined`` remembers that the previous
        token was a newline.
        """
        if token.annotation == A.DECL_LPAR:
            self.scope_stack.append(token)
        elif token.annotation == A.DECL_RPAR:
            self.scope_stack.pop()
        elif not self.scope_stack:
            # Outside any declaration scope: pass through untouched.
            return

        if token.is_NL:
            self.newlined = True
            return

        if token.is_WS:
            # Whitespace right after a newline means the next token (if a
            # comment) is a leading, own-line comment.
            if self.newlined:
                self.leading = True

            if not self.buffering:
                self.action = actions.StartBuffer()
            self.newlined = False

            return

        if not token.is_CMT:
            # Not a comment: whatever was buffered stays as-is.
            if self.buffering:
                self.action = actions.StopBuffer()
            self.leading = False
            self.newlined = False

            return

        if not self.buffering and not self.newlined:
            # Comment with no whitespace before it: insert the two spaces.
            yield TokenInfo(tk.WHITESPACE, "  ")
            yield token
            self.action = actions.Default(dont_store=True)
            return

        if self.buffering:
            # A backslash continuation in the buffered whitespace, or a
            # leading (own-line) comment, must keep its original spacing.
            if any("\\" in x.string for x in self.buffer) or self.leading:
                self.action = actions.StopBuffer()
            else:
                # Replace the buffered whitespace with exactly two spaces.
                yield TokenInfo(tk.WHITESPACE, "  ")
                yield token
                self.action = actions.StopBuffer(dont_store=True,
                                                 dont_yield_buffer=True)

            self.leading = False
            self.newlined = False
            return
        self.newlined = False
Пример #4
0
def r(ctx: Context, token: TokenInfo):
    """Enter IN_LBDX_CALL: mark the cached token as the declarer and the
    current token as its opening paren, then flush the buffer."""
    ctx.pop_state()
    ctx.push_state(State.IN_LBDX_CALL)

    ctx.cache.annotation = A.DECL

    token.annotation = A.DECL_LPAR
    ctx.push_op(token)

    return actions.StopBuffer()
Пример #5
0
    def _handle_token(self, token):
        """Reorder buffered comments around a fully matched token pattern.

        Generator. Buffering starts at a START_TOKENS annotation and
        ``self.pattern`` grows with each annotated token.  Once a rule from
        COLLECT_BACKWARD or COLLECT_FORWARD is exhausted, the buffer is
        re-emitted with its comments moved to the rule's insertion side;
        when no rule matches any longer, the buffer is replayed and the
        current token re-processed.
        """
        if token.annotation is None:
            return

        if not self.buffering:
            if token.annotation in START_TOKENS:
                self.action = actions.StartBuffer()
                self.pattern = [token.annotation]
            return

        self.pattern.append(token.annotation)

        for ruleset in (COLLECT_BACKWARD, COLLECT_FORWARD):
            matched, rule, exhausted = _match_rule(self.pattern, ruleset)
            if not matched:
                continue
            if not exhausted:
                # Partial match: keep buffering.
                return

            comments, others = self._split_buffer()
            if ruleset is COLLECT_FORWARD:
                # Forward rules only ever insert comments up front.
                assert rule.insert_at == BEFORE
                yield from comments
                yield from others
                yield token
            elif rule.insert_at == BEFORE:
                yield from others
                yield from comments
                yield token
            else:
                yield from others
                yield token
                yield from comments

            self.action = actions.StopBuffer(dont_yield_buffer=True,
                                             dont_store=True)
            return

        # No rule matches the pattern any more: replay the buffer and
        # re-process the current token from scratch.
        self.action = actions.StopBuffer(dont_consume=True)
Пример #6
0
def r(ctx: Context, token: TokenInfo):
    """Open a clause head: annotate the bracket, switch into
    IN_LBDX_CLS_HEAD, annotate the declarer, and flush any buffer."""
    ctx.pop_state()

    token.annotation = A.CLS_HEAD_LSQB
    ctx.push_op(token)

    ctx.push_state(State.IN_LBDX_CLS_HEAD)

    _annotate_clause_declarer(ctx)
    ctx.cache = None

    if ctx.is_buffering():
        return actions.StopBuffer()
Пример #7
0
    def _handle_token(self, token):
        """Ensure a newline separates specific annotation pairs.

        Always returns an empty tuple (nothing is emitted directly); output
        manipulation happens through the buffer and ``self.action``.
        """
        if token.annotation is None:
            self._memorize_NL(token)
        elif token.annotation in START_TOKENS:
            # Begin (or restart) buffering at a pair-opening token.
            if self.buffering:
                self.action = actions.StopBuffer(dont_consume=True)
            else:
                self.action = actions.StartBuffer()
        elif (self.last_token.annotation, token.annotation) in INSERT_BETWEEN:
            # The tracked pair is adjacent: force a newline between them.
            self._ensure_NL_exists_in_buffer()
            self._annotate_NL_before_RSQB(token)

            self._reset()
            self.action = actions.StopBuffer()
        else:
            self._reset()

        return ()
    def _handle_token(self, token: TokenInfo):
        """Canonicalize the whitespace in front of NORMALIZE_WHITESPACE_BEFORE
        tokens while inside declaration parentheses.

        Generator. Whitespace/newline runs are buffered; when the following
        token has a normalization rule, the run is swallowed and replaced by
        the rule's canonical whitespace string.
        """
        annotation = token.annotation
        if annotation == A.DECL_LPAR:
            self.scope_stack.append(token)
        elif annotation == A.DECL_RPAR:
            self.scope_stack.pop()
        elif not self.scope_stack:
            return

        if token.is_WS_NL:
            # Buffer the run; the next non-whitespace token decides its fate.
            if not self.buffering:
                self.action = actions.StartBuffer()
            return

        if annotation not in NORMALIZE_WHITESPACE_BEFORE:
            if self.buffering:
                self.action = actions.StopBuffer()
            return

        # Swallow the buffered whitespace and emit the canonical form.
        yield TokenInfo(
            type=tk.WHITESPACE,
            string=NORMALIZE_WHITESPACE_BEFORE[annotation],
        )

        if self.buffering:
            self.action = actions.StopBuffer(dont_yield_buffer=True)
Пример #9
0
def r(ctx: Context, token: TokenInfo):
    """Split at a statement comma: emit STMT_END before it and STMT_START
    after it, annotating the comma itself as STMT_COMMA."""
    ctx.pop_state()

    # The closing sentinel is created before the comma is re-annotated.
    ctx.push_ret(TokenInfo.new_sentinel_before(token, A.STMT_END))

    token.annotation = A.STMT_COMMA
    ctx.push_ret(token)

    ctx.push_ret(TokenInfo.new_sentinel_after(token, A.STMT_START))

    if ctx.is_buffering():
        return actions.StopBuffer(dont_store=True)
    return actions.Default(dont_store=True)
Пример #10
0
def r(ctx: Context, token: TokenInfo):
    """Open a clause body: annotate the bracket, switch into
    IN_LBDX_CLS_BODY, and emit the bracket plus a STMT_START sentinel."""
    if ctx.is_buffering():
        # Flush first; this token will be re-processed afterwards.
        return actions.StopBuffer(dont_consume=True)

    token.annotation = A.CLS_BODY_LSQB

    ctx.pop_state()
    ctx.push_state(State.IN_LBDX_CLS_BODY)
    ctx.push_op(token)

    _annotate_clause_declarer(ctx)
    ctx.cache = None

    ctx.push_ret(token)
    ctx.push_ret(TokenInfo.new_sentinel_after(token, A.STMT_START))

    return actions.Default(dont_store=True)
Пример #11
0
    def _handle_token(self, token: TokenInfo):
        """Move a buffered STMT_START past trivia, dropping the pair when a
        STMT_START would land directly next to a STMT_END.

        Generator. Whitespace, newlines, comments, and STMT_START sentinels
        are buffered; the next significant token decides the emission order.
        """
        if token.is_WS_NL_CMT or token == A.STMT_START:
            if not self.buffering:
                self.action = actions.StartBuffer()
            return

        if not self.buffering:
            return

        if self.stmt_start_in_buffer is not None:
            yield from self.buffer

            # Adjacent STMT_START / STMT_END sentinels cancel each other.
            if token != A.STMT_END:
                yield self.stmt_start_in_buffer
                yield token
        elif token == A.STMT_END:
            # STMT_END jumps in front of the buffered trivia.
            yield token
            yield from self.buffer
        else:
            yield from self.buffer
            yield token

        self.stmt_start_in_buffer = None
        self.action = actions.StopBuffer(dont_yield_buffer=True,
                                         dont_store=True)
Пример #12
0
def r(ctx: Context, token: TokenInfo):
    """Abandon the cached token and current state, then replay the buffer
    so this token is handled by the restored state."""
    ctx.cache = None
    ctx.pop_state()
    return actions.StopBuffer(dont_consume=True)
Пример #13
0
def r(ctx: Context, token: TokenInfo):
    """Close an augmented assignment: bracket the cached opener and this
    token with AUGASSIGN_START/AUGASSIGN_END, then flush the buffer."""
    ctx.pop_state()
    token.annotation = A.AUGASSIGN_END
    ctx.cache[0].annotation = A.AUGASSIGN_START
    return actions.StopBuffer()
Пример #14
0
def r(ctx: Context, token: TokenInfo):
    """Leave the current state and flush the buffered tokens."""
    ctx.pop_state()
    return actions.StopBuffer()
Пример #15
0
def r(ctx: Context, token: TokenInfo):
    """Pop the state and re-process the current token, flushing the
    buffer first when one is active."""
    ctx.pop_state()
    action = actions.StopBuffer if ctx.is_buffering() else actions.Default
    return action(dont_consume=True)