Example #1
File: tokenize.py Project: hsfzxjy/lambdex
    def _handle_token(self, token: TokenInfo):
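        # Detect a gap between the previous token and this one and, if found,
        # yield a synthetic WHITESPACE token covering the gap before the token.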
        ws_start = ws_end = None
        last_token = self.last_token
        if last_token is TokenInfo.fake or last_token.type == tk.ENCODING:
            pass
        elif last_token.is_WS_NL:  # NEWLINE or CONTINUE
            assert token.start[0] == last_token.end[0] + 1
            if token.start[1] != 0:
                ws_start = (token.start[0], 0)
                ws_end = token.start
        elif last_token.end != token.start:
            assert last_token.end[0] == token.start[0]
            ws_start = last_token.end
            ws_end = token.start

        if ws_start is not None:
            whitespace_token = TokenInfo(
                tk.WHITESPACE,
                token.line[ws_start[1]:ws_end[1]],
                ws_start,
                ws_end,
                token.line,
            )
            yield whitespace_token

        self.last_token = token
        yield token

        self.action = actions.Default(dont_store=True)
Example #2
File: Reindent.py Project: hsfzxjy/lambdex
    def _handle_token(self, token):
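        # Re-emit every token, rewriting the leading whitespace after each
        # newline and adjusting the indent level via the bracket annotations.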
        if token.type == tk.ENCODING:
            return
        self._store_constant(token)

        if self.newlined:
            new_whitespace, action = self._process_leading_whitespace(token)
            if action == REPLACE:
                token = new_whitespace
            elif action == INSERT:
                yield new_whitespace

            self.last_leading_whitespace = new_whitespace.string
            self.newlined = False

        if token.annotation == A.DECL_LPAR:
            self.scopes.append(Scope(self.last_leading_whitespace))
        elif token.annotation == A.DECL_RPAR:
            self.scopes.pop()

        if token.annotation in (A.BODY_LSQB, A.CLS_BODY_LSQB):
            self.scopes[-1].indent_level += 1

        if token.annotation in (A.LAST_NL_BEFORE_RSQB, ):
            self.scopes[-1].indent_level -= 1

        if token.is_NL:
            self.newlined = True

        yield token
        self.action = actions.Default(dont_store=True)
Example #3
def r(ctx: Context, token: TokenInfo):
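    # If the op being closed is a class-body bracket, end the current statement
    # with an STMT_END sentinel, annotate this token as the matching
    # CLS_BODY_RSQB, and switch to the EXPECT_SUBCLS_DOT state.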
    if ctx.pop_op()[0].annotation == A.CLS_BODY_LSQB:
        ctx.pop_state()
        ctx.push_state(State.EXPECT_SUBCLS_DOT)

        sentinel = TokenInfo.new_sentinel_before(token, A.STMT_END)
        ctx.push_ret(sentinel)
        token.annotation = A.CLS_BODY_RSQB
        ctx.push_ret(token)
        return actions.Default(dont_store=True)
Example #4
def r(ctx: Context, token: TokenInfo):
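    # Turn the comma into an explicit statement boundary: an STMT_END sentinel,
    # the comma itself (annotated STMT_COMMA), then an STMT_START sentinel.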
    sentinel = TokenInfo.new_sentinel_before(token, A.STMT_END)
    ctx.push_ret(sentinel)

    token.annotation = A.STMT_COMMA
    ctx.push_ret(token)

    sentinel = TokenInfo.new_sentinel_after(token, A.STMT_START)
    ctx.push_ret(sentinel)

    return actions.Default(dont_store=True)
Example #5
def r(ctx: Context, token: TokenInfo):
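    # An opening bracket that starts the lambdex body: enter the body-list
    # state, track the bracket on the op stack, and open the first statement.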
    ctx.pop_state()
    ctx.push_state(State.IN_LBDX_BODY_LIST)
    token.annotation = A.BODY_LSQB
    ctx.push_op(token)
    ctx.push_ret(token)

    sentinel = TokenInfo.new_sentinel_after(token, A.STMT_START)
    ctx.push_ret(sentinel)

    return actions.Default(dont_store=True)
Example #6
def _annotate_clause_declarer(ctx: Context):
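    # The cached declarer is either a single name (annotated CLS_DECL) or a
    # dotted pair (CLS_DOT followed by CLS_DECL); any other shape is an error.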
    if ctx.cache is None:
        return actions.Default()
    if not isinstance(ctx.cache, list):
        ctx.error()
    length = len(ctx.cache)
    if length == 1:
        ctx.cache[0].annotation = A.CLS_DECL
    elif length == 2:
        ctx.cache[0].annotation = A.CLS_DOT
        ctx.cache[1].annotation = A.CLS_DECL
    else:
        ctx.error()
Example #7
def r(ctx: Context, token: TokenInfo):
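    # Only treat the comma as a statement separator when the innermost op is a
    # body bracket; otherwise bail out (returning None).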
    if ctx.last_op[0].annotation not in (A.BODY_LSQB, A.CLS_BODY_LSQB):
        return

    sentinel = TokenInfo.new_sentinel_before(token, A.STMT_END)
    ctx.push_ret(sentinel)

    token.annotation = A.STMT_COMMA
    ctx.push_ret(token)

    sentinel = TokenInfo.new_sentinel_after(token, A.STMT_START)
    ctx.push_ret(sentinel)

    return actions.Default(dont_store=True)
Example #8
    def _handle_token(self, token: TokenInfo):
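        # Normalize the spacing before trailing comments inside a declaration
        # to exactly two spaces; leading comments and buffers containing a
        # backslash continuation are flushed unchanged.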
        if token.annotation == A.DECL_LPAR:
            self.scope_stack.append(token)
        elif token.annotation == A.DECL_RPAR:
            self.scope_stack.pop()
        elif not self.scope_stack:
            return

        if token.is_NL:
            self.newlined = True
            return

        if token.is_WS:
            if self.newlined:
                self.leading = True

            if not self.buffering:
                self.action = actions.StartBuffer()
            self.newlined = False

            return

        if not token.is_CMT:
            if self.buffering:
                self.action = actions.StopBuffer()
            self.leading = False
            self.newlined = False

            return

        if not self.buffering and not self.newlined:
            yield TokenInfo(tk.WHITESPACE, "  ")
            yield token
            self.action = actions.Default(dont_store=True)
            return

        if self.buffering:
            if any("\\" in x.string for x in self.buffer) or self.leading:
                self.action = actions.StopBuffer()
            else:
                yield TokenInfo(tk.WHITESPACE, "  ")
                yield token
                self.action = actions.StopBuffer(dont_store=True,
                                                 dont_yield_buffer=True)

            self.leading = False
            self.newlined = False
            return
        self.newlined = False
Example #9
def r(ctx: Context, token: TokenInfo):
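    # Leave the current state and turn the comma into a statement boundary;
    # if a buffer is active, flush it without storing the comma.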
    ctx.pop_state()
    sentinel = TokenInfo.new_sentinel_before(token, A.STMT_END)
    ctx.push_ret(sentinel)

    token.annotation = A.STMT_COMMA
    ctx.push_ret(token)

    sentinel = TokenInfo.new_sentinel_after(token, A.STMT_START)
    ctx.push_ret(sentinel)

    if ctx.is_buffering():
        return actions.StopBuffer(dont_store=True)

    return actions.Default(dont_store=True)
Example #10
def r(ctx: Context, token: TokenInfo):
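    # If a buffer is active, flush it first without consuming this token. The
    # bracket then opens a class body: annotate it, record it on the op stack,
    # and annotate the cached declarer tokens before starting a statement.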
    if ctx.is_buffering():
        return actions.StopBuffer(dont_consume=True)

    ctx.pop_state()
    ctx.push_state(State.IN_LBDX_CLS_BODY)
    token.annotation = A.CLS_BODY_LSQB
    ctx.push_op(token)

    _annotate_clause_declarer(ctx)
    ctx.cache = None

    ctx.push_ret(token)
    sentinel = TokenInfo.new_sentinel_after(token, A.STMT_START)
    ctx.push_ret(sentinel)

    return actions.Default(dont_store=True)
Example #11
    def _handle_token(self, token):
        # Drop whitespace/newline tokens that directly follow a token whose
        # annotation is listed in SUPPRESS_WHITESPACE_AFTER.
        if self.last_token.annotation in SUPPRESS_WHITESPACE_AFTER and token.is_WS_NL:
            self.action = actions.Default(dont_store=True)
        return ()
Example #12
def r(ctx: Context, token: TokenInfo):
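    # Pop the current state without consuming the token, flushing any active
    # buffer along the way.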
    ctx.pop_state()
    if ctx.is_buffering():
        return actions.StopBuffer(dont_consume=True)
    return actions.Default(dont_consume=True)
Example #13
    def _handle_token(self, token: TokenInfo):
        # Silently drop any token whose annotation is listed in ANNOTATIONS_TO_DROP.
        if token.annotation in ANNOTATIONS_TO_DROP:
            self.action = actions.Default(dont_store=True)
        return ()