def _handle_token(self, token: TokenInfo):
    if token.annotation == A.STMT_END and not self.buffering:
        self.action = actions.StartBuffer()
    elif self.buffering and token.annotation in (
        A.BODY_RSQB,
        A.CLS_BODY_RSQB,
        A.STMT_START,
    ):
        if token.annotation != A.STMT_START:
            # Insert a sentinel after the last statement; the annotation
            # records whether a trailing-comma position was memorized.
            pos = self.insert_last_stmt_at
            if pos is None:
                annotation = A.LAST_STMT_WITHOUT_COMMA
            else:
                annotation = A.LAST_STMT_WITH_COMMA
            pos = pos or 1
            yield from self.buffer[:pos]
            yield TokenInfo.new_sentinel_after(self.buffer[pos - 1], annotation)
            yield from self.buffer[pos:]
        else:
            yield from self.buffer
        self.insert_last_stmt_at = None
        self.action = actions.StopBuffer(dont_yield_buffer=True)
    elif self.buffering and token == A.STMT_START:
        self.insert_last_stmt_at = None
        self.action = actions.StopBuffer()
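# --- Illustrative sketch (not part of the original source) ------------------
# The _handle_token methods in this section are generators driven by a filter
# base class that is not shown here. The stubs below are a minimal sketch of
# the protocol they appear to assume: `Token`, `StartBuffer`, `StopBuffer`,
# and the `feed` driver are assumptions for illustration, not the project's
# actual API.
from dataclasses import dataclass
from typing import Iterator, List, Optional


@dataclass
class Token:  # stand-in for the real TokenInfo
    string: str
    annotation: Optional[str] = None


class StartBuffer:
    """Start collecting tokens into self.buffer instead of emitting them."""


@dataclass
class StopBuffer:
    """Stop collecting; flags control what happens to the held tokens."""

    dont_yield_buffer: bool = False  # drop the buffer (handler already re-emitted it)
    dont_store: bool = False         # do not emit the current token either
    dont_consume: bool = False       # re-run the current token through the filter


class BufferingFilter:
    def __init__(self) -> None:
        self.buffer: List[Token] = []
        self.buffering = False
        self.action = None

    def _handle_token(self, token: Token) -> Iterator[Token]:
        raise NotImplementedError  # subclasses yield replacement tokens

    def feed(self, token: Token) -> Iterator[Token]:
        # One plausible reading of how the driver interprets self.action:
        # the handler runs first, then the action it set decides whether the
        # current token is buffered, emitted, dropped, or re-processed.
        yield from self._handle_token(token)
        action, self.action = self.action, None
        if isinstance(action, StartBuffer):
            self.buffering = True
            self.buffer = [token]
        elif isinstance(action, StopBuffer):
            self.buffering = False
            if not action.dont_yield_buffer:
                yield from self.buffer
            self.buffer = []
            if action.dont_consume:
                yield from self.feed(token)
            elif not action.dont_store:
                yield token
        elif self.buffering:
            self.buffer.append(token)
        else:
            yield token
# -----------------------------------------------------------------------------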
def _handle_token(self, token: TokenInfo):
    # Track declaration parentheses; do nothing outside of them.
    if token.annotation == A.DECL_LPAR:
        self.scope_stack.append(token)
    elif token.annotation == A.DECL_RPAR:
        self.scope_stack.pop()
    elif not self.scope_stack:
        return

    if token.is_WS_NL:
        if not self.buffering:
            self.action = actions.StartBuffer()
        return

    if token.annotation in NORMALIZE_WHITESPACE_BEFORE:
        replacement = NORMALIZE_WHITESPACE_BEFORE[token.annotation]
        if (
            self.prev_non_ws_token.annotation != A.CLS_HEAD_RSQB
            and token.annotation == A.CLS_BODY_LSQB
        ):
            replacement = ""
        whitespace = TokenInfo(
            type=tk.WHITESPACE,
            string=replacement,
        )
        yield whitespace
        if self.buffering:
            self.action = actions.StopBuffer(dont_yield_buffer=True)
    elif self.buffering:
        self.action = actions.StopBuffer()

    self.prev_non_ws_token = token
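# Hypothetical shape of NORMALIZE_WHITESPACE_BEFORE (the real table lives
# elsewhere; these keys and values are guesses): it maps an annotation to the
# canonical whitespace string that should precede tokens carrying it.
# NORMALIZE_WHITESPACE_BEFORE = {
#     A.CLS_BODY_LSQB: " ",  # a space before a class body's "[" ...
#     A.BODY_LSQB: "",       # ... but none before a plain body's "["
# }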
def r(ctx: Context, token: TokenInfo):
    if ctx.last_op[0].annotation not in (A.CLS_BODY_LSQB, A.BODY_LSQB):
        return
    ctx.cache = [token]
    ctx.push_state(State.EXPECT_AUGASSIGN_DASH)
    return actions.StartBuffer()
def r(ctx: Context, token: TokenInfo):
    last_state = ctx.pop_state()
    new_state = {
        State.EXPECT_SUBCLS_DOT: State.EXPECT_SUBCLS_NAME,
        State.MUST_SUBCLS_DOT_WITH_HEAD: State.MUST_SUBCLS_NAME_WITH_HEAD,
        State.MUST_SUBCLS_DOT_WITH_BODY: State.MUST_SUBCLS_NAME_WITH_BODY,
    }[last_state]
    ctx.push_state(new_state)
    ctx.cache = [token]
    return actions.StartBuffer()
def _handle_token(self, token: TokenInfo):
    # Track declaration parentheses; do nothing outside of them.
    if token.annotation == A.DECL_LPAR:
        self.scope_stack.append(token)
    elif token.annotation == A.DECL_RPAR:
        self.scope_stack.pop()
    elif not self.scope_stack:
        return

    if token.is_NL:
        self.newlined = True
        return

    if token.is_WS:
        # Whitespace right after a newline is leading indentation.
        if self.newlined:
            self.leading = True
            if not self.buffering:
                self.action = actions.StartBuffer()
        self.newlined = False
        return

    if not token.is_CMT:
        if self.buffering:
            self.action = actions.StopBuffer()
        self.leading = False
        self.newlined = False
        return

    # From here on the token is a comment.
    if not self.buffering and not self.newlined:
        yield TokenInfo(tk.WHITESPACE, " ")
        yield token
        self.action = actions.Default(dont_store=True)
        return

    if self.buffering:
        # Keep the buffered whitespace as-is if it contains a backslash
        # continuation or is leading indentation; otherwise collapse it
        # to a single space before the comment.
        if any("\\" in x.string for x in self.buffer) or self.leading:
            self.action = actions.StopBuffer()
        else:
            yield TokenInfo(tk.WHITESPACE, " ")
            yield token
            self.action = actions.StopBuffer(dont_store=True, dont_yield_buffer=True)
        self.leading = False
        self.newlined = False
        return

    self.newlined = False
def _handle_token(self, token):
    if token.annotation is None:
        return

    if not self.buffering:
        if token.annotation in START_TOKENS:
            self.action = actions.StartBuffer()
            self.pattern = [token.annotation]
        return

    self.pattern.append(token.annotation)

    # Try the backward-collection rules first, then the forward ones.
    matched, rule, exhausted = _match_rule(self.pattern, COLLECT_BACKWARD)
    if matched and exhausted:
        comments, others = self._split_buffer()
        yield from others
        if rule.insert_at == BEFORE:
            yield from comments
            yield token
        else:
            yield token
            yield from comments
        self.action = actions.StopBuffer(dont_yield_buffer=True, dont_store=True)
        return
    elif matched:
        return

    matched, rule, exhausted = _match_rule(self.pattern, COLLECT_FORWARD)
    if matched and exhausted:
        comments, others = self._split_buffer()
        assert rule.insert_at == BEFORE
        yield from comments
        yield from others
        yield token
        self.action = actions.StopBuffer(dont_yield_buffer=True, dont_store=True)
        return
    elif matched:
        return

    # No rule matches the pattern any more: flush and re-process this token.
    self.action = actions.StopBuffer(dont_consume=True)
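# --- Illustrative sketch (not part of the original source) ------------------
# _match_rule is not shown. From the call sites above it appears to report
# whether the accumulated annotation pattern is a prefix of some rule
# (`matched`) and whether it covers the whole rule (`exhausted`). The names
# and fields below are assumptions, not the real implementation.
from dataclasses import dataclass
from typing import List, Optional, Sequence, Tuple


@dataclass
class Rule:
    pattern: List[str]  # annotation sequence to match against
    insert_at: str      # where collected comments are re-inserted, e.g. BEFORE


def match_rule(
    pattern: Sequence[str], rules: Sequence[Rule]
) -> Tuple[bool, Optional[Rule], bool]:
    for rule in rules:
        if list(rule.pattern[: len(pattern)]) == list(pattern):
            return True, rule, len(pattern) == len(rule.pattern)
    return False, None, False
# -----------------------------------------------------------------------------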
def _handle_token(self, token: TokenInfo):
    if token.is_WS_NL_CMT or token == A.STMT_START:
        if not self.buffering:
            self.action = actions.StartBuffer()
    elif self.buffering:
        if self.stmt_start_in_buffer is not None:
            yield from self.buffer
            # collapse STMT_START and STMT_END if adjacent
            if token != A.STMT_END:
                yield self.stmt_start_in_buffer
                yield token
        elif token == A.STMT_END:
            # move STMT_END in front of the buffered trivia
            yield token
            yield from self.buffer
        else:
            yield from self.buffer
            yield token
        self.stmt_start_in_buffer = None
        self.action = actions.StopBuffer(dont_yield_buffer=True, dont_store=True)
def _handle_token(self, token):
    # This handler never yields; it returns an empty iterable instead.
    if token.annotation is None:
        self._memorize_NL(token)
        return ()

    if token.annotation in START_TOKENS:
        if self.buffering:
            self.action = actions.StopBuffer(dont_consume=True)
            return ()
        self.action = actions.StartBuffer()
        return ()

    if (self.last_token.annotation, token.annotation) in INSERT_BETWEEN:
        self._ensure_NL_exists_in_buffer()
        self._annotate_NL_before_RSQB(token)
        self._reset()
        self.action = actions.StopBuffer()
        return ()

    self._reset()
    return ()
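# Hypothetical shape of INSERT_BETWEEN (the real table is defined elsewhere;
# these pairs are guesses): membership is tested with a
# (previous annotation, current annotation) tuple, so a set of the adjacency
# pairs that require a newline between them would fit.
# INSERT_BETWEEN = {
#     (A.STMT_END, A.BODY_RSQB),
#     (A.STMT_END, A.CLS_BODY_RSQB),
# }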
def _handle_token(self, token: TokenInfo):
    if token.annotation == A.DECL_LPAR:
        self.scope_stack.append(token)
    elif token.annotation == A.DECL_RPAR:
        self.scope_stack.pop()
    elif not self.scope_stack:
        return

    if token.is_WS_NL:
        if not self.buffering:
            self.action = actions.StartBuffer()
        return

    if token.annotation in NORMALIZE_WHITESPACE_BEFORE:
        whitespace = TokenInfo(
            type=tk.WHITESPACE,
            string=NORMALIZE_WHITESPACE_BEFORE[token.annotation],
        )
        yield whitespace
        if self.buffering:
            self.action = actions.StopBuffer(dont_yield_buffer=True)
    elif self.buffering:
        self.action = actions.StopBuffer()
def r(ctx: Context, token: TokenInfo):
    ctx.push_state(State.EXPECT_LBDX_LPAR)
    ctx.cache = [token]  # wrap in a list, matching the other rules' cache shape
    return actions.StartBuffer()
def r(ctx: Context, token: TokenInfo):
    ctx.push_state(State.EXPECT_CLS_BODY_LSQB)
    ctx.cache = [token]
    return actions.StartBuffer()
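# --- Illustrative sketch (not part of the original source) ------------------
# A hedged sketch of how filters of this shape might be chained over a token
# stream, reusing the Token and BufferingFilter stubs from the driver sketch
# above. `run_pipeline` and `_stage` are invented names.
from typing import Iterable, Iterator, List


def _stage(flt: "BufferingFilter", src: Iterator["Token"]) -> Iterator["Token"]:
    for tok in src:
        yield from flt.feed(tok)


def run_pipeline(
    tokens: Iterable["Token"], filters: List["BufferingFilter"]
) -> Iterator["Token"]:
    stream: Iterator["Token"] = iter(tokens)
    for flt in filters:
        # Binding `flt` through a call avoids the late-binding pitfall of a
        # generator expression closing over the loop variable.
        stream = _stage(flt, stream)
    yield from stream
# -----------------------------------------------------------------------------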