Example #1
File: output_util.py  Project: rcoup/sno
def dump_json_output(output, output_path, json_style="pretty"):
    """
    Dumps the output to JSON in the output file.
    """
    fp = resolve_output_path(output_path)

    highlit = json_style == "pretty" and fp == sys.stdout and fp.isatty()
    json_encoder = ExtendedJsonEncoder(**JSON_PARAMS[json_style])
    if highlit:
        ex_json_lexer = ExtendedJsonLexer()
        # The LexerContext stores the state of the lexer after each call to get_tokens_unprocessed
        lexer_context = LexerContext("", 0)

        for chunk in json_encoder.iterencode(output):
            lexer_context.text = chunk
            lexer_context.pos = 0
            lexer_context.end = len(chunk)
            token_generator = (
                (token_type, value)
                for (index, token_type, value)
                in ex_json_lexer.get_tokens_unprocessed(context=lexer_context)
            )
            fp.write(pygments.format(token_generator, get_terminal_formatter()))

    else:
        for chunk in json_encoder.iterencode(output):
            fp.write(chunk)
    fp.write("\n")
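
The helpers here (resolve_output_path, ExtendedJsonEncoder, JSON_PARAMS, ExtendedJsonLexer, get_terminal_formatter) are specific to the sno project. A minimal sketch of the same chunk-by-chunk highlighting pattern using only stock Pygments might look like the following; RubyLexer stands in for the project-specific JSON lexer purely for illustration, and the chunks are made up. The key point is that a single LexerContext is reused for every chunk, so the lexer state (including ctx.stack) survives chunk boundaries.

import sys

import pygments
from pygments.formatters import TerminalFormatter
from pygments.lexer import LexerContext
from pygments.lexers.ruby import RubyLexer

lexer = RubyLexer()
formatter = TerminalFormatter()
lexer_context = LexerContext("", 0)  # reused so state persists across chunks

for chunk in ['puts "hello"\n', 'puts "world"\n']:
    lexer_context.text = chunk
    lexer_context.pos = 0
    lexer_context.end = len(chunk)
    tokens = (
        (token_type, value)
        for _, token_type, value
        in lexer.get_tokens_unprocessed(context=lexer_context)
    )
    sys.stdout.write(pygments.format(tokens, formatter))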
Example #2
def intp_string_callback(self, match, ctx):
    yield match.start(1), String.Other, match.group(1)
    nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
    for i, t, v in self.get_tokens_unprocessed(context=nctx):
        yield match.start(3) + i, t, v
    yield match.start(4), String.Other, match.group(4)  # end
    ctx.pos = match.end()
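
The third positional argument to LexerContext here is the initial state stack, so the nested text is lexed starting in the 'interpolated-string' state instead of 'root'. A small sketch of just that constructor (the sample text is made up):

from pygments.lexer import LexerContext

nctx = LexerContext('#{1 + 1}', 0, ['interpolated-string'])
print(nctx.stack)  # ['interpolated-string']
print(nctx.end)    # defaults to len(text)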
Example #3
File: ruby.py  Project: zoulianmp/PythonQt
def intp_regex_callback(self, match, ctx):
    yield match.start(1), String.Regex, match.group(1)  # begin
    nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
    for i, t, v in self.get_tokens_unprocessed(context=nctx):
        yield match.start(3) + i, t, v
    yield match.start(4), String.Regex, match.group(4)  # end[mixounse]*
    ctx.pos = match.end()
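
Both callbacks are wired into the Ruby lexer's token rules, so a quick way to see them in action is to lex a small Ruby snippet that uses interpolation; which rule fires depends on the literal syntax, so the snippet below is only a rough illustration:

from pygments.lexers.ruby import RubyLexer

code = 'greeting = %Q(hello #{name})\npattern = %r{wor#{part}ld}\n'
for index, token_type, value in RubyLexer().get_tokens_unprocessed(code):
    print(index, token_type, repr(value))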
Example #4
    def get_tokens_unprocessed(self, text, stack=('root', )):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        tokendefs = self._tokens
        self.ctx = ctx = LexerContext(text, 0)
        ctx.stack = list(stack)
        statetokens = tokendefs[ctx.stack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                self.m = m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            if not isinstance(self, ExtendedRegexLexer):
                                for item in action(self, m):
                                    yield item
                                ctx.pos = m.end()
                            else:
                                for item in action(self, m, ctx):
                                    yield item
                                if not new_state:
                                    # altered the state stack?
                                    statetokens = tokendefs[ctx.stack[-1]]
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, 'wrong state def: %r' % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to 'root'
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, u'\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
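
Callbacks such as the ones in Examples #2 and #3 enter this loop through the action(self, m, ctx) branch: they receive the live LexerContext and are responsible for advancing ctx.pos themselves. A minimal sketch of such a lexer built on stock Pygments (the class, state, and token choices are illustrative and not taken from any of the projects above):

from pygments.lexer import ExtendedRegexLexer
from pygments.token import Name, Text


class TinyLexer(ExtendedRegexLexer):
    # Illustrative only: a callback gets (self, match, ctx) and must move ctx.pos.
    def word_callback(self, match, ctx):
        yield match.start(), Name, match.group()
        ctx.pos = match.end()

    tokens = {
        'root': [
            (r'\w+', word_callback),
            (r'\s+', Text),
        ],
    }


for index, token_type, value in TinyLexer().get_tokens_unprocessed('hello world'):
    print(index, token_type, value)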
Example #5
def get_tokens_unprocessed(self, text=None, context=None):
    ctx = LexerContext(text, 0)
    ctx.indent = 0
    return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context=ctx)
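
LexerContext is a plain state object, so a lexer like this one can stash extra bookkeeping (here an indent counter) on it before delegating to the base class; callbacks can then read and update it through their ctx argument. A tiny illustration of just that attribute handling (how indent is actually consumed is specific to the lexer that defines this method):

from pygments.lexer import LexerContext

ctx = LexerContext("line one\n  line two\n", 0)
ctx.indent = 0   # custom attribute, not part of the core API
ctx.indent += 2  # a callback could bump this when it sees deeper nesting
print(ctx.indent, ctx.pos, ctx.end, ctx.stack)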