Code example #1
File: memory.py Project: huayl/voltron
    def render(self, results):
        target = None
        self.trunc_top = self.args.reverse

        t_res, m_res = results

        if t_res and t_res.is_success and len(t_res.targets) > 0:
            target = t_res.targets[0]

            if self.args.deref or self.args.words:
                self.args.bytes = target['addr_size']

            f = pygments.formatters.get_formatter_by_name(self.config.format.pygments_formatter,
                                                          style=self.config.format.pygments_style)

            if m_res and m_res.is_success:
                lines = pygments.format(self.generate_tokens(results), f).split('\n')
                self.body = '\n'.join(reversed(lines)).strip() if self.args.reverse else '\n'.join(lines)
                self.info = '[0x{0:0=4x}:'.format(len(m_res.memory)) + self.config.format.addr_format.format(m_res.address) + ']'
            else:
                log.error("Error reading memory: {}".format(m_res.message))
                self.body = pygments.format([(Error, m_res.message)], f)
                self.info = ''

            # Store the memory
            if self.args.track:
                self.last_address = m_res.address
                self.last_memory = m_res.memory
        else:
            self.body = self.colour("Failed to get targets", 'red')

        if not self.title:
            self.title = "[memory]"

        super(MemoryView, self).render(results)
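Every example on this page ultimately calls pygments.format(tokens, formatter), which turns a stream of (token type, text) pairs into a string (or writes it to a file object). As a neutral point of reference, here is a minimal, self-contained sketch that is not taken from any of the projects listed here:

# Minimal reference sketch (not from any project on this page):
# lex a string into tokens, then render those tokens with a terminal formatter.
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

tokens = PythonLexer().get_tokens("print('hello')\n")
colored = pygments.format(tokens, TerminalFormatter())
print(colored)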
Code example #2
File: test_basic_api.py Project: erickt/pygments
    def test_unicode_handling(self):
        # test that the formatter supports encoding and Unicode
        tokens = list(lexers.PythonLexer(encoding='utf-8').
                      get_tokens("def f(): 'ä'"))
        for formatter, info in formatters.FORMATTERS.iteritems():
            try:
                inst = formatter(encoding=None)
            except (ImportError, FontNotFound):
                # some dependency or font not installed
                continue

            if formatter.name != 'Raw tokens':
                out = format(tokens, inst)
                if formatter.unicodeoutput:
                    self.assert_(type(out) is unicode)

                inst = formatter(encoding='utf-8')
                out = format(tokens, inst)
                self.assert_(type(out) is bytes, '%s: %r' % (formatter, out))
                # Cannot test for encoding, since formatters may have to escape
                # non-ASCII characters.
            else:
                inst = formatter()
                out = format(tokens, inst)
                self.assert_(type(out) is bytes, '%s: %r' % (formatter, out))
Code example #3
File: register.py Project: Stenean/voltron
 def format(tok, tik=None):
     if tik:
         tok = (tok, tik)
     if isinstance(tok, tuple):
         return pygments.format([tok], formatter)
     else:
         return pygments.format(tok, formatter)
Code example #4
File: test_basic_api.py Project: Oire/gobyexample
def test_bare_class_handler():
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised'
Code example #5
    def test_unicode_handling(self):
        # test that the formatter supports encoding and Unicode
        tokens = list(lexers.PythonLexer(encoding='utf-8').get_tokens("def f(): 'ä'"))
        for formatter, info in formatters.FORMATTERS.iteritems():
            try:
                inst = formatter(encoding=None)
            except (ImportError, FontNotFound):
                # some dependency or font not installed
                continue
            out = format(tokens, inst)
            if formatter.unicodeoutput:
                self.assert_(type(out) is unicode)

            inst = formatter(encoding='utf-8')
            out = format(tokens, inst)
            self.assert_(type(out) is str)
Code example #6
File: repl.py Project: dmlicht/scottwasright
    def push(self, line):
        """Push a line of code onto the buffer, run the buffer

        If the interpreter successfully runs the code, clear the buffer
        Return ("for stdout", "for_stderr", finished?)
        """
        self.buffer.append(line)
        indent = len(re.match(r'[ ]*', line).group())

        if line.endswith(':'):
            indent = max(0, indent + INDENT_AMOUNT)
        elif line and line.count(' ') == len(self._current_line):
            indent = max(0, indent - INDENT_AMOUNT)
        elif line and ':' not in line and line.strip().startswith(('return', 'pass', 'raise', 'yield')):
            indent = max(0, indent - INDENT_AMOUNT)
        out_spot = sys.stdout.tell()
        err_spot = sys.stderr.tell()
        #logging.debug('running %r in interpreter', self.buffer)
        unfinished = self.interp.runsource('\n'.join(self.buffer))
        self.display_buffer.append(bpythonparse(format(self.tokenize(line), self.formatter))) #current line not added to display buffer if quitting
        sys.stdout.seek(out_spot)
        sys.stderr.seek(err_spot)
        out = sys.stdout.read()
        err = sys.stderr.read()
        if unfinished and not err:
            logging.debug('unfinished - line added to buffer')
            return (None, None, False, indent)
        else:
            logging.debug('finished - buffer cleared')
            self.display_lines.extend(self.display_buffer_lines)
            self.display_buffer = []
            self.buffer = []
            if err:
                indent = 0
            return (out[:-1], err[:-1], True, indent)
Code example #7
File: bpythonparse.py Project: 0x0all/curtsies
def string_to_fmtstr(x):
    from pygments import format
    from bpython.formatter import BPythonFormatter
    from bpython._py3compat import PythonLexer
    from bpython.config import Struct, loadini, default_config_path
    config = Struct()
    loadini(config, default_config_path())
    return parse(format(PythonLexer().get_tokens(x), BPythonFormatter(config.color_scheme)))
Code example #8
File: highlighter.py Project: TokinT-Mac/Jot
	def highlightBlock(self, text):
		"""Takes a block, applies formatting to the document
		according to what's in it.
		"""
		
		# I need to know where in the document we are,
		# because our formatting info is global to
		# the document
		cb = self.currentBlock()
		p = cb.position()
		'''print cb
		print p
		print cb.text()'''
		blockText = unicode(cb.text())+'\n'
		
		# The \n is not really needed, but sometimes  
		# you are in an empty last block, so your position is
		# **after** the end of the document.
		
		text=unicode(self.document().toPlainText())+'\n'
		
		# Yes, re-highlight the whole document.
		# There **must** be some optimization possibilities
		# but it seems fast enough.
		
		#highlight(blockText,self.lexer,self.formatter)
		tokens = pygments.lex(blockText, self.lexer)
		self.docTokens[cb.blockNumber()] = tokens
		pygments.format(tokens, self.formatter)
		data = self.formatter.getData()
		
		
		# Just apply the formatting to this block.
		# For titles, it may be necessary to backtrack
		# and format a couple of blocks **earlier**.
		for i in range(len(unicode(blockText))):
			try:
				self.setFormat(i,1,data[i])
			except IndexError:
				pass
		
		# I may need to do something about this being called
		# too quickly.
		self.tstamp=time.time() 
Code example #9
File: output.py Project: 0xcharly/pycr
    def format(cls, tokens):
        """Format the given list of tokens

        :param tokens: the input list of tokens to format
        :type tokens: tuple[Token, str]
        :rtype: str
        """

        cls.__initialize()
        return pygments.format(tokens, cls.formatter)
Code example #10
def test_formatter_encodings():
    from pygments.formatters import HtmlFormatter

    # unicode output
    fmt = HtmlFormatter()
    tokens = [(Text, u"ä")]
    out = format(tokens, fmt)
    assert type(out) is text_type
    assert u"ä" in out

    # encoding option
    fmt = HtmlFormatter(encoding="latin1")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("latin1") in format(tokens, fmt)

    # encoding and outencoding option
    fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("utf8") in format(tokens, fmt)
Code example #11
File: test_basic_api.py Project: erickt/pygments
    def test_encodings(self):
        from pygments.formatters import HtmlFormatter

        # unicode output
        fmt = HtmlFormatter()
        tokens = [(Text, u"ä")]
        out = format(tokens, fmt)
        self.assert_(type(out) is unicode)
        self.assert_(u"ä" in out)

        # encoding option
        fmt = HtmlFormatter(encoding="latin1")
        tokens = [(Text, u"ä")]
        self.assert_(u"ä".encode("latin1") in format(tokens, fmt))

        # encoding and outencoding option
        fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
        tokens = [(Text, u"ä")]
        self.assert_(u"ä".encode("utf8") in format(tokens, fmt))
Code example #12
def test_formatter_encodings():
    from pygments.formatters import HtmlFormatter

    # unicode output
    fmt = HtmlFormatter()
    tokens = [(Text, u"ä")]
    out = format(tokens, fmt)
    assert type(out) is unicode
    assert u"ä" in out

    # encoding option
    fmt = HtmlFormatter(encoding="latin1")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("latin1") in format(tokens, fmt)

    # encoding and outencoding option
    fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("utf8") in format(tokens, fmt)
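The encoding tests above all exercise the same contract: a formatter built without an encoding returns text, while a formatter built with encoding (and optionally outencoding) returns bytes containing the encoded characters. A minimal sketch of that contract on Python 3, assuming nothing beyond the public Pygments API:

# Hedged sketch of the encoding contract checked by the tests above (Python 3).
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

tokens = list(PythonLexer().get_tokens("x = 'ä'\n"))

# Without an encoding, pygments.format returns a str.
assert isinstance(pygments.format(tokens, HtmlFormatter()), str)

# With an encoding, it returns bytes in that encoding.
out = pygments.format(tokens, HtmlFormatter(encoding="utf-8"))
assert isinstance(out, bytes) and "ä".encode("utf-8") in out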
Code example #13
File: output.py Project: 0xcharly/pycr
    def raw_format(cls, tokens):
        """Format the given list of tokens as a simple string (no color)

        :param tokens: the input list of tokens to format
        :type tokens: tuple[Token, str]
        :rtype: str
        """

        formatter = get_formatter_by_name(Formatter.NO_COLOR, encoding=Formatter.ENCODING)
        return pygments.format(tokens, formatter)
Code example #14
File: readline_shell.py Project: PeterHancock/xonsh
 def print_color(self, string, hide=False, **kwargs):
     if isinstance(string, str):
         s = self.format_color(string, hide=hide)
     else:
         # assume this is a list of (Token, str) tuples and format it
         env = builtins.__xonsh_env__
         self.styler.style_name = env.get('XONSH_COLOR_STYLE')
         style_proxy = pyghooks.xonsh_style_proxy(self.styler)
         formatter = Terminal256Formatter(style=style_proxy)
         s = pygments.format(string, formatter).rstrip()
     print(s, **kwargs)
Code example #15
 def print_color(self, string, hide=False, **kwargs):
     if isinstance(string, str):
         s = self.format_color(string, hide=hide)
     else:
         # assume this is a list of (Token, str) tuples and format it
         env = builtins.__xonsh_env__
         self.styler.style_name = env.get('XONSH_COLOR_STYLE')
         style_proxy = pyghooks.xonsh_style_proxy(self.styler)
         formatter = Terminal256Formatter(style=style_proxy)
         s = pygments.format(string, formatter).rstrip()
     print(s, **kwargs)
Code example #16
File: output.py Project: sjl421/pycr
    def raw_format(cls, tokens):
        """Format the given list of tokens as a simple string (no color)

        :param tokens: the input list of tokens to format
        :type tokens: tuple[Token, str]
        :rtype: str
        """

        formatter = get_formatter_by_name(Formatter.NO_COLOR,
                                          encoding=Formatter.ENCODING)
        return pygments.format(tokens, formatter)
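The raw_format helpers above resolve their formatter by name through get_formatter_by_name rather than importing a formatter class; Formatter.NO_COLOR and Formatter.ENCODING are constants of the pycr project, not of Pygments. A hedged, stand-alone sketch of the same pattern using names that Pygments itself registers ('text' is the alias of the colourless NullFormatter):

# Hedged sketch: resolve a formatter by registered name instead of importing a class.
# 'text' is Pygments' alias for NullFormatter, which emits the token text with no colouring.
import pygments
from pygments.formatters import get_formatter_by_name
from pygments.lexers import PythonLexer

formatter = get_formatter_by_name("text", encoding="utf-8")
plain = pygments.format(PythonLexer().get_tokens("x = 1\n"), formatter)  # bytes, uncoloured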
Code example #17
    def terminal(self):
        """
        Return a Terminal-friendly (with ANSI color sequences) representation of the snippet.
        """
        formatter = TerminalFormatter(
            linenos=True,
            colorscheme=None,
            linenostart=self._start_line,
        )

        return pygments.format(self.src_tokens(), formatter)
Code example #18
    def html(self):
        """
        Return an HTML representation of the snippet.
        """
        formatter = HtmlFormatter(cssclass=self.DIV_CSS_CLASS,
                                  linenos=True,
                                  linenostart=self._start_line,
                                  hl_lines=self._shift_lines(
                                      self._violation_lines, self._start_line),
                                  lineanchors=self._src_filename)

        return pygments.format(self.src_tokens(), formatter)
Code example #19
File: repl.py Project: NaveenPrasanth/testingrepo
 def current_line_formatted(self):
     """The colored current line (no prompt, not wrapped)"""
     if self.config.syntax:
         fs = bpythonparse(format(self.tokenize(self._current_line), self.formatter))
         logging.debug('Display line %r -> %r', self._current_line, fs)
     else:
         fs = fmtstr(self._current_line)
     if hasattr(self, 'old_fs') and str(fs) != str(self.old_fs):
         pass
         #logging.debug('calculating current formatted line: %r', repr(fs))
     self.old_fs = fs
     return fs
Code example #20
def extract_algorithms(ps_infile, env):
    """
    Extract the algorithm code fragments
    """
    import pygments
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import LatexFormatter
    
    algorithm_regexp = re.compile(
        r"(?ms)\#ALGORITHM\s+(?P<name>[a-zA-z-0-9]+)\s*(?P<code>.+?)\s*\#ENDALGORITHM")
    hideline_regexps = [re.compile(r"(?m)^.*\#hide *\n"), re.compile(r"(?m)\n.*\#hide *") ]
    ls = ut.file2string(ps_infile) 
    for algorithm in algorithm_regexp.finditer(ls):
        algfilename = lib.get_target(ps_infile, algorithm.group('name')+".py")
        texfilename = lib.get_target(ps_infile, algorithm.group('name')+".tex")
        #lf = open(algfilename, 'w')
        code = algorithm.group('code')
        for r in hideline_regexps:
            code = re.sub(r, "", code)
        #code = lib.utf8anyway(code)    
        #lf.write(code)
        #lf.close()
        
        #tempblock = os.path.join(tempfile.gettempdir(), tempfile.gettempprefix())
        #ls = ''.join([env.project_db['paths']['python'],
        #             r'\scripts\pygmentize -f latex -l python ',
        #             ' -o "%(tempblock)s" "%(algfilename)s" ' % vars() ])
        #os.system(ls)
        
        lexer = get_lexer_by_name('python')
        code = ut.unicodeanyway(code)
        latex_tokens = pygments.lex(code, lexer)
        
#        sblock = ut.file2string(tempblock)
#        from pygments.formatters import LatexFormatter
        latex_formatter = LatexFormatter(texcomments = True)
        latex = pygments.format(latex_tokens, latex_formatter)
        stexblock = r"""
\documentclass{minimal}
\usepackage{xecyr}
\XeTeXdefaultencoding "utf-8"
\XeTeXinputencoding "utf-8"
\defaultfontfeatures{Mapping=tex-text}
\setmonofont{Consolas}
\usepackage{color}
\usepackage{fancyvrb}
\usepackage[russian,english]{babel} 
        """ + latex_formatter.get_style_defs() + r"""
\begin{document}
        """ + latex + r"""
\end{document}
        """
        ut.string2file(stexblock, texfilename, encoding='utf-8')
Code example #21
    def verify(formatter):
        try:
            inst = formatter(encoding=None)
        except (ImportError, FontNotFound):
            # some dependency or font not installed
            raise support.SkipTest

        if formatter.name != 'Raw tokens':
            out = format(tokens, inst)
            if formatter.unicodeoutput:
                assert type(out) is text_type, '%s: %r' % (formatter, out)

            inst = formatter(encoding='utf-8')
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)
            # Cannot test for encoding, since formatters may have to escape
            # non-ASCII characters.
        else:
            inst = formatter()
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)
Code example #22
    def verify(formatter):
        try:
            inst = formatter(encoding=None)
        except (ImportError, FontNotFound):
            # some dependency or font not installed
            raise support.SkipTest

        if formatter.name != 'Raw tokens':
            out = format(tokens, inst)
            if formatter.unicodeoutput:
                assert type(out) is text_type, '%s: %r' % (formatter, out)

            inst = formatter(encoding='utf-8')
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)
            # Cannot test for encoding, since formatters may have to escape
            # non-ASCII characters.
        else:
            inst = formatter()
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)
Code example #23
    def verify(formatter):
        try:
            inst = formatter(encoding=None)
        except (ImportError, FontNotFound):
            # some dependency or font not installed
            return

        if formatter.name != "Raw tokens":
            out = format(tokens, inst)
            if formatter.unicodeoutput:
                assert type(out) is unicode

            inst = formatter(encoding="utf-8")
            out = format(tokens, inst)
            assert type(out) is bytes, "%s: %r" % (formatter, out)
            # Cannot test for encoding, since formatters may have to escape
            # non-ASCII characters.
        else:
            inst = formatter()
            out = format(tokens, inst)
            assert type(out) is bytes, "%s: %r" % (formatter, out)
Code example #24
    def log(self, tokens, stream=sys.stdout):
        """Log the input token stream with the standard Python logging
        mechanism.

        PARAMETERS
            log_fn: the logging function to use
            tokens: the input tokens stream
        """

        assert self.formatter is not None, 'Internal error'
        print >> stream, pygments.format(tokens, self.formatter)
        stream.flush()
Code example #25
File: output_util.py Project: koordinates/kart
def format_wkt_for_output(output, fp=None, syntax_highlight=True):
    """
    Formats WKT whitespace for readability.
    Adds syntax highlighting if fp is a terminal and syntax_highlight=True.
    Doesn't print the formatted WKT to fp, just returns it.
    """
    token_iter = WKTLexer().get_tokens(output, pretty_print=True)
    if syntax_highlight and can_output_colour(fp):
        return pygments.format(token_iter, get_terminal_formatter())
    else:
        token_value = (value for token_type, value in token_iter)
        return "".join(token_value)
Code example #26
File: repl.py Project: dmlicht/scottwasright
 def unhighlight_paren(self):
     """set self.display_buffer after """
     if self.highlighted_paren is not None:
         lineno, saved_tokens = self.highlighted_paren
         if lineno == len(self.display_buffer):
             # then this is the current line, so don't worry about it
             return
         self.highlighted_paren = None
         logging.debug('trying to unhighlight a paren on line %r', lineno)
         logging.debug('with these tokens: %r', saved_tokens)
         new = bpythonparse(format(saved_tokens, self.formatter))
         self.display_buffer[lineno][:len(new)] = new
Code example #27
    def print_result(self,
                     result,
                     prompt: bool,
                     output_style="",
                     strict_wl_output=False):
        if result is None:
            # FIXME decide what to do here
            return

        last_eval = result.last_eval

        if last_eval is not None:
            try:
                eval_type = last_eval.get_head_name()
            except:
                print(sys.exc_info()[1])
                return

            out_str = str(result.result)
            use_highlight = True
            if eval_type == "System`String":
                if strict_wl_output:  # exact-wl-compatibility
                    out_str = (format([(MToken.STRING, out_str.rstrip())],
                                      self.terminal_formatter) + "\n")
                    use_highlight = False
                else:
                    out_str = '"' + out_str.replace('"', r"\"") + '"'
            if eval_type == "System`Graph":
                out_str = "-Graph-"
            elif self.terminal_formatter:  # pygmentize
                show_pygments_tokens = self.definitions.get_ownvalue(
                    "Settings`$PygmentsShowTokens").replace.to_python()
                pygments_style = self.definitions.get_ownvalue(
                    "Settings`$PygmentsStyle").replace.get_string_value()
                if pygments_style != self.pygments_style:
                    if not self.change_pygments_style(pygments_style):
                        self.definitions.set_ownvalue(
                            "Settings`$PygmentsStyle",
                            String(self.pygments_style))

                if show_pygments_tokens:
                    print(list(lex(out_str, mma_lexer)))
                if use_highlight:
                    out_str = highlight(out_str, mma_lexer,
                                        self.terminal_formatter)
            output = self.to_output(out_str)
            if output_style == "text" or not prompt:
                print(output)
            elif self.session:
                print_formatted_text(self.get_out_prompt(), end="")
                print(output + "\n")
            else:
                print(self.get_out_prompt() + output + "\n")
Code example #28
File: memory.py Project: zengchunyun/voltron
    def render(self, results):
        target = None
        self.trunc_top = self.args.reverse

        t_res, m_res = results

        if t_res and t_res.is_success and len(t_res.targets) > 0:
            target = t_res.targets[0]

            if self.args.deref or self.args.words:
                self.args.bytes = target['addr_size']

            f = pygments.formatters.get_formatter_by_name(
                self.config.format.pygments_formatter,
                style=self.config.format.pygments_style)

            if m_res and m_res.is_success:
                lines = pygments.format(self.generate_tokens(results),
                                        f).split('\n')
                self.body = '\n'.join(reversed(
                    lines)).strip() if self.args.reverse else '\n'.join(lines)
                self.info = '[0x{0:0=4x}:'.format(len(
                    m_res.memory)) + self.config.format.addr_format.format(
                        m_res.address) + ']'
            else:
                log.error("Error reading memory: {}".format(m_res.message))
                self.body = pygments.format([(Error, m_res.message)], f)
                self.info = ''

            # Store the memory
            if self.args.track:
                self.last_address = m_res.address
                self.last_memory = m_res.memory
        else:
            self.body = self.colour("Failed to get targets", 'red')

        if not self.title:
            self.title = "[memory]"

        super(MemoryView, self).render(results)
Code example #29
        def insert_formatted_code(tb,
                                  language,
                                  code,
                                  mark=None,
                                  line_break=False):
            lexer = None

            if language is None:
                log.info(
                    "No Language specified. Falling back to default lexer: %s.",
                    str(self.config['default_lexer']))
                lexer = get_lexer(self.config['default_lexer'])
            else:
                log.debug("Using lexer for %s.", str(language))
                lexer = get_lexer_with_fallback(language,
                                                self.config['default_lexer'])

            if lexer is None:
                it = tb.get_iter_at_mark(mark)
                tb.insert(it, '\n')
            else:
                tokens = pygments.lex(code, lexer)

                if line_break:
                    log.debug("Inserting newline before code.")
                    it = tb.get_iter_at_mark(mark)
                    tb.insert(it, '\n')
                    it.forward_char()
                    tb.move_mark(mark, it)

                formatter = GTKFormatter(start_mark=mark)
                pygments.format(tokens, formatter, tb)

                endmark = formatter.get_last_mark()
                if line_break and not endmark is None:
                    it = tb.get_iter_at_mark(endmark)
                    tb.insert(it, '\n')
                    log.debug("Inserting newline after code.")

            return tb
Code example #30
File: test_basic_api.py Project: amitkummer/pygments
def test_bare_class_handler():
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised'

    # These cases should not trigger this heuristic.
    class BuggyLexer(RegexLexer):
        def get_tokens(self, text, extra_argument):
            pass

        tokens = {'root': []}

    try:
        list(lex('dummy', BuggyLexer()))
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' not in str(e)
    else:
        assert False, 'no error raised by buggy lexer?'

    class BuggyFormatter(Formatter):
        def format(self, tokensource, outfile, extra_argument):
            pass

    try:
        format([], BuggyFormatter())
    except TypeError as e:
        assert 'format() argument must be a formatter instance' not in str(e)
    else:
        assert False, 'no error raised by buggy formatter?'
Code example #31
File: cover.py Project: adamhaney/djangodevtools
    def write_module_coverage_file(self, app, moduleName, sourceFileName, num_of_lines, not_covered_lines = [], excluded_lines = []):
        ''' Set and writes the coverage report '''
        from django.template import Context
        from django.template.loader import get_template
        #Decode a file
        fo = codecs.open(sourceFileName, 'rb', "utf-8")
        try:
            source = fo.read()
        finally:
            fo.close()

        try:
            offset = 0
            lines = source.split("\n")
            while lines[ offset ] == "":
                offset += 1
                if offset > 0:
                    not_covered_lines = [x - 1 for x in not_covered_lines]
        except IndexError:
            offset = 0

        #Lexer tokenize an input string
        lexer = get_lexer_by_name("py")

        tokens = lex(source, lexer)
        fmt = CssHtmlFormatter(linenos = 'inline', hl_lines = not_covered_lines, noclasses = False, css_lines = {"skipped" : excluded_lines})
        fmt.lineseparator = "\n"
        source_html = format(tokens, fmt)

        ncl = len(not_covered_lines) # uncovered lines
        cl = num_of_lines - ncl # number covered lines
        el = len(excluded_lines)
        co = cl > 0 and float(cl * 100 / num_of_lines) or 0

        t = get_template('coverage_module.tpl.html')
        now = datetime.datetime.now()
        html = t.render(Context({'now': now,
                                 'module':moduleName,
                                 'app':moduleName.split('.')[0],
                                 'pkgs':moduleName.split("."),
                                 'tl': num_of_lines,
                                 'cl': cl,
                                 'el': el,
                                 'co': co,
                                 'title': "%s coverage" % moduleName,
                                 'code': source_html,
                                 'GLOBAL_COVERAGE': GLOBAL_COVERAGE,
                                 }))

        fp = self.get_file_handle(app, moduleName)
        fp.write(html.encode('utf-8'))
        fp.close()
Code example #32
File: output.py Project: enzbang/pycr
    def format(cls, tokens):
        """
        Format the given list of tokens.

        PARAMETERS
            tokens: the input list of tokens to format

        RETURNS
            the formatted string
        """

        cls.__initialize()
        return pygments.format(tokens, cls.formatter)
Code example #33
File: snippets.py Project: milin/diff-cover
    def html(self):
        """
        Return an HTML representation of the snippet.
        """
        formatter = HtmlFormatter(
            cssclass=self.DIV_CSS_CLASS,
            linenos=True,
            linenostart=self._start_line,
            hl_lines=self._shift_lines(self._violation_lines, self._start_line),
            lineanchors=self._src_filename,
        )

        return pygments.format(self.src_tokens(), formatter)
Code example #34
File: output_util.py Project: OrangeOranges/sno
def format_wkt_for_output(output, fp):
    """
    If the given filelike object is a terminal, adds whitespace and syntax highlighting to the output.
    Doesn't actually write the output, just returns it.
    """
    tokens_with_whitespace = wkt_whitespace_format(
        WKTLexer().get_tokens(output))
    if fp == sys.stdout and fp.isatty():
        return pygments.format(tokens_with_whitespace,
                               get_terminal_formatter())
    else:
        token_value = (value for token_type, value in tokens_with_whitespace)
        return "".join(token_value)
Code example #35
    def format_code(self, buf, s_tag, s_code, e_tag, e_code, language):
        style = self.config.get_style_name()
        if self.config.get_code_marker_setting() == CodeMarkerOptions.HIDE:
            self.hide_code_markup(buf, s_tag, s_code)
            self.hide_code_markup(buf, e_code, e_tag)
        else:
            comment_tag = GTKFormatter.create_tag_for_token(
                pygments.token.Comment,
                pygments.styles.get_style_by_name(style))
            buf.get_tag_table().add(comment_tag)
            buf.apply_tag(comment_tag, s_tag, s_code)
            buf.apply_tag(comment_tag, e_tag, e_code)

        code = s_code.get_text(e_code)
        log.debug('full text to encode: %s.', code)

        start_mark = buf.create_mark(None, s_code, False)

        lexer = None

        if language is None:
            lexer = self.config.get_default_lexer()
            log.info(
                'No Language specified. '
                'Falling back to default lexer: %s.',
                self.config.get_default_lexer_name())
        else:
            log.debug('Using lexer for %s.', str(language))
            lexer = self.config.get_lexer_with_fallback(language)

        if lexer is None:
            iterator = buf.get_iter_at_mark(start_mark)
            buf.insert(iterator, '\n')
        elif not self.config.is_internal_none_lexer(lexer):
            tokens = pygments.lex(code, lexer)

            formatter = GTKFormatter(style=style, start_mark=start_mark)
            pygments.format(tokens, formatter, buf)
Code example #36
File: output.py Project: enzbang/pycr
    def raw_format(cls, tokens):
        """
        Format the given list of tokens as a simple string (no color).

        PARAMETERS
            tokens: the input list of tokens to format

        RETURNS
            the formatted string
        """

        formatter = get_formatter_by_name(Formatter.NO_COLOR,
                                          encoding=Formatter.ENCODING)
        return pygments.format(tokens, formatter)
Code example #37
    def _format_preview_text(self):
        buf = self._ui.preview_textview.get_buffer()
        start_iter = buf.get_start_iter()
        start_mark = buf.create_mark(None, start_iter, True)
        buf.remove_all_tags(start_iter, buf.get_end_iter())

        formatter = GTKFormatter(style=self.config.get_style_name(),
                                 start_mark=start_mark)

        code = start_iter.get_text(buf.get_end_iter())
        lexer = self.config.get_default_lexer()
        if not self.config.is_internal_none_lexer(lexer):
            tokens = pygments.lex(code, lexer)
            pygments.format(tokens, formatter, buf)

        buf.delete_mark(start_mark)

        self._ui.preview_textview.override_font(
            FontDescription.from_string(self.config.get_font()))

        color = Gdk.RGBA()
        if color.parse(self.config.get_bgcolor()):
            self._ui.preview_textview.override_background_color(
                Gtk.StateFlags.NORMAL, color)
Code example #38
File: shell.py Project: aweltsch/xonsh
 def format_color(self, string, hide=False, force_string=False, **kwargs):
     """Formats a color string using Pygments. This, therefore, returns
     a list of (Token, str) tuples. If force_string is set to true, though,
     this will return a color formatted string.
     """
     tokens = partial_color_tokenize(string)
     if force_string:
         env = builtins.__xonsh_env__
         self.styler.style_name = env.get('XONSH_COLOR_STYLE')
         proxy_style = xonsh_style_proxy(self.styler)
         formatter = XonshTerminal256Formatter(style=proxy_style)
         s = pygments.format(tokens, formatter)
         return s
     else:
         return tokens
Code example #39
File: __init__.py Project: efeslab/hase
 def fill_read_cache(self, filename: str, line: int) -> None:
     try:
         lexer = pygments.lexers.get_lexer_for_filename(
             str(filename))  # type: RegexLexer
         formatter_opts = dict(linenos="inline",
                               linespans="line",
                               hl_lines=[line])
         html_formatter = pygments.formatters.get_formatter_by_name(
             "html", **formatter_opts)
         css = html_formatter.get_style_defs(".highlight")
         with open(str(filename)) as f:
             lines = f.readlines()
         if len(lines) < 1000:
             content = "".join(lines)
             tokens = lexer.get_tokens(content)
             source = pygments.format(tokens, html_formatter)
             self.file_cache[filename][line] = (css, source)
             self.file_read_cache[filename] = (lexer, content, False)
         else:
             minl = max(0, line - 30)
             maxl = min(len(lines), line + 30)
             formatter_opts = dict(linenos="inline",
                                   linespans="line",
                                   hl_lines=[line])
             html_formatter = pygments.formatters.get_formatter_by_name(
                 "html", **formatter_opts)
             css = html_formatter.get_style_defs(".highlight")
             source = pygments.format(
                 lexer.get_tokens("".join(lines[minl:maxl])),
                 html_formatter)
             self.file_cache[filename][line] = (css, source)
             self.file_read_cache[filename] = (lexer, lines, True)
     except Exception as e:
         l.exception(e)
         self.file_cache[filename][line] = (None, None)
         self.file_read_cache[filename] = (None, None, False)
Code example #40
File: repl.py Project: NaveenPrasanth/testingrepo
    def unhighlight_paren(self):
        """modify line in self.display_buffer to unhighlight a paren if possible

        self.highlighted_paren should be a line in ?
        """
        if self.highlighted_paren is not None and self.config.syntax:
            lineno, saved_tokens = self.highlighted_paren
            if lineno == len(self.display_buffer):
                # then this is the current line, so don't worry about it
                return
            self.highlighted_paren = None
            logging.debug('trying to unhighlight a paren on line %r', lineno)
            logging.debug('with these tokens: %r', saved_tokens)
            new = bpythonparse(format(saved_tokens, self.formatter))
            self.display_buffer[lineno] = self.display_buffer[lineno].setslice_with_length(0, len(new), new, len(self.display_buffer[lineno]))
Code example #41
File: shell.py Project: nicfit/xonsh
 def format_color(self, string, hide=False, force_string=False, **kwargs):
     """Formats a color string using Pygments. This, therefore, returns
     a list of (Token, str) tuples. If force_string is set to true, though,
     this will return a color formatted string.
     """
     tokens = partial_color_tokenize(string)
     if force_string:
         env = builtins.__xonsh_env__
         self.styler.style_name = env.get('XONSH_COLOR_STYLE')
         proxy_style = xonsh_style_proxy(self.styler)
         formatter = XonshTerminal256Formatter(style=proxy_style)
         s = pygments.format(tokens, formatter)
         return s
     else:
         return tokens
Code example #42
File: repl.py Project: NaveenPrasanth/testingrepo
    def push(self, line, insert_into_history=True):
        """Push a line of code onto the buffer, start running the buffer

        If the interpreter successfully runs the code, clear the buffer
        """
        if self.paste_mode:
            self.saved_indent = 0
        else:
            indent = len(re.match(r'[ ]*', line).group())
            if line.endswith(':'):
                indent = max(0, indent + self.config.tab_length)
            elif line and line.count(' ') == len(line):
                indent = max(0, indent - self.config.tab_length)
            elif line and ':' not in line and line.strip().startswith(('return', 'pass', 'raise', 'yield')):
                indent = max(0, indent - self.config.tab_length)
            self.saved_indent = indent

        #current line not added to display buffer if quitting #TODO I don't understand this comment
        if self.config.syntax:
            display_line = bpythonparse(format(self.tokenize(line), self.formatter))
            # careful: self.tokenize requires that the line not be in self.buffer yet!

            logging.debug('display line being pushed to buffer: %r -> %r', line, display_line)
            self.display_buffer.append(display_line)
        else:
            self.display_buffer.append(fmtstr(line))

        if insert_into_history:
            self.insert_into_history(line)
        self.buffer.append(line)

        code_to_run = '\n'.join(self.buffer)

        logging.debug('running %r in interpreter', self.buffer)
        try:
            c = bool(code.compile_command('\n'.join(self.buffer)))
            self.saved_predicted_parse_error = False
        except (ValueError, SyntaxError, OverflowError):
            c = self.saved_predicted_parse_error = True
        if c:
            logging.debug('finished - buffer cleared')
            self.display_lines.extend(self.display_buffer_lines)
            self.display_buffer = []
            self.buffer = []
            self.cursor_offset_in_line = 0

        self.coderunner.load_code(code_to_run)
        self.run_code_and_maybe_finish()
Code example #43
File: bpythonparse.py Project: amjith/curtsies
def test():
    from pygments import format
    from bpython.formatter import BPythonFormatter
    from bpython._py3compat import PythonLexer
    from bpython.config import Struct, loadini, default_config_path
    config = Struct()
    loadini(config, default_config_path())

    all_tokens = list(PythonLexer().get_tokens('print(1 + 2)'))
    formatted_line = format(all_tokens, BPythonFormatter(config.color_scheme))
    print((repr(formatted_line)))
    fs = parse(formatted_line)
    print((repr(fs)))
    print(fs)

    string_to_fmtstr('asdf')
Code example #44
File: rp2l.py Project: ryanGT/report_generation
def visit_code_block(self,node):
    inline = isinstance(node.parent, nodes.TextElement)
    attrs = node.attributes
    if inline:
        self.body.append('\\lstinline{%s}' % node.latex)
    else:
        if node.formatter == 'pygments':
            # assumed: the node carries its language and source text on these attributes
            lexer = get_lexer_by_name(node.language)
            latex_tokens = pygments.lex(node.latex, lexer)
            formatter = LatexFormatter()
            latex = [pygments.format(latex_tokens,formatter)]
        elif node.formatter == 'listings':
            latex = ['\\begin{lstlisting}[language=%s]\n'%node.language,
                          node.latex,
                          '\n\\end{lstlisting}\n']
        self.body.extend(latex)
Code example #45
File: bpythonparse.py Project: 0x0all/curtsies
def test():
    from pygments import format
    from bpython.formatter import BPythonFormatter
    from bpython._py3compat import PythonLexer
    from bpython.config import Struct, loadini, default_config_path
    config = Struct()
    loadini(config, default_config_path())

    all_tokens = list(PythonLexer().get_tokens('print 1 + 2'))
    formatted_line = format(all_tokens, BPythonFormatter(config.color_scheme))
    print((repr(formatted_line)))
    fs = parse(formatted_line)
    print((repr(fs)))
    print(fs)

    string_to_fmtstr('asdf')
Code example #46
File: views.py Project: CovertLab/WholeCellKB
def viewParameterInSimulation(request, species_wid, wid):
	#get associated simulation property
	qs = Parameter.objects.filter(species__wid = species_wid, wid=wid)
	if not qs[0].state is None:
		sim_class_name = 'edu.stanford.covert.cell.sim.state.%s' % qs[0].state.wid.replace('State_', '')
		verbose_class_name = '%s: %s' % (wid, qs[0].state.name)
	else:
		sim_class_name = 'edu.stanford.covert.cell.sim.process.%s' % qs[0].process.wid.replace('Process_', '')
		verbose_class_name = '%s: %s' % (wid, qs[0].process.name)
	sim_property_name = qs[0].name
	verbose_property_name = qs[0].name

	#highlight code for simulation class
	pathParts = sim_class_name.split('.')
	codePath = "%s/src/+%s/%s.m" % (MODEL_CODE_BASE_DIR, '/+'.join(pathParts[0:-1]), pathParts[-1])
	if not os.path.isfile(codePath):
		codePath = "%s/src/+%s/@%s/%s.m" % (MODEL_CODE_BASE_DIR, '/+'.join(pathParts[0:-1]), pathParts[-1], pathParts[-1])
	
	if os.path.isfile(codePath):
		with open (codePath, "r") as codeFile:
			code = codeFile.read()

		lexer = MatlabLexer()
		lexer.add_filter(PropertyDefinitionFilter(property_names = [sim_property_name], tokentype=Token.Name.Variable)) 
		
		tokens = lexer.get_tokens(code)
			
		object = {
			'class_name': sim_class_name,
			'property_names': [sim_property_name],
			'code': pygments.format(tokens, PygmentsFormatter(linenos='inline', linenostep=1, style=PygmentsStyle, noclasses=True)),
			}
	else:
		raise Http404
	
	#render response
	return render_queryset_to_response(
		species_wid = species_wid,		
		request = request, 
		models = [Parameter],
		queryset = qs,
		templateFile = 'public/viewPropertyInSimulation.html', 
		data = {
			'object_list': [object],
			'verbose_class_name': verbose_class_name,
			'verbose_property_name': verbose_property_name,
			})
Code example #47
File: __init__.py Project: ryanGT/restutils
 def run(self):
     formatter = self.state.document.settings.code_block_formatter
     if 'formatter' in self.options:
         formatter = self.options['formatter']
     else:
         formatter = 'listings'
     language = self.arguments[0]
     code = ''
     for line in self.content:
         code+='%s\n'%line
     if formatter == 'pygments':
         lexer = get_lexer_by_name(language)
         latex_tokens = pygments.lex(code, lexer)
         formatter = LatexFormatter()
         latex = pygments.format(latex_tokens,formatter)
     elif formatter == 'listings':
         latex = code
     node = code_block(self.block_text,latex,language,formatter)
     return [node]
Code example #48
def format_code(x, xlate=True, to_try=None, no_errors=False, with_meta=False):
    try:
        import pygments, pygments.formatters, pygments.lexers
    except Exception as e:
        print "exception trying to format code: {0}".format(e)
        return x

    f = pygments.formatters.TerminalFormatter()

    if to_try is None:
        to_try = ('json', 'yaml', 'python')

    if not isinstance(to_try, (tuple,list)):
        to_try = (to_try,)

    best_tokens = False
    for lname in to_try:
        lexer = pygments.lexers.get_lexer_by_name(lname)
        tokens,spare_tokens = itertools.tee(pygments.lex(x, lexer))

        c = 0
        for t in spare_tokens:
            if t[0][0] == 'Error':
                c += 1

        if no_errors and c:
            continue

        if best_tokens and c >= best_tokens[0]:
            continue

        best_tokens = (c, tokens, lname)

    if best_tokens:
        formatted = pygments.format(best_tokens[1],f)
        if xlate:
            formatted = xlate_ansi( formatted )
        if with_meta:
            return {'errors': best_tokens[0], 'formatted': formatted, 'lexer': best_tokens[2]}
        return formatted

    return x
Code example #49
File: gcdsp-decode.py Project: mewbak/decompil
 def output_stage(name, function):
     if 'dot' in args.dumps:
         dot_document = function_to_dot(function, style=args.style)
         if args.dot_format:
             dot_args = ['dot',
                 '-T{}'.format(args.dot_format),
                 '-o' '{}.{}'.format(name, args.dot_format),
             ]
             if args.dpi is not None:
                 dot_args.append('-Gdpi={}'.format(args.dpi))
             dot = subprocess.Popen(dot_args, stdin=subprocess.PIPE)
             dot.communicate(dot_document.encode('utf-8'))
         else:
             with open('{}.dot'.format(name), 'w') as f:
                 f.write(dot_document)
                 f.write('\n')
     if 'text' in args.dumps:
         with open('{}.ll'.format(name), 'w') as f:
             f.write(pygments.format(function.format(), text_formatter))
             f.write('\n')
Code example #50
File: demo.py Project: deep-jkl/ipython
 def highlight(self, block):
     """Method called on each block to highlight it content"""
     tokens = pygments.lex(block, self.python_lexer)
     if self.format_rst:
         from pygments.token import Token
         toks = []
         for token in tokens:
             if token[0] == Token.String.Doc and len(token[1]) > 6:
                 toks += pygments.lex(token[1][:3], self.python_lexer)
                 # parse doc string content by rst lexer
                 toks += pygments.lex(token[1][3:-3], self.rst_lexer)
                 toks += pygments.lex(token[1][-3:], self.python_lexer)
             elif token[0] == Token.Comment.Single:
                 toks.append((Token.Comment.Single, token[1][0]))
                 # parse comment content by rst lexer
                 # remove the extra newline added by rst lexer
                 toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
             else:
                 toks.append(token)
         tokens = toks
     return pygments.format(tokens, self.formatter)
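Several examples on this page, including the one above, run pygments.lex and pygments.format as two explicit steps so the token stream can be rewritten in between. When no such rewriting is needed, pygments.highlight performs the same two steps in one call; a minimal sketch:

# Minimal sketch: highlight() is simply lex() followed by format().
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

code = "x = 1\n"
lexer, formatter = PythonLexer(), TerminalFormatter()

via_two_steps = pygments.format(pygments.lex(code, lexer), formatter)
via_highlight = pygments.highlight(code, lexer, formatter)
assert via_two_steps == via_highlight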
Code example #51
 def output_stage(name, function):
     if 'dot' in args.dumps:
         dot_document = function_to_dot(function, style=args.style)
         if args.dot_format:
             dot_args = [
                 'dot',
                 '-T{}'.format(args.dot_format),
                 '-o'
                 '{}.{}'.format(name, args.dot_format),
             ]
             if args.dpi is not None:
                 dot_args.append('-Gdpi={}'.format(args.dpi))
             dot = subprocess.Popen(dot_args, stdin=subprocess.PIPE)
             dot.communicate(dot_document.encode('utf-8'))
         else:
             with open('{}.dot'.format(name), 'w') as f:
                 f.write(dot_document)
                 f.write('\n')
     if 'text' in args.dumps:
         with open('{}.ll'.format(name), 'w') as f:
             f.write(pygments.format(function.format(), text_formatter))
             f.write('\n')
Code example #52
File: pry.py Project: CrusaderW/pry
 def highlight(self, lines):
     pygments = self.module.pygments
     tokens = pygments.lexers.PythonLexer().get_tokens("\n".join(lines))
     source = pygments.format(tokens,
                              pygments.formatters.TerminalFormatter())
     return source.split("\n")
Code example #53
def run_tests(test_files, index_file, index_file_base_path, amalgamation_file,
              amalgamation_filename):
    """Run all tests and produce HTML render results.

	Args:
		test_files: An iterable of filenames to lex, test and render
		index_file: A file object to write the HTML index to linking all render results
		            It must be opened as a text file with UTF-8 encoding.
		index_file_base_path: Base path to use for links in index
		amalgamation_file: A file object to write all HTML render results subsequently to
		                   It must be opened as a binary file and it will be written to with UTF-8 encoding.

	Return:
		The number of files that failed complete lexing. On success, this is 0.
	"""

    lexer = MMTLexer(encoding="utf-8")
    full_html_formatter = HtmlFormatter(full=True,
                                        encoding="utf-8",
                                        style=MMTDefaultStyle)
    snippet_html_formatter = HtmlFormatter(full=False,
                                           encoding="utf-8",
                                           style=MMTDefaultStyle)

    amalgamation_file.write(b"""
<!doctype html>
<html>
	<head>
		<meta charset="utf-8">

		<!-- Don't cache! -->
		<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate">
		<meta http-equiv="Pragma" content="no-cache" />
		<meta http-equiv="Expires" content="0">

		<title>Amalgamation of Render Results - mmt-pygments-lexer</title>
	</head>
	<body>
		<h1>Amalgamation of Render Results</h1>
""")

    amalgamation_file.write(b"<style>")
    amalgamation_file.write(
        snippet_html_formatter.get_style_defs().encode("utf-8"))
    amalgamation_file.write(b"</style>")

    num_failing_files = 0
    num_succeeding_lines = 0
    out_statuses = []

    # Tokens that we interpret as signalling a lexer error
    # Token.Error is Pygment's standard error token whereas Generic.Error
    # is issued by MMTLexer for graceful degradation
    error_tokens = [Token.Error, Generic.Error]

    for test_filename in test_files:
        print("Running test for " + test_filename)

        # We read both input and output file in binary mode to circumvent encoding issues
        # Indeed, we specified above UTF-8 encoding for the lexer and formatter
        with io.open(test_filename, mode="rb") as test_file:
            tokens = list(lexer.get_tokens(test_file.read()))
            erroneous = any(token in error_tokens for (token, _) in tokens)

        if erroneous:
            num_failing_files = num_failing_files + 1
            print(
                "  --> Lexing error, see corresponding .html file for details\n"
            )
        else:
            num_succeeding_lines = num_succeeding_lines + count_lines(
                test_filename)

        out_filename = test_filename + ".html"
        out_statuses.append({"filename": out_filename, "error": erroneous})
        with io.open(out_filename, mode="wb") as out_file:
            pygments.format(tokens, full_html_formatter, out_file)
            pygments.format(tokens, snippet_html_formatter, amalgamation_file)

        print("  --> Output at " + out_filename)

    generate_index_file(out_statuses, num_succeeding_lines, num_failing_files,
                        index_file_base_path, amalgamation_filename,
                        index_file)

    amalgamation_file.write(b"</body></html>")

    return (num_succeeding_lines, num_failing_files)
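The run_tests function above passes a third argument (a file object) to pygments.format; in that form the formatted output is written to the file and nothing is returned. A minimal sketch of that variant, writing into an in-memory buffer instead of the HTML files used above:

# Minimal sketch: pygments.format with an explicit output file object.
import io
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

buf = io.StringIO()
pygments.format(PythonLexer().get_tokens("x = 1\n"), HtmlFormatter(), buf)  # writes to buf, returns None
html = buf.getvalue()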
Code example #54
File: parse.py Project: tarekziade/bpython
def string_to_fmtstr(x):
    config = Struct()
    loadini(config, default_config_path())
    return parse(format(PythonLexer().get_tokens(x), BPythonFormatter(config.color_scheme)))
Code example #55
File: formatting.py Project: eax64/gxf
 def format(self, *args, formatter=formatter, **kwargs):
     return pygments.format(self.fmttokens(*args, **kwargs), formatter)
Code example #56
File: repl.py Project: yarwelp/inspector
 def highlight(self, lines):
     tokens = CppLexer().get_tokens("\n".join(lines))
     source = pygments.format(tokens, TerminalFormatter())
     return source.split("\n")
Code example #57
File: __init__.py Project: Airtnp/hase
 def cache_tokens(self, addr_map):
     for filename, line in addr_map.values():
         l.warning('caching file: ' + str(filename) + ' at line: ' +
                   str(line))
         if filename != '??':
             if filename not in self.file_read_cache.keys():
                 self.file_cache[filename] = {}
                 self.file_read_cache[filename] = {}
                 try:
                     lexer = pygments.lexers.get_lexer_for_filename(
                         str(filename))
                     formatter_opts = dict(linenos="inline",
                                           linespans="line",
                                           hl_lines=[line])
                     html_formatter = pygments.formatters.get_formatter_by_name(
                         "html", **formatter_opts)
                     css = html_formatter.get_style_defs('.highlight')
                     with open(str(filename)) as f:
                         content = f.readlines()
                     if len(content) < 1000:
                         content = ''.join(content)
                         tokens = lexer.get_tokens(content)
                         source = pygments.format(tokens, html_formatter)
                         self.file_cache[filename][line] = (css, source)
                         self.file_read_cache[filename] = (lexer, content,
                                                           False)
                     else:
                         minl = max(0, line - 30)
                         maxl = min(len(content), line + 30)
                         formatter_opts = dict(linenos="inline",
                                               linespans="line",
                                               hl_lines=[line])
                         html_formatter = pygments.formatters.get_formatter_by_name(
                             "html", **formatter_opts)
                         css = html_formatter.get_style_defs('.highlight')
                         source = pygments.format(
                             lexer.get_tokens(''.join(content[minl:maxl])),
                             html_formatter)
                         self.file_cache[filename][line] = (css, source)
                         self.file_read_cache[filename] = (lexer, content,
                                                           True)
                 except Exception as e:
                     print(e)
                     self.file_cache[filename][line] = (None, None)
                     self.file_read_cache[filename] = (None, None, False)
             else:
                 lexer, content, is_largefile = self.file_read_cache[
                     filename]
                 if content:
                     try:
                         if not is_largefile:
                             formatter_opts = dict(linenos="inline",
                                                   linespans="line",
                                                   hl_lines=[line])
                             html_formatter = pygments.formatters.get_formatter_by_name(
                                 "html", **formatter_opts)
                             css = html_formatter.get_style_defs(
                                 '.highlight')
                             source = pygments.format(
                                 lexer.get_tokens(content), html_formatter)
                             self.file_cache[filename][line] = (css, source)
                         else:
                             minl = max(0, line - 30)
                             maxl = min(len(content), line + 30)
                             formatter_opts = dict(linenos="inline",
                                                   linespans="line",
                                                   hl_lines=[line - minl])
                             html_formatter = pygments.formatters.get_formatter_by_name(
                                 "html", **formatter_opts)
                             css = html_formatter.get_style_defs(
                                 '.highlight')
                             source = pygments.format(
                                 lexer.get_tokens(''.join(
                                     content[minl:maxl])), html_formatter)
                             self.file_cache[filename][line] = (css, source)
                     except Exception as e:
                         print(e)
                         self.file_cache[filename][line] = (None, None)
                 else:
                     self.file_cache[filename][line] = (None, None)
Code example #58
File: cli_utils.py Project: sthagen/xonsh
    def colorize(self, *tokens: tuple) -> str:
        from pygments import format

        return format(tokens, self.formatter)