Code example #1
File: mitakalab.py Project: mitakalab/pygments.rb
def _get_ttype_class(ttype):
	fname = STANDARD_TYPES.get(ttype)
	if fname:
		return fname
	aname = ''
	while fname is None:
		aname = '-' + ttype[-1] + aname
		ttype = ttype.parent
		fname = STANDARD_TYPES.get(ttype)
	return fname + aname
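A minimal usage sketch of the helper above (assuming `from pygments.token import STANDARD_TYPES, Token`; `Token.Keyword.Custom` is a made-up subtype used only to show the fallback path):

from pygments.token import STANDARD_TYPES, Token

# Direct hit: Name.Function is registered in STANDARD_TYPES.
print(_get_ttype_class(Token.Name.Function))   # -> 'nf'

# Made-up subtype: not registered, so the loop walks up to Keyword ('k')
# and appends the missing trailing parts.
print(_get_ttype_class(Token.Keyword.Custom))  # -> 'k-Custom'
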
Code example #2
File: latex.py Project: austinbv/pygments
def _get_ttype_name(ttype):
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ""
    while fname is None:
        aname = ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname
Code example #3
def _get_ttype_name(ttype):
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ''
    while fname is None:
        aname = ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname
Code example #4
File: themes.py Project: Rohitth007/zulip-terminal
def add_pygments_style(theme_meta: Dict[str, Any],
                       urwid_theme: ThemeSpec) -> None:
    """
    This function adds pygments styles for use in syntax
    highlighting of code blocks and inline code.
    pygments["styles"]:
        one of those available in pygments/styles.
    pygments["background"]:
        used to set a different background for codeblocks instead of the
        one used in the syntax style, if it doesn't match with
        the overall zt theme.
        The default is available as Eg: MaterialStyle.background_color
    pygments["overrides"]:
        used to override certain pygments styles to match to urwid format.
        It can also be used to customize the syntax style.
    """
    pygments = theme_meta["pygments"]
    pygments_styles = pygments["styles"]
    pygments_bg = pygments["background"]
    pygments_overrides = pygments["overrides"]

    term16_styles = term16.styles
    term16_bg = term16.background_color

    for token, css_class in STANDARD_TYPES.items():
        if css_class in pygments_overrides:
            pygments_styles[token] = pygments_overrides[css_class]

        # Inherit parent pygments style if not defined.
        # Eg: Use `String` if `String.Double` is not present.
        if pygments_styles[token] == "":
            try:
                t = [k for k, v in STANDARD_TYPES.items() if v == css_class[0]]
                pygments_styles[token] = pygments_styles[t[0]]
            except IndexError:
                pass

        if term16_styles[token] == "":
            try:
                t = [k for k, v in STANDARD_TYPES.items() if v == css_class[0]]
                term16_styles[token] = term16_styles[t[0]]
            except IndexError:
                pass

        new_style = (
            f"pygments:{css_class}",
            term16_styles[token],
            term16_bg,
            "bold",  # Mono style
            pygments_styles[token],
            pygments_bg,
        )
        urwid_theme.append(new_style)
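A hedged sketch of the input shape the function above expects. MaterialStyle is only mentioned because the docstring does; the 'k' override is hypothetical, and the real call also relies on zulip-terminal's internal term16 palette, which is not shown here:

from pygments.styles.material import MaterialStyle
from pygments.token import STANDARD_TYPES

style = MaterialStyle
theme_meta = {
    "pygments": {
        # One entry per standard token; tokens the style leaves out get ""
        # so the parent-inheritance branch above can fill them in.
        "styles": {token: style.styles.get(token, "") for token in STANDARD_TYPES},
        "background": style.background_color,
        "overrides": {"k": "bold"},  # hypothetical tweak for the 'k' css class
    }
}
urwid_theme = []  # ThemeSpec is a list of urwid palette entries
# add_pygments_style(theme_meta, urwid_theme) would then extend urwid_theme in place.
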
Code example #5
def _splitLines(
        tokens: Iterator[Tuple[TokenType, str]]) -> Iterator[XMLContent]:
    for ttype, value in tokens:
        cssclass = STANDARD_TYPES.get(ttype)
        span = xhtml.span(class_=cssclass) if cssclass else None

        first = True
        for part in value.split('\n'):
            if first:
                first = False
            else:
                yield '\n'
            if part:
                yield part if span is None else span[part]
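The original yields XML spans via SoftFab's xhtml helper, which is not available here; a self-contained variant of the same newline-splitting idea, yielding plain (css class, text) pairs instead, might look like this:

from pygments.lexers import PythonLexer
from pygments.token import STANDARD_TYPES

def split_lines(tokens):
    for ttype, value in tokens:
        cssclass = STANDARD_TYPES.get(ttype)
        first = True
        for part in value.split('\n'):
            if first:
                first = False
            else:
                yield (None, '\n')     # line break, no class
            if part:
                yield (cssclass, part)

for css, text in split_lines(PythonLexer().get_tokens("x = 1\ny = 2\n")):
    print(repr(css), repr(text))
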
Code example #6
    def _output_token(self, ttype, value, pos, outfile):
        # Manually split things like the "os.path" in "import os.path" into separate tokens so we can annotate them separately
        if ttype == Token.Name.Namespace and '.' in value:
            names = value.split('.')
            r, c = pos
            for i, n in enumerate(names):
                if i:
                    self._output_token(Token.Text, u'.', (r, c), outfile)
                    c += 1
                self._output_token(Token.Name.Namespace, n, (r, c), outfile)
                c += len(n)
            return

        if ttype == Token.Text and pos[1] > 0:
            if '\n' in value:
                outfile.write('</span>')
                self.current_errors = []

        id_str = ' id="%d_%d"' % pos

        cls_str = ''
        cls = STANDARD_TYPES.get(ttype)
        classes = []
        if cls:
            classes.append(cls)
        type_idx = ''
        if ttype in Token.Name:
            classes.append("anno")
            classes.append(
                "usedef-" +
                value.encode("base64").replace('=', '').replace('\n', ''))

            # print pos, ttype
            node = self.pos_nodes.get(pos, None)
            u = self.node_types.get(node, None)
            if u:
                type_idx = ' type_idx="%s"' % ','.join(
                    str(self._get_type_idx(t)) for t in u.types())
            else:
                print "missed", pos, node
        if classes:
            cls_str = ' class="%s"' % ' '.join(classes)

        outfile.write('<span%s%s%s>' % (cls_str, id_str, type_idx))

        translated_val = value.translate(self._html_escape_table)
        outfile.write(translated_val.encode("utf8"))

        outfile.write('</span>')
Code example #7
File: browser.py Project: kmod/icbd
    def _output_token(self, ttype, value, pos, outfile):
        # Manually split things like the "os.path" in "import os.path" into separate tokens so we can annotate them separately
        if ttype == Token.Name.Namespace and '.' in value:
            names = value.split('.')
            r, c = pos
            for i, n in enumerate(names):
                if i:
                    self._output_token(Token.Text, u'.', (r, c), outfile)
                    c += 1
                self._output_token(Token.Name.Namespace, n, (r, c), outfile)
                c += len(n)
            return

        if ttype == Token.Text and pos[1] > 0:
            if '\n' in value:
                outfile.write('</span>')
                self.current_errors = []

        id_str = ' id="%d_%d"' % pos

        cls_str = ''
        cls = STANDARD_TYPES.get(ttype)
        classes = []
        if cls:
            classes.append(cls)
        type_idx = ''
        if ttype in Token.Name:
            classes.append("anno")
            classes.append("usedef-" + value.encode("base64").replace('=', '').replace('\n', ''))

            # print pos, ttype
            node = self.pos_nodes.get(pos, None)
            u = self.node_types.get(node, None)
            if u:
                type_idx = ' type_idx="%s"' % ','.join(str(self._get_type_idx(t)) for t in u.types())
            else:
                print "missed", pos, node
        if classes:
            cls_str = ' class="%s"' % ' '.join(classes)

        outfile.write('<span%s%s%s>' % (cls_str, id_str, type_idx))

        translated_val = value.translate(self._html_escape_table)
        outfile.write(translated_val.encode("utf8"))

        outfile.write('</span>')
Code example #8
    def format(self, tokenstream, outfile):
        row = 1
        column = 1
        for (token_type, text) in tokenstream:
            parts = text.split("\n")
            new_row = row + len(parts) - 1
            if len(parts) > 1:
                new_column = len(parts[-1])
            else:
                new_column = column + len(parts[-1]) - 1
            if token_type is not Token.Text:
                self._annotation_list.append(Annotation(
                    row,
                    column,
                    new_row,
                    new_column,
                    {
                        "type": "style",
                        "what": STANDARD_TYPES.get(token_type, ""),
                    }
                ))
            row = new_row
            column = new_column + 1
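The Annotation class and _annotation_list belong to the host project and are not shown in the snippet; a hedged, stand-alone rewrite of the same row/column bookkeeping with a namedtuple stand-in:

from collections import namedtuple
from pygments.lexers import PythonLexer
from pygments.token import STANDARD_TYPES, Token

Annotation = namedtuple("Annotation", "row column new_row new_column data")

annotations = []
row = column = 1
for token_type, text in PythonLexer().get_tokens("def f():\n    return 1\n"):
    parts = text.split("\n")
    new_row = row + len(parts) - 1
    new_column = len(parts[-1]) if len(parts) > 1 else column + len(parts[-1]) - 1
    if token_type is not Token.Text:
        annotations.append(Annotation(row, column, new_row, new_column,
                                      {"type": "style",
                                       "what": STANDARD_TYPES.get(token_type, "")}))
    row = new_row
    column = new_column + 1

print(annotations[0])
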
Code example #9
def get_pygments_tokens(page, elem, uid):
    """inserts a table containing all existent token types and corresponding
       css class, with an example"""
    # The original div in the raw html page may contain some text
    # as a visual reminder that we need to remove here.
    elem.text = ''
    elem.attrib['class'] = CRUNCHY_PYGMENTS
    table = SubElement(elem, 'table')
    row = SubElement(table, 'tr')
    for title in ['Token type', 'css class']:
        column = SubElement(row, 'th')
        column.text = title
    keys = list(STANDARD_TYPES.keys())
    keys.sort()
    for token in keys:
        if len(repr(token)) == 5:  # token = Token
            continue
        row = SubElement(table, 'tr')
        column1 = SubElement(row, 'td')
        column1.text = repr(token)[6:]  # remove "Token."
        column2 = SubElement(row, 'td')
        token_class = STANDARD_TYPES[token]
        column2.text = token_class.split('_')[0]
        column3 = SubElement(row, 'td')
        span = SubElement(column3, 'span')
        span.attrib['class'] = token_class
        span.text = " * test * "
        column4 = SubElement(row, 'td')
        _code = SubElement(column4, 'code')
        _code.attrib['class'] = token_class
        _code.text = " * test * "
        column5 = SubElement(row, 'td')
        var = SubElement(column5, 'var')
        var.attrib['class'] = token_class
        var.text = " * test * "
    return
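For a quick look at the same data without building an ElementTree table, the mapping can simply be printed; this is only an illustrative sketch, not part of Crunchy:

from pygments.token import STANDARD_TYPES

for token in sorted(STANDARD_TYPES, key=repr):
    if len(repr(token)) == 5:   # skip the bare Token root
        continue
    print(repr(token)[6:].ljust(40), STANDARD_TYPES[token])
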
Code example #10
File: style.py Project: Mekyi/crunchy
def get_pygments_tokens(page, elem, uid):
    """inserts a table containing all existent token types and corresponding
       css class, with an example"""
    # The original div in the raw html page may contain some text
    # as a visual reminder that we need to remove here.
    elem.text = ''
    elem.attrib['class'] = CRUNCHY_PYGMENTS
    table = SubElement(elem, 'table')
    row = SubElement(table, 'tr')
    for title in ['Token type', 'css class']:
        column = SubElement(row, 'th')
        column.text = title
    keys = list(STANDARD_TYPES.keys())
    keys.sort()
    for token in keys:
        if len(repr(token)) == 5: # token = Token
            continue
        row = SubElement(table, 'tr')
        column1 = SubElement(row, 'td')
        column1.text = repr(token)[6:] # remove "Token."
        column2 = SubElement(row, 'td')
        token_class = STANDARD_TYPES[token]
        column2.text = token_class.split('_')[0]
        column3 = SubElement(row, 'td')
        span = SubElement(column3, 'span')
        span.attrib['class'] = token_class
        span.text = " * test * "
        column4 = SubElement(row, 'td')
        _code = SubElement(column4, 'code')
        _code.attrib['class'] = token_class
        _code.text = " * test * "
        column5 = SubElement(row, 'td')
        var = SubElement(column5, 'var')
        var.attrib['class'] = token_class
        var.text = " * test * "
    return
Code example #11
File: pygments_in.py Project: yask123/moin-2.0
    def _append(self, type, value, element):
        class_ = STANDARD_TYPES.get(type)
        if class_:
            value = moin_page.span(attrib={moin_page.class_: class_},
                                   children=(value, ))
        element.append(value)
Code example #12
try:
    import pygments
    from pygments.lexers import get_lexer_for_mimetype
    from pygments.lexers import TextLexer, IniLexer
    from pygments.styles import get_style_by_name
    from pygments.token import STANDARD_TYPES, Token
    from pygments.lexer import RegexLexer, bygroups
    
    from pygments.lexers import PythonLexer
    from pygments.token import *
    from pygments.formatters import *
    from pygments import highlight
    from pygments.formatter import Formatter
except:
    print 'no pygments found, please install it first'
    
STANDARD_TOKENS = STANDARD_TYPES.keys()

tag_name = lambda sn, token: sn + '_' + str(token).replace('.', '_').lower()
  
class textEditor(defaultValues):
    def __init__(self):
        defaultValues.__init__(self) 
        self.pygLexer = None
        self.textbufferMisc = None
        self.viewMisc = None
        self.hl_style = None
        self._generated_styles = set()
        
        
    def getEditor(self,  mime_type =  'text/x-tex',  highlight = True):
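A hedged sketch of what the tag_name helper produces; these strings are presumably used as GTK text-tag names in the editor class, one per token type:

from pygments.token import STANDARD_TYPES, Token

tag_name = lambda sn, token: sn + '_' + str(token).replace('.', '_').lower()

print(tag_name('colorful', Token.Keyword))        # colorful_token_keyword
print(tag_name('colorful', Token.Name.Function))  # colorful_token_name_function
print(len(STANDARD_TYPES))                        # one candidate tag per standard token type
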
Code example #13
File: pygments_in.py Project: YelaSeamless/moin-2.0
    def _append(self, type, value, element):
        class_ = STANDARD_TYPES.get(type)
        if class_:
            value = moin_page.span(attrib={moin_page.class_: class_}, children=(value, ))
        element.append(value)
Code example #14
File: __init__.py Project: dantman/gareth
def line_out(out, line, side=None):
	if len(line) >= 4:
		dtype, num, chunks, syntax_chunks = line
	else:
		dtype, num, chunks = line
		syntax_chunks = None
	if not chunks:
		ltype = "nil"
	elif dtype == -1:
		ltype = "old"
	elif dtype == 1:
		ltype = "new"
	else:
		ltype = "same"

	if side:
		side = "%s " % side
	else:
		side = ""

	errors = []

	out.extend('<td class="%s%s num">' % (side, ltype))
	if num is not None:
		out.extend(smart_str(escape(num)))
	out.extend('</td>')
	out.extend('<td class="%s%s line">' % (side, ltype))
	if chunks:
		for ddt, text in chunks:
			if ddt == -1:
				out.extend('<del>')
			elif ddt == 1:
				out.extend('<ins>')
			if syntax_chunks:
				syntax_chunks = list(syntax_chunks)
				while len(text) > 0:
					if len(syntax_chunks) <= 0:
						error = "Chunk overflow error '%s' does not have a syntax chunk" % smart_str(text)
						print error
						errors.append(error)
						out.extend(line_to_html(text))
						text = ''
						continue
					syntax_type, syntax_text = syntax_chunks.pop(0)
					if len(syntax_text) > len(text):
						# If the syntax chunk is larger than the diff chunk then prepend a new syntax chunk with the remainder
						syntax_chunks.insert(0, (syntax_type, syntax_text[len(text):]))
						syntax_text = syntax_text[:len(text)]
					subchunk_text = text[:len(syntax_text)]
					text = text[len(syntax_text):] # Trim the initial text off the chunk text
					if syntax_text == subchunk_text:
						cls = ''
						fname = STANDARD_TYPES.get(syntax_type)
						if fname:
							cls = fname
						else:
							aname = ''
							while fname is None:
								aname = '-' + syntax_type[-1] + aname
								syntax_type = syntax_type.parent
								fname = STANDARD_TYPES.get(syntax_type)
							cls = fname + aname

						if cls:
							out.extend('<span class="%s">' % cls)
							out.extend(line_to_html(subchunk_text))
							out.extend('</span>')
						else:
							out.extend(line_to_html(subchunk_text))
					else:
						# Chunk mismatch (code error)
						error = "Chunk mismatch error '%s' does not match '%s'" % (smart_str(syntax_text), smart_str(subchunk_text))
						print error
						errors.append(error)
						out.extend(line_to_html(subchunk_text))

			else:
				out.extend(line_to_html(text))
			if ddt == -1:
				out.extend('</del>')
			elif ddt == 1:
				out.extend('</ins>')
	for error in errors:
		out.extend('<br>')
		out.extend('<span class="error differror">')
		out.extend(smart_str(escape(error)))
		out.extend('</span>')
	out.extend('</td>')
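The core of the block above is the alignment of diff chunks with syntax (token, text) chunks; a self-contained sketch of that idea with made-up inputs (not code from gareth):

# Consume syntax chunks to cover one diff chunk, splitting the last syntax
# chunk when it overruns and pushing the remainder back for the next chunk.
def align(text, syntax_chunks):
    syntax_chunks = list(syntax_chunks)
    out = []
    while text:
        syntax_type, syntax_text = syntax_chunks.pop(0)
        if len(syntax_text) > len(text):
            syntax_chunks.insert(0, (syntax_type, syntax_text[len(text):]))
            syntax_text = syntax_text[:len(text)]
        out.append((syntax_type, text[:len(syntax_text)]))
        text = text[len(syntax_text):]
    return out, syntax_chunks

covered, rest = align("import os", [("kn", "import"), ("w", " "), ("nn", "os.path")])
print(covered)   # [('kn', 'import'), ('w', ' '), ('nn', 'os')]
print(rest)      # [('nn', '.path')]
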
Code example #15
'''

CLASS_START = '''class %sStyle(Style):

    """Pygments style %sStyle."""

    background_color = "%s"  %s
    highlight_color = "%s"   %s

    styles = {
%s
    }
'''

tokens = {"hll": "highlight_color"}
for k, v in ST.items():
    if v == "":
        continue
    tokens[v] = str(k).replace("Token.", '')


class Comments(object):

    """Comment strip class."""

    re_line_preserve = re.compile(r"\r?\n", re.MULTILINE)
    re_css_comment = re.compile(
        r'''(?x)
            (?P<comments>
                /\*[^*]*\*+(?:[^/*][^*]*\*+)*/  # multi-line comments
              | \s*//(?:[^\r\n])*               # single line comments
Code example #16
File: annotate.py Project: mahmoudimus/icbd
    def _output_token(self, ttype, value, pos, outfile):
        # Manually split things like the "os.path" in "import os.path" into separate tokens so we can annotate them separately
        if ttype == Token.Name.Namespace and '.' in value:
            names = value.split('.')
            r, c = pos
            for i, n in enumerate(names):
                if i:
                    self._output_token(Token.Text, u'.', (r, c), outfile)
                    c += 1
                self._output_token(Token.Name.Namespace, n, (r, c), outfile)
                c += len(n)
            return

        outfile.write('<span class="error">' * self._num_errors_start(pos))

        outfile.write('</span>' * self._num_errors_end(pos))

        if ttype == Token.Text and pos[1] > 0:
            if '\n' in value:
                outfile.write('</span>')
                self.current_errors = []

        id_str = ''
        if self._have_type_info_for_pos(pos):
            id_str = ' id="%d_%d"' % pos

        def output_preview(should_slide=False):
            slide_class = ' slide' if should_slide else ''
            outfile.write('<span class="anno_preview%s" id="col_%d"></span>' %
                          (slide_class, pos[0]))
            self._output_errors_for_line(pos[0], outfile)

        # This currently outputs errors and annotations before comments
        if ttype == Token.Comment and pos[1] > 0:
            self.comments.add(pos[0])
            output_preview(True)
        elif ttype == Token.Text and pos[1] > 0 and pos[0] not in self.comments:
            try:
                value.index('\n')
                should_slide = len(self._errors_for_line(pos[0])) > 0
                output_preview(should_slide)
            except ValueError:
                pass

        cls_str = ''
        cls = STANDARD_TYPES.get(ttype)
        if cls or id_str:
            cls_str = ' class="'
            if id_str:
                cls_str += 'anno '
            if cls:
                cls_str += cls
            cls_str += '"'

        if pos in self.links:
            outfile.write("<a href='%s'>" % self.links[pos])

        if cls_str or id_str:
            outfile.write('<span%s%s>' % (cls_str, id_str))

        translated_val = value.translate(self._html_escape_table)
        outfile.write(translated_val.encode("utf8"))

        if cls:
            outfile.write('</span>')

        if pos in self.links:
            outfile.write("</a>")
Code example #17
'''

CLASS_START = '''class %sStyle(Style):

    """Pygments style %sStyle."""

    background_color = "%s"  %s
    highlight_color = "%s"   %s

    styles = {
%s
    }
'''

tokens = {"hll": "highlight_color"}
for k, v in ST.items():
    if v == "":
        continue
    tokens[v] = str(k).replace("Token.", '')


class Comments(object):
    """Comment strip class."""

    re_line_preserve = re.compile(r"\r?\n", re.MULTILINE)
    re_css_comment = re.compile(
        r'''(?x)
            (?P<comments>
                /\*[^*]*\*+(?:[^/*][^*]*\*+)*/  # multi-line comments
              | \s*//(?:[^\r\n])*               # single line comments
            )
Code example #18
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename = javascript
# author=KGerring
# date = 7/13/17
from startups import *
import sys, os
from pygments.lexers.javascript import JavascriptLexer
import re, regex
import pygments.token
from pygments.token import string_to_tokentype, Token, is_token_subtype, STANDARD_TYPES

STANDARD = {v: k for k, v in STANDARD_TYPES.items()}


class PeekableIterator(object):
    'Iterator that supports peeking at the next item in the iterable.'

    def __init__(self, iterable):
        self.iterator = iter(iterable)
        self.item = None

    def peek(self):
        'Get the next item in the iterable without advancing our position.'
        if (not self.item):
            try:
                self.item = next(self.iterator)
            except StopIteration:
                return None
        return self.item
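A short sketch of what the reversed STANDARD mapping gives you, plus its main caveat: every token whose css class is the empty string collides on the '' key, so only one of those entries survives the dict comprehension.

from pygments.token import STANDARD_TYPES

STANDARD = {v: k for k, v in STANDARD_TYPES.items()}

print(STANDARD['nf'])   # Token.Name.Function
print(STANDARD['k'])    # Token.Keyword
print(STANDARD[''])     # whichever empty-class token happened to come last
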
Code example #19
def line_out(out, line, side=None):
    if len(line) >= 4:
        dtype, num, chunks, syntax_chunks = line
    else:
        dtype, num, chunks = line
        syntax_chunks = None
    if not chunks:
        ltype = "nil"
    elif dtype == -1:
        ltype = "old"
    elif dtype == 1:
        ltype = "new"
    else:
        ltype = "same"

    if side:
        side = "%s " % side
    else:
        side = ""

    errors = []

    out.extend('<td class="%s%s num">' % (side, ltype))
    if num is not None:
        out.extend(smart_str(escape(num)))
    out.extend('</td>')
    out.extend('<td class="%s%s line">' % (side, ltype))
    if chunks:
        for ddt, text in chunks:
            if ddt == -1:
                out.extend('<del>')
            elif ddt == 1:
                out.extend('<ins>')
            if syntax_chunks:
                syntax_chunks = list(syntax_chunks)
                while len(text) > 0:
                    if len(syntax_chunks) <= 0:
                        error = "Chunk overflow error '%s' does not have a syntax chunk" % smart_str(
                            text)
                        print error
                        errors.append(error)
                        out.extend(line_to_html(text))
                        text = ''
                        continue
                    syntax_type, syntax_text = syntax_chunks.pop(0)
                    if len(syntax_text) > len(text):
                        # If the syntax chunk is larger than the diff chunk then prepend a new syntax chunk with the remainder
                        syntax_chunks.insert(
                            0, (syntax_type, syntax_text[len(text):]))
                        syntax_text = syntax_text[:len(text)]
                    subchunk_text = text[:len(syntax_text)]
                    text = text[len(syntax_text):]  # Trim the initial text off the chunk text
                    if syntax_text == subchunk_text:
                        cls = ''
                        fname = STANDARD_TYPES.get(syntax_type)
                        if fname:
                            cls = fname
                        else:
                            aname = ''
                            while fname is None:
                                aname = '-' + syntax_type[-1] + aname
                                syntax_type = syntax_type.parent
                                fname = STANDARD_TYPES.get(syntax_type)
                            cls = fname + aname

                        if cls:
                            out.extend('<span class="%s">' % cls)
                            out.extend(line_to_html(subchunk_text))
                            out.extend('</span>')
                        else:
                            out.extend(line_to_html(subchunk_text))
                    else:
                        # Chunk mismatch (code error)
                        error = "Chunk mismatch error '%s' does not match '%s'" % (
                            smart_str(syntax_text), smart_str(subchunk_text))
                        print error
                        errors.append(error)
                        out.extend(line_to_html(subchunk_text))

            else:
                out.extend(line_to_html(text))
            if ddt == -1:
                out.extend('</del>')
            elif ddt == 1:
                out.extend('</ins>')
    for error in errors:
        out.extend('<br>')
        out.extend('<span class="error differror">')
        out.extend(smart_str(escape(error)))
        out.extend('</span>')
    out.extend('</td>')
Code example #20
File: conf.py Project: opencor/user-documentation
CELLMLTEXT_TYPES = {
    CellmlTextComment: 'ctc',
    CellmlTextKeyword: 'ctk',
    CellmlTextCellmlKeyword: 'ctck',
    CellmlTextNumber: 'ctn',
    CellmlTextOperator: 'cto',
    CellmlTextParameterBlock: 'ctpb',
    CellmlTextParameterCellmlKeyword: 'ctpck',
    CellmlTextParameterKeyword: 'ctpk',
    CellmlTextParameterNumber: 'ctpn',
    CellmlTextParameterSiUnitKeyword: 'ctpsuk',
    CellmlTextPunctuation: 'ctp',
    CellmlTextString: 'cts',
    CellmlTextSiUnitKeyword: 'ctsuk'
}

STANDARD_TYPES.update(CELLMLTEXT_TYPES)


class cellmlTextLexer(RegexLexer):
    tokens = {
        'root': [
            # Single and multiline comments
            (r'//(\n|[\w\W]*?[^\\]\n)', CellmlTextComment),
            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', CellmlTextComment),
            (r'/(\\\n)?[*][\w\W]*', CellmlTextComment),

            # Keywords
            (
                words(
                    (
                        # CellML Text keywords
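The snippet above was cut before the custom token types themselves were declared; a hedged, minimal reconstruction of the registration pattern with a stand-in lexer (not the real cellmlTextLexer) shows why the STANDARD_TYPES.update call matters: formatters look the short css class up in that dict.

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexer import RegexLexer
from pygments.token import STANDARD_TYPES, Token

# Assumed definition; the original declares CellmlTextComment before this point.
CellmlTextComment = Token.CellmlText.Comment
STANDARD_TYPES.update({CellmlTextComment: 'ctc'})

class MiniLexer(RegexLexer):
    # Minimal stand-in lexer, nothing like the real grammar.
    tokens = {
        'root': [
            (r'//[^\n]*\n?', CellmlTextComment),
            (r'[^\n]+\n?|\n', Token.Text),
        ]
    }

print(highlight("// a comment\n", MiniLexer(), HtmlFormatter()))
# -> the comment is wrapped in <span class="ctc">...</span>
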
Code example #21
File: annotate.py Project: kmod/icbd
    def _output_token(self, ttype, value, pos, outfile):
        # Manually split things like the "os.path" in "import os.path" into separate tokens so we can annotate them separately
        if ttype == Token.Name.Namespace and '.' in value:
            names = value.split('.')
            r, c = pos
            for i, n in enumerate(names):
                if i:
                    self._output_token(Token.Text, u'.', (r, c), outfile)
                    c += 1
                self._output_token(Token.Name.Namespace, n, (r, c), outfile)
                c += len(n)
            return

        outfile.write('<span class="error">' * self._num_errors_start(pos))

        outfile.write('</span>' * self._num_errors_end(pos))

        if ttype == Token.Text and pos[1] > 0:
            if '\n' in value:
                outfile.write('</span>')
                self.current_errors = []

        id_str = ''
        if self._have_type_info_for_pos(pos):
            id_str = ' id="%d_%d"' % pos

        def output_preview(should_slide=False):
            slide_class = ' slide' if should_slide else ''
            outfile.write('<span class="anno_preview%s" id="col_%d"></span>' % (slide_class, pos[0]))
            self._output_errors_for_line(pos[0], outfile)

        # This currently outputs errors and annotations before comments
        if ttype == Token.Comment and pos[1] > 0:
            self.comments.add(pos[0])
            output_preview(True)
        elif ttype == Token.Text and pos[1] > 0 and pos[0] not in self.comments:
            try:
                value.index('\n')
                should_slide = len(self._errors_for_line(pos[0])) > 0
                output_preview(should_slide)
            except ValueError:
                pass
            
        cls_str = ''
        cls = STANDARD_TYPES.get(ttype)
        if cls or id_str:
            cls_str = ' class="'
            if id_str:
                cls_str += 'anno '
            if cls:
                cls_str += cls
            cls_str += '"'

        if pos in self.links:
            outfile.write("<a href='%s'>" % self.links[pos])

        if cls_str or id_str:
            outfile.write('<span%s%s>' % (cls_str, id_str))

        translated_val = value.translate(self._html_escape_table)
        outfile.write(translated_val.encode("utf8"))

        if cls:
            outfile.write('</span>')

        if pos in self.links:
            outfile.write("</a>")
Code example #22
from pygments.token import (
    Keyword,
    Name,
    String,
    Number,
    Punctuation,
    #  Literal,
    STANDARD_TYPES)

STANDARD_TYPES.update({
    String.Double.Logger: 's2l',
    String.Double.Logger.Asctime: 's2la',
    String.Double.Logger.Curl: 's2lc',
    String.Double.Logger.Name: 's2ln',
    String.Double.Logger.Message: 's2lm',
    String.Double.Logger.Message.Success: 's2lms',
    String.Double.Logger.Message.Error: 's2lme',
    String.Double.Logger.Version.Multiverse: 's2lvm',
    String.Double.Logger.Version.Requests: 's2lvr',
    String.Double.Logger.Level.Info: 's2lli',
    String.Double.Logger.Level.Debug: 's2lld',
    String.Double.Logger.Level.Warning: 's2llw',
    String.Double.Logger.Level.Error: 's2lle',
    String.Double.Logger.Level.Critical: 's2llc'
})


class LoggerJsonLexer(pygments.lexer.RegexLexer):
    """
    For JSON data structures.

    .. versionadded:: 0.1
    """
Code example #23
File: editor.py Project: gfoyle/PyQuil
# A Database and Query Analyze Tool
# Copyright (c) 2010-2011 John Anderson <*****@*****.**>
# License: GPLv3. See COPYING

import gtk
import pango
import gobject
from pygments.lexers import SqlLexer
from pygments.styles import get_style_by_name
from pygments.styles.colorful import ColorfulStyle
from pygments.token import STANDARD_TYPES, Token
from lib.common import _
from lib.pluginfactory import get_plugin

STANDARD_TOKENS = STANDARD_TYPES.keys()

tag_name = lambda sn, token: sn + '_' + str(token).replace('.', '_').lower()


class PyQuilDocument(gtk.VBox):
    def __init__(self):
        super(gtk.VBox, self).__init__()
        self.hbox = gtk.HBox()

        self.__init_plugins()

        self.result_window = None
        self.plugin = None

        self.connection_string = gtk.Entry()