Code Example #1
    def __init__(self, master, name, token, style):

        self.frame = NumberedTextFrame(master,
                                       style=style,
                                       wrap=tk.NONE,
                                       bd=0,
                                       padx=5,
                                       pady=5)
        master.add(self.frame, text=name)
        master.select(self.frame)

        self.path = None
        self.index = master.index(self.frame)

        lexers = {
            '.py': PythonLexer(),
            '.md': MarkdownLexer(),
            '.tcl': TclLexer(),
            '.c': CLexer(),
            '.h': CLexer(),
            '.ini': IniLexer()
        }
        lexer = lexers.get(pathlib.Path(name).suffix, None)
        self.frame.text.highlighter = Highlighter(self.frame.text, token,
                                                  lexer)
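
NumberedTextFrame and Highlighter above are application-specific classes that are not shown on this page. As a rough illustration of the pattern, a minimal Highlighter might map Pygments tokens onto Tk text tags like this (a sketch under that assumption, not the project's actual implementation):

# Hypothetical minimal Highlighter; the project's real class may differ.
from pygments import lex

class Highlighter:
    def __init__(self, text, token_styles, lexer):
        self.text = text      # the tk.Text widget to highlight
        self.lexer = lexer    # e.g. a CLexer instance, or None for plain text
        for token, style in token_styles.items():
            text.tag_configure(str(token), **style)   # e.g. {'foreground': '#CC7A00'}

    def highlight(self):
        if self.lexer is None:
            return
        self.text.mark_set("range_start", "1.0")
        for token, content in lex(self.text.get("1.0", "end-1c"), self.lexer):
            self.text.mark_set("range_end", "range_start + %dc" % len(content))
            self.text.tag_add(str(token), "range_start", "range_end")
            self.text.mark_set("range_start", "range_end")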
Code Example #2
File: c_parse.py  Project: zsa/qmk_firmware
def _parse_led_config(file, matrix_cols, matrix_rows):
    """Return any 'raw' led/rgb matrix config
    """
    file_contents = file.read_text(encoding='utf-8')
    file_contents = comment_remover(file_contents)
    file_contents = file_contents.replace('\\\n', '')

    matrix_raw = []
    position_raw = []
    flags = []

    found_led_config = False
    bracket_count = 0
    section = 0
    for _type, value in lex(file_contents, CLexer()):
        # Assume g_led_config..stuff..;
        if value == 'g_led_config':
            found_led_config = True
        elif value == ';':
            found_led_config = False
        elif found_led_config:
            # Assume the bracket depth tells us which section of the config we are in
            if value == '{':
                bracket_count += 1
                if bracket_count == 2:
                    section += 1
            elif value == '}':
                bracket_count -= 1
            else:
                # Assume any non-whitespace value here is important enough to stash
                if _type in [
                        Token.Literal.Number.Integer,
                        Token.Literal.Number.Float, Token.Literal.Number.Hex,
                        Token.Name
                ]:
                    if section == 1 and bracket_count == 3:
                        matrix_raw.append(_coerce_led_token(_type, value))
                    if section == 2 and bracket_count == 3:
                        position_raw.append(_coerce_led_token(_type, value))
                    if section == 3 and bracket_count == 2:
                        flags.append(_coerce_led_token(_type, value))

    # Slightly better interim format
    matrix = list(_get_chunks(matrix_raw, matrix_cols))
    position = list(_get_chunks(position_raw, 2))
    matrix_indexes = list(filter(lambda x: x is not None, matrix_raw))

    # If we have not found anything - bail
    if not section:
        return None

    # TODO: Improve crude parsing/validation
    if len(matrix) != matrix_rows and len(matrix) != (matrix_rows / 2):
        raise ValueError("Unable to parse g_led_config matrix data")
    if len(position) != len(flags):
        raise ValueError("Unable to parse g_led_config position data")
    if len(matrix_indexes) and (max(matrix_indexes) >= len(flags)):
        raise ValueError("OOB within g_led_config matrix data")

    return (matrix, position, flags)
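
_coerce_led_token and _get_chunks are helpers defined elsewhere in c_parse.py. Minimal versions consistent with their use above might look like this (a sketch; the real qmk_firmware helpers may differ):

from pygments.token import Token

# Sketch of the helpers referenced above (assumed behavior, not qmk's exact code).
def _get_chunks(seq, size):
    """Yield successive fixed-size chunks of a sequence, e.g. matrix rows."""
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

def _coerce_led_token(_type, value):
    """Convert a lexed token into a Python value; NO_LED becomes None."""
    if _type in (Token.Literal.Number.Integer, Token.Literal.Number.Hex):
        return int(value, 0)          # base 0 handles both decimal and 0x hex
    if _type is Token.Literal.Number.Float:
        return float(value)
    if _type is Token.Name and value == 'NO_LED':
        return None
    return value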
Code Example #3
    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.keywords:
                    token = Keyword
                elif value in self.types:
                    token = Keyword.Type
            yield index, token, value
Code Example #4
    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.ospray_types:
                    token = Keyword.Type
                elif value in self.ospray_functions:
                    token = Keyword.Function
                elif value in self.ospray_constants:
                    token = Keyword.Constant
            yield index, token, value
Code Example #5
def test_get_comment_tokens():
    from pygments.lexers.c_cpp import CLexer

    file_text_test = "int main(int argc, char *argv[]){\n//This is a comment\n}\n"
    c_lexer = CLexer()

    results = []
    for comment in get_comment_tokens(file_text_test, c_lexer):
        results.append(comment)

    assert len(results) == 1
    assert results[0] == "//This is a comment\n"
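
get_comment_tokens is the function under test here. An implementation consistent with this test could be as small as the following sketch (assumed, not necessarily the project's version):

from pygments.token import Token

def get_comment_tokens(text, lexer):
    """Yield the text of every comment token the lexer produces."""
    for token_type, value in lexer.get_tokens(text):
        if token_type in Token.Comment:   # matches Comment.Single, Comment.Multiline, ...
            yield value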
Code Example #6
File: c_parse.py  Project: xsv24/qmk_firmware
def _parse_led_config(file, matrix_cols, matrix_rows):
    """Return any 'raw' led/rgb matrix config
    """
    matrix_raw = []
    position_raw = []
    flags = []

    found_led_config = False
    bracket_count = 0
    section = 0
    for _type, value in lex(_preprocess_c_file(file), CLexer()):
        # Assume g_led_config..stuff..;
        if value == 'g_led_config':
            found_led_config = True
        elif value == ';':
            found_led_config = False
        elif found_led_config:
            # Assume the bracket depth tells us which section of the config we are in
            if value == '{':
                bracket_count += 1
                if bracket_count == 2:
                    section += 1
            elif value == '}':
                bracket_count -= 1
            else:
                # Assume any non-whitespace value here is important enough to stash
                if _type in [
                        Token.Literal.Number.Integer,
                        Token.Literal.Number.Float, Token.Literal.Number.Hex,
                        Token.Name
                ]:
                    if section == 1 and bracket_count == 3:
                        matrix_raw.append(_coerce_led_token(_type, value))
                    if section == 2 and bracket_count == 3:
                        position_raw.append(_coerce_led_token(_type, value))
                    if section == 3 and bracket_count == 2:
                        flags.append(_coerce_led_token(_type, value))

    # Slightly better interim format
    matrix = list(_get_chunks(matrix_raw, matrix_cols))
    position = list(_get_chunks(position_raw, 2))
    matrix_indexes = list(filter(lambda x: x is not None, matrix_raw))

    # If we have not found anything - bail with no error
    if not section:
        return None

    # Throw any validation errors
    _validate_led_config(matrix, matrix_rows, matrix_indexes, position,
                         position_raw, flags)

    return (matrix, position, flags)
Code Example #7
File: c_like.py  Project: Oire/gobyexample
    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.variable_qualifiers:
                    token = Keyword.Type
                elif value in self.vector_types:
                    token = Keyword.Type
                elif value in self.variables:
                    token = Name.Builtin
                elif value in self.execution_confs:
                    token = Keyword.Pseudo
                elif value in self.function_qualifiers:
                    token = Keyword.Reserved
                elif value in self.functions:
                    token = Name.Function
            yield index, token, value
Code Example #9
def highlight_text():
    my_text.mark_set("range_start", "1.0")
    data = my_text.get("1.0", "end-1c")
    for token, content in lex(data, CLexer()):
        my_text.mark_set("range_end", "range_start + %dc" % len(content))
        my_text.tag_add(str(token), "range_start", "range_end")
        my_text.mark_set("range_start", "range_end")
    # Re-lex with the C++ lexer; reset the start mark first so these tags
    # are applied from the top of the buffer rather than from its end.
    my_text.mark_set("range_start", "1.0")
    for token, content in lex(data, CppLexer()):
        my_text.mark_set("range_end", "range_start + %dc" % len(content))
        my_text.tag_add(str(token), "range_start", "range_end")
        my_text.mark_set("range_start", "range_end")

    my_text.tag_configure("Token.Keyword", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Constant", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Declaration", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Namespace", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Pseudo", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Reserved", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Type", foreground="#CC7A00")
    my_text.tag_configure("Token.Name", foreground="#003D99")
    my_text.tag_configure("Token.Name.Attribute", foreground="#003D99")
    my_text.tag_configure("Token.Name.Builtin", foreground="#003D99")
    my_text.tag_configure("Token.Name.Builtin.Pseudo", foreground="#003D99")
    my_text.tag_configure("Token.Name.Class", foreground="#003D99")
    my_text.tag_configure("Token.Name.Constant", foreground="#003D99")
    my_text.tag_configure("Token.Name.Exception", foreground="#003D99")
    my_text.tag_configure("Token.Name.Decorator", foreground="#003D99")
    my_text.tag_configure("Token.Name.Entity", foreground="#003D99")
    my_text.tag_configure("Token.Name.Label", foreground="#003D99")
    my_text.tag_configure("Token.Name.Namespace", foreground="#003D99")
    my_text.tag_configure("Token.Name.Other", foreground="#003D99")
    my_text.tag_configure("Token.Name.Tag", foreground="#003D99")
    my_text.tag_configure("Token.Name.Variable", foreground="#003D99")
    my_text.tag_configure("Token.Name.Function", foreground="#003D99")
    my_text.tag_configure("Token.Operator.Word", foreground="#CC7A00")
    my_text.tag_configure("Token.Comment", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Single", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Multiline", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Preproc", foreground="#003D99")
    my_text.tag_configure("Token.Literal", foreground="#248F24")
    my_text.tag_configure("Token.Literal.String", foreground="#248F24")
    my_text.tag_configure("Token.String", foreground="#248F24")
    my_text.tag_configure("Token.Generic", foreground="#4FFF00")
    my_text.tag_configure("Token.Generic.Heading", foreground="#4FFF00")
    my_text.tag_configure("Token.Generic.Subheading", foreground="#4FFF00")
    my_text.tag_configure("Token.Operator", foreground="#FF0000")
    my_text.tag_configure("Token.Operator.Word", foreground="#FF0000")
Code Example #10
File: output_moodle_xml.py  Project: elavinal/genquiz
def generate_code_image(code, save_image=False, qname=None):
    # Generate file name from question name
    Path('img').mkdir(parents=True, exist_ok=True)
    if save_image and qname is not None:
        fname = "img/" + qname.replace(" ", "-") + ".png"
    else:
        fname = "img/out.png"
    # Use pygments to highlight the code and generate image
    with open(fname, "wb") as png_file:
        highlight(code, CLexer(),
                  ImageFormatter(line_pad=4, image_pad=5, line_numbers=False),
                  png_file)
    # Encode image as a base64 string
    with open(fname, 'rb') as image_file:
        encoded_str = base64.b64encode(image_file.read()).decode('UTF-8')
        img_str = '<img alt="code-fig" src="data:image/png;base64,{}">'.format(
            encoded_str)
    return img_str
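
A call might look like the following (the question name is made up for illustration):

# Usage sketch: returns an <img> tag with the PNG inlined as base64.
img_tag = generate_code_image('int x = 42;', save_image=True, qname='q1 pointers')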
Code Example #11
def make_question_multichoice(question):
    # Name
    q = make_question_title(question['qname'])
    # Text
    q += question_text(question)
    # Answers
    q += '<form>\n'
    if question['type'] == 'single':
        in_type = 'radio'
    else:
        in_type = 'checkbox'
    for i, answer in enumerate(question['answers']):
        q += '  <input type="{}" id="id{}" name="quest">\n'.format(in_type, i)
        if 'text' in answer:
            q += '  <label for="id{}">{} <span class="ans">[{}]</span></label><br>\n'\
                    .format(i, answer['text'], answer['fraction'])
        elif 'code' in answer:
            code_html = highlight(answer['code'], CLexer(), HtmlFormatter())
            q += '  <label for="id{}">{} <span class="ans">[{}]</span></label><br>\n'\
                    .format(i, code_html, answer['fraction'])
    q += '</form>\n'
    return q
Code Example #12
    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value
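
Examples #3, #4, #7, and this one all follow the same Pygments idiom: subclass CLexer and remap Name tokens after lexing. A complete minimal subclass built on this idiom could look like the sketch below (the dialect name and keyword set are hypothetical):

from pygments.lexers.c_cpp import CLexer
from pygments.token import Name, Keyword

class MyDialectLexer(CLexer):
    """Hypothetical C dialect: promote extra identifiers to keywords."""
    name = 'MyDialect'
    aliases = ['mydialect']
    EXTRA_KEYWORDS = {'foreach', 'fixed_t'}   # made-up identifiers

    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value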
Code Example #13
    def __getitem__(self, index):
        if self.code is None:
            with open(self.path) as fh:
                formatted = highlight(fh.read(), CLexer(), TheFormatter)
                self.code = formatted.splitlines()
        return self.code[index]
Code Example #14
def generate_code_html(question):
    # Use pygments to highlight the code
    code_html = highlight(question['code'], CLexer(), HtmlFormatter())
    return code_html
Code Example #15
File: keymap.py  Project: tarynshitup/vial-qmk
def _get_layers(keymap):  # noqa C901 : until someone has a good idea how to simplify/split up this code
    """ Find the layers in a keymap.c file.

    Args:
        keymap: the content of the keymap.c file

    Returns:
        a dictionary containing the parsed keymap
    """
    layers = list()
    opening_braces = '({['
    closing_braces = ')}]'
    keymap_certainty = brace_depth = 0
    is_keymap = is_layer = is_adv_kc = False
    layer = dict(name=False, layout=False, keycodes=list())
    for line in lex(keymap, CLexer()):
        if line[0] is Token.Name:
            if is_keymap:
                # If we are inside the keymap array
                # we know the keymap's name and the layout macro will come,
                # followed by the keycodes
                if not layer['name']:
                    if line[1].startswith('LAYOUT') or line[1].startswith('KEYMAP'):
                        # This can happen if the keymap array only has one layer,
                        # for macropads and such
                        layer['name'] = '0'
                        layer['layout'] = line[1]
                    else:
                        layer['name'] = line[1]
                elif not layer['layout']:
                    layer['layout'] = line[1]
                elif is_layer:
                    # If we are inside a layout macro,
                    # collect all keycodes
                    if line[1] == '_______':
                        kc = 'KC_TRNS'
                    elif line[1] == 'XXXXXXX':
                        kc = 'KC_NO'
                    else:
                        kc = line[1]
                    if is_adv_kc:
                        # If we are inside an advanced keycode
                        # collect everything and hope the user
                        # knew what he/she was doing
                        layer['keycodes'][-1] += kc
                    else:
                        layer['keycodes'].append(kc)

            # The keymaps array's signature:
            # const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS]
            #
            # Only if we've found all 6 keywords in this specific order
            # can we know for sure that we are inside the keymaps array
            elif line[1] == 'PROGMEM' and keymap_certainty == 2:
                keymap_certainty = 3
            elif line[1] == 'keymaps' and keymap_certainty == 3:
                keymap_certainty = 4
            elif line[1] == 'MATRIX_ROWS' and keymap_certainty == 4:
                keymap_certainty = 5
            elif line[1] == 'MATRIX_COLS' and keymap_certainty == 5:
                keymap_certainty = 6
        elif line[0] is Token.Keyword:
            if line[1] == 'const' and keymap_certainty == 0:
                keymap_certainty = 1
        elif line[0] is Token.Keyword.Type:
            if line[1] == 'uint16_t' and keymap_certainty == 1:
                keymap_certainty = 2
        elif line[0] is Token.Punctuation:
            if line[1] in opening_braces:
                brace_depth += 1
                if is_keymap:
                    if is_layer:
                        # We found the beginning of a non-basic keycode
                        is_adv_kc = True
                        layer['keycodes'][-1] += line[1]
                    elif line[1] == '(' and brace_depth == 2:
                        # We found the beginning of a layer
                        is_layer = True
                elif line[1] == '{' and keymap_certainty == 6:
                    # We found the beginning of the keymaps array
                    is_keymap = True
            elif line[1] in closing_braces:
                brace_depth -= 1
                if is_keymap:
                    if is_adv_kc:
                        layer['keycodes'][-1] += line[1]
                        if brace_depth == 2:
                            # We found the end of a non-basic keycode
                            is_adv_kc = False
                    elif line[1] == ')' and brace_depth == 1:
                        # We found the end of a layer
                        is_layer = False
                        layers.append(layer)
                        layer = dict(name=False, layout=False, keycodes=list())
                    elif line[1] == '}' and brace_depth == 0:
                        # We found the end of the keymaps array
                        is_keymap = False
                        keymap_certainty = 0
            elif is_adv_kc:
                # Advanced keycodes can contain other punctuation
                # e.g.: MT(MOD_LCTL | MOD_LSFT, KC_ESC)
                layer['keycodes'][-1] += line[1]

        elif line[0] is Token.Literal.Number.Integer and is_keymap and not is_adv_kc:
            # If the pre-processor finds the 'meaning' of the layer names,
            # they will be numbers
            if not layer['name']:
                layer['name'] = line[1]

        else:
            # We only care about
            # operators and such if we
            # are inside an advanced keycode
            # e.g.: MT(MOD_LCTL | MOD_LSFT, KC_ESC)
            if is_adv_kc:
                layer['keycodes'][-1] += line[1]

    return layers
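
As a usage sketch (the path is illustrative), the parser is fed the raw contents of a keymap.c file:

from pathlib import Path

# Usage sketch: parse the layers out of a keymap.c file.
keymap_source = Path('keyboards/planck/keymaps/default/keymap.c').read_text()
for layer in _get_layers(keymap_source):
    print(layer['name'], layer['layout'], len(layer['keycodes']))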
Code Example #16
                        makevar='make variable',
                        program='program')


class ManPage(rst.Inline):
    style = 'man page'


lexers = dict(
    none=TextLexer(stripnl=False),
    python=PythonLexer(stripnl=False),
    python3=Python3Lexer(stripnl=False),
    pycon=PythonConsoleLexer(stripnl=False),
    pycon3=PythonConsoleLexer(python3=True, stripnl=False),
    rest=RstLexer(stripnl=False),
    c=CLexer(stripnl=False),
)  # type: Dict[unicode, Lexer]
for _lexer in lexers.values():
    _lexer.add_filter('raiseonerror')


class Literal_Block(rst.Literal_Block):
    @staticmethod
    def lexer_getter(text, language):
        # This is a partial copy of Sphinx's PygmentsBridge.highlight_block()
        if language in ('py', 'python'):
            if text.startswith('>>>'):
                # interactive session
                lexer = lexers['pycon']
            else:
                lexer = lexers['python']
Code Example #17
File: conf.py  Project: offa/Criterion
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Register the C lexer explicitly (the 'startinline' option is only
# meaningful for PhpLexer and is ignored by CLexer)
from sphinx.highlighting import lexers
from pygments.lexers.c_cpp import CLexer

lexers['c'] = CLexer(startinline=True)
Code Example #18
File: source_code.py  Project: aakropotkin/igcc
def color_code(source_code):
    from pygments import highlight
    from pygments.lexers.c_cpp import CLexer
    from pygments.formatters import Terminal256Formatter
    return highlight(source_code, CLexer(), Terminal256Formatter(style='trac'))
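
Since it returns an ANSI-escaped string, usage is a one-liner:

# Usage sketch: print colorized C source to the terminal.
print(color_code('int main(void) { return 0; }'))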
Code Example #19
    def _code(self):
        if self.__code is None:
            with open(self.path) as fh:
                formatted = highlight(fh.read(), CLexer(), TheFormatter)
                self.__code = formatted.splitlines()
        return self.__code