def get_tokens_unprocessed(self, text):
    for index, token, value in CLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in self.EXTRA_TYPE:
            yield index, Keyword.Type, value
        elif token is Name and value in self.EXTRA_KEYWORDS:
            yield index, Keyword, value
        else:
            yield index, token, value
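
For context, a complete subclass along these lines might look as follows. This is only a sketch: the CustomCLexer name and the contents of EXTRA_TYPE and EXTRA_KEYWORDS are illustrative placeholders, not taken from the original project.

from pygments.lexers import CLexer
from pygments.token import Keyword, Name


class CustomCLexer(CLexer):
    """Sketch: a CLexer that promotes selected identifiers to keywords/types."""

    # Placeholder sets; a real subclass defines its own vocabulary.
    EXTRA_TYPE = {'u8', 'u16', 'u32', 'u64'}
    EXTRA_KEYWORDS = {'likely', 'unlikely'}

    def get_tokens_unprocessed(self, text):
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_TYPE:
                yield index, Keyword.Type, value
            elif token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value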
Example #2
def get_tokens_unprocessed(self, text):
    for index, token, value in CLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in self.xmds_types:
                token = Keyword.Type
            elif value in self.xmds_functions:
                token = Name.Function
            elif value in self.xmds_constants:
                token = Keyword.Constant
        yield index, token, value
Example #3
class ProgrammingLexer(object):
    """lexes a string with multiple programming lexers and returns tokens"""

    lexers = {
        'actionscript': ActionScript3Lexer(),
        'c': CLexer(),
        'cpp': CppLexer(),
        'cs': CSharpLexer(),
        'java': JavaLexer(),
        'javascript': JavascriptLexer(),
        'perl': PerlLexer(),
        'php': PhpLexer(startinline=True),
        'python': PythonLexer(),
        'ruby': RubyLexer(),
        'vb': VbNetLexer(),
    }

    matched_languages = []
    data = None

    def __init__(self, matched_langs, data_string):
        self.matched_languages = matched_langs
        self.data = data_string

    def lex(self):
        """
        For every possible matched language, we run a lexer to see if we can
        eliminate it as a possible match. If we detect errors, or have no
        lexer matches, we remove it from the list.

        :return: the list of lexer results
        :rtype: list
        """
        results = {}
        threads = []

        # Start one lexer thread per matched language that has a lexer.
        for lexer_id, lexer in self.lexers.items():
            if lexer_id not in self.matched_languages:
                continue
            thread = ProgrammingLexerThread(lexer_id, lexer, self.data)
            thread.start()
            threads.append(thread)

        for thr in threads:
            thr.join()

        for thr in [th for th in threads if th.result]:
            results[thr.thread_id] = thr.result

        return results
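
ProgrammingLexerThread is not part of the listing above. A plausible sketch, assuming it simply lexes the data in a worker thread and clears its result when Pygments reports error tokens, could be:

import threading

from pygments.token import Error


class ProgrammingLexerThread(threading.Thread):
    """Hypothetical worker: lex `data` with `lexer` and store a score in .result."""

    def __init__(self, thread_id, lexer, data):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.lexer = lexer
        self.data = data
        self.result = None

    def run(self):
        tokens = list(self.lexer.get_tokens(self.data))
        # Any Error token disqualifies the language; otherwise report the token count.
        if any(token in Error for token, value in tokens):
            self.result = None
        else:
            self.result = len(tokens)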
Example #4
def highlight(code, language='c'):
    """Syntax-highlight code using pygments, if installed."""
    try:
        from pygments.formatters.terminal256 import Terminal256Formatter
        from pygments.lexers.compiled import CLexer
        from pygments.lexers.asm import LlvmLexer
        from pygments import highlight
    except ImportError:
        log.info("install pygments for syntax-highlighted output.")
        return code

    if language.lower() == 'llvm':
        lexer = LlvmLexer()
    elif language.lower() == 'c':
        lexer = CLexer()
    else:
        raise ValueError("Unrecognized highlight language: %s" % language)

    style = ctree.CONFIG.get('log', 'pygments_style')
    return highlight(code, lexer, Terminal256Formatter(style=style))
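
A possible call, assuming pygments is installed and ctree's [log] pygments_style option is configured as the function expects:

print(highlight('int main(void) { return 0; }', language='c'))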
Example #5
import codecs
import os
import sys

from pygments import highlight
from pygments.formatters import LatexFormatter
from pygments.lexers import CLexer, PythonLexer
# TigerLexer is a project-specific lexer; its import lives elsewhere in the
# original project and is not shown here.


def main():
    """
    Main entry point of the script.
    """
    if len(sys.argv) != 2:
        print('Usage: {0} <input-file>'.format(os.path.basename(sys.argv[0])))
    else:
        input_file = os.path.abspath(sys.argv[1])
        if input_file.endswith('.py'):
            lexer = PythonLexer()
        elif input_file.endswith('.c') or input_file.endswith('.h'):
            lexer = CLexer()
        elif input_file.endswith('.tiger') or input_file.endswith('.tig'):
            lexer = TigerLexer()
        else:
            print('Error: Invalid input file. Only C, Python and Tiger programs accepted.')
            sys.exit()
        # Swap the input file's extension for .tex to build the output path.
        dot_index = -len(input_file) + input_file.rfind('.')
        output_file = '%s.tex' % input_file[:dot_index]
        with codecs.open(input_file, encoding='utf-8') as input:
            with codecs.open(output_file, mode='w',
                             encoding='utf-8') as output:
                highlight(input.read(), lexer, LatexFormatter(), output)
Example #6
    def code(self):
        """generate the contents of the #code section"""
        # Get ready to use Pygments:
        formatter = CodeHtmlFormatter(
            style='default',
            cssclass='source',
            linenostart=self.data['function']['lines'][0],
        )

        # <link rel="stylesheet" href="pygments_c.css" type="text/css">
        open('pygments_c.css', 'w').write(formatter.get_style_defs())

        # Use pygments to convert it all to HTML:
        code = parse(highlight(self.raw_code(), CLexer(), formatter))

        # linkify the python C-API functions
        for name in code.xpath('//span[@class="n"]'):
            url = capi.get_url(name.text)
            if url is not None:
                link = E.A(name.text, href=url)
                name.text = None
                name.append(link)

        return code
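
CodeHtmlFormatter is defined elsewhere in that project. Judging only by the keyword arguments passed above (style, cssclass, linenostart), a minimal stand-in could be an HtmlFormatter subclass that turns on inline line numbers; this is an assumption, not the project's actual class:

from pygments.formatters import HtmlFormatter


class CodeHtmlFormatter(HtmlFormatter):
    # Assumed behaviour: render line numbers inline so linenostart takes effect.
    def __init__(self, **kwargs):
        kwargs.setdefault('linenos', 'inline')
        HtmlFormatter.__init__(self, **kwargs)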
Example #7
pygments_style = 'symfonycom.sphinx.SensioStyle'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Settings for symfony doc extension ---------------------------------------------------

# enable highlighting for PHP code that is not wrapped in ``<?php ... ?>`` by default
lexers['markdown'] = TextLexer()
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
lexers['php-attributes'] = PhpLexer(startinline=True)
lexers['php-standalone'] = PhpLexer(startinline=True)
lexers['php-symfony'] = PhpLexer(startinline=True)
lexers['rst'] = RstLexer()
lexers['varnish2'] = CLexer()
lexers['varnish3'] = CLexer()
lexers['varnish4'] = CLexer()
lexers['terminal'] = TerminalLexer()
lexers['env'] = BashLexer()

config_block = {
    'apache': 'Apache',
    'markdown': 'Markdown',
    'nginx': 'Nginx',
    'rst': 'reStructuredText',
    'varnish2': 'Varnish 2',
    'varnish3': 'Varnish 3',
    'varnish4': 'Varnish 4',
    'env': '.env'
}
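
As an alternative to writing into sphinx.highlighting.lexers directly, a conf.py can register lexers through the Sphinx application API. The sketch below is only an approximate equivalent: it assumes a Sphinx version whose app.add_lexer() accepts a lexer class, and it leaves out the php* aliases (they need startinline=True) and the project-specific TerminalLexer, which keep the dictionary approach shown above.

from pygments.lexers import BashLexer, CLexer, RstLexer, TextLexer


def setup(app):
    # Register the argument-free lexers via the Sphinx application API.
    for alias in ('varnish2', 'varnish3', 'varnish4'):
        app.add_lexer(alias, CLexer)
    app.add_lexer('markdown', TextLexer)
    app.add_lexer('rst', RstLexer)
    app.add_lexer('env', BashLexer)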
Example #8
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#

import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
from pygments.lexers.compiled import CLexer

lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['varnish3'] = CLexer()
lexers['varnish'] = CLexer()

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

primary_domain = 'php'
highlight_language = 'php'

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.coverage', 'sphinx.ext.extlinks',
Example #9
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
from pygments.lexers.compiled import CLexer

lexers['varnish4'] = CLexer()

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
Example #10
from pygments import highlight
from pygments.lexers.compiled import CLexer
from pygments.formatters import HtmlFormatter

print "<pre>"
try:
  while True:
    line = raw_input()
    if (line[:4] == 'diff') or (line[:5] == 'index') or (line[:3] == '---') or (line[:3] == '+++'):
      print ('<span class="dh">' + line + '</span>')
    elif line[:2] == '@@':
      ix = line[2:].index('@') + 4
      print ('<span class="dl">' + line[:ix] + highlight(line[ix:], CLexer(), HtmlFormatter(nowrap=True))[:-1] + '</span>')
    elif line[:1] == ' ':
      print ('<span class="du"> ' + highlight(line[1:], CLexer(), HtmlFormatter(nowrap=True))[:-1] + '</span>')
    elif line[:1] == '+':
      print ('<span class="di">+' + highlight(line[1:], CLexer(), HtmlFormatter(nowrap=True))[:-1] + '</span>')
    elif line[:1] == '-':
      print ('<span class="dd">-' + highlight(line[1:], CLexer(), HtmlFormatter(nowrap=True))[:-1] + '</span>')
    else:
      print ('<span class="dw">' + line + '</span>')
except EOFError:
  pass
print "</pre>"