def preprocess(filename, b=False, bo="none.txt"):
    """
    Removes boilerplate (if requested) and strips comments, preprocessor
    directives, and whitespace, then replaces variable names with 'V',
    string literals with 'S', and function names with 'F'; all other
    tokens are kept verbatim.
    """
    try:
        if b:
            boiler(bo, filename)
        with open(filename, "r") as f:
            text = f.read()
        import pygments.token
        from pygments.lexers.c_cpp import CppLexer

        tokens = list(CppLexer().get_tokens(text))
        last = len(tokens) - 1
        result = ""
        for i, (ttype, value) in enumerate(tokens):
            # A Name token not directly followed by '(' is treated as a variable.
            if ttype == pygments.token.Name and i != last and tokens[i + 1][1] != '(':
                result += 'V'
            elif ttype in pygments.token.Literal.String:
                result += 'S'
            elif ttype in pygments.token.Name.Function:
                result += 'F'
            elif ttype == pygments.token.Text or ttype in pygments.token.Comment:
                pass  # whitespace and comments are dropped
            else:
                result += value
        with open(filename, "w") as f:
            f.write(result)
    except Exception:
        # Best effort: on any error, leave the file as it was.
        return
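A quick sketch of what this normalization yields (the sample file and its contents are illustrative, not part of the example itself):

with open("sample.cpp", "w") as f:
    f.write("int foo(int x) { return x + 1; }\n")
preprocess("sample.cpp")
# File contents afterwards, roughly: intF(intV){returnV+1;}
print(open("sample.cpp").read())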
Example #2
    def createLexers(self):

        lex = {}
        lex['.c'] = CFamilyLexer()
        lex['.h'] = CFamilyLexer()
        lex['.cpp'] = CppLexer()
        lex['.hpp'] = CppLexer()
        lex['.css'] = CssLexer()
        lex['.sass'] = SassLexer()
        lex['.yaml'] = YamlLexer()
        lex['.yml'] = YamlLexer()
        lex['.json'] = JsonLexer()
        lex['.cs'] = CSharpLexer()
        lex['.fs'] = FSharpLexer()
        lex['.e'] = EiffelLexer()
        lex['.erl'] = ErlangLexer()
        lex['.hrl'] = ErlangLexer()
        lex['.es'] = ErlangLexer()
        lex['.f03'] = FortranLexer()
        lex['.f90'] = FortranLexer()
        lex['.F03'] = FortranLexer()
        lex['.F90'] = FortranLexer()
        lex['.go'] = GoLexer()
        lex['.hs'] = HaskellLexer()
        lex['.v'] = VerilogLexer()
        lex['.vhdl'] = VhdlLexer()
        lex['.vhd'] = VhdlLexer()
        lex['.html'] = HtmlLexer()
        lex['.htm'] = HtmlLexer()
        lex['.xhtml'] = HtmlLexer()
        lex['.xml'] = XmlLexer()
        lex['.js'] = JavascriptLexer()
        lex['.ts'] = TypeScriptLexer()
        lex['.coffee'] = CoffeeScriptLexer()
        lex['.java'] = JavaLexer()
        lex['.scala'] = ScalaLexer()
        lex['.kt'] = KotlinLexer()
        lex['.ktm'] = KotlinLexer()
        lex['.kts'] = KotlinLexer()
        lex['.lisp'] = CommonLispLexer()
        lex['make'] = MakefileLexer()
        lex['Make'] = MakefileLexer()
        lex['CMake'] = CMakeLexer()
        lex['cmake'] = CMakeLexer()
        lex['.m'] = MatlabLexer()
        lex['.mat'] = MatlabLexer()
        lex['.dpr'] = DelphiLexer()
        lex['.perl'] = PerlLexer()
        lex['.php'] = PhpLexer()
        lex['.pr'] = PrologLexer()
        lex['.py'] = Python3Lexer()
        lex['.rb'] = RubyLexer()
        lex['.sh'] = BashLexer()
        lex['.sql'] = MySqlLexer()
        lex['.mysql'] = MySqlLexer()
        lex['.tcl'] = TclLexer()
        lex['.awk'] = AwkLexer()

        return lex
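A sketch of how such a table is typically consulted, falling back to pygments' own filename-based lookup for unknown extensions (the helper name is illustrative):

import os
from pygments.lexers import get_lexer_for_filename
from pygments.util import ClassNotFound

def lexer_for(path, table):
    # Exact extension match first, then pygments' registry as a fallback.
    ext = os.path.splitext(path)[1]
    if ext in table:
        return table[ext]
    try:
        return get_lexer_for_filename(path)
    except ClassNotFound:
        return None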
Example #3
    def __init__(self, filename):
        existing_filename = None
        if os.path.exists(filename):
            existing_filename = filename
        else:
            # 'args' is assumed to be the enclosing script's parsed
            # command-line options (it is not defined in this snippet).
            fn = os.path.join(args.source_dir, filename)
            if os.path.exists(fn):
                existing_filename = fn

        self.stream = open(
            os.path.join(args.output_dir,
                         SourceFileRenderer.html_file_name(filename)), 'w')
        if existing_filename:
            self.source_stream = open(existing_filename)
        else:
            self.source_stream = None
            print('''
<html>
<h1>Unable to locate file {}</h1>
</html>
            '''.format(filename),
                  file=self.stream)

        self.html_formatter = HtmlFormatter()
        self.cpp_lexer = CppLexer()
Example #4
    def __init__(self, source_dir, output_dir, filename):
        existing_filename = None
        if os.path.exists(filename):
            existing_filename = filename
        else:
            fn = os.path.join(source_dir, filename)
            if os.path.exists(fn):
                existing_filename = fn

        self.stream = codecs.open(os.path.join(
            output_dir, optrecord.html_file_name(filename)),
                                  'w',
                                  encoding='utf-8')
        if existing_filename:
            self.source_stream = open(existing_filename)
        else:
            self.source_stream = None
            print('''
<html>
<h1>Unable to locate file {}</h1>
</html>
            '''.format(filename),
                  file=self.stream)

        self.html_formatter = HtmlFormatter(encoding='utf-8')
        self.cpp_lexer = CppLexer(stripnl=False)
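These pieces come together in the usual pygments way; a minimal standalone sketch (file names are illustrative):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.c_cpp import CppLexer

with open("input.cpp") as src, open("out.html", "w") as out:
    # full=True emits a complete HTML document rather than a bare fragment.
    out.write(highlight(src.read(), CppLexer(stripnl=False), HtmlFormatter(full=True)))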
Example #5
def process_file(path, args):
    with open(path, 'rb') as fp:
        text = fp.read().decode('utf-8')

    nl_style = determine_nl_style(path)

    lexer = CppLexer()
    tokens = lexer.get_tokens_unprocessed(text)
    fixup_tool = FixupTool(path, tokens, nl_style)
    changes = fixup_tool.parse()
    new_text = change_text(text, changes)

    if args.in_place:
        # Write back with the same encoding the file was read with;
        # newline='' writes new_text verbatim, preserving the detected style.
        with open(path, 'w', encoding='utf-8', newline='') as fp:
            fp.write(new_text)
    else:
        print(new_text)
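Unlike get_tokens, get_tokens_unprocessed yields (offset, tokentype, value) triples, which is what makes position-based rewriting like the above possible:

from pygments.lexers.c_cpp import CppLexer

for offset, ttype, value in CppLexer().get_tokens_unprocessed("int x = 42;"):
    print(offset, ttype, repr(value))
# 0 Token.Keyword.Type 'int'
# 3 Token.Text.Whitespace ' '   (exact whitespace token type varies by pygments version)
# 4 Token.Name 'x'
# ...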
Example #6
    def build(self):
        languages = Spinner(text='CppLexer',
                            values=sorted([
                                'CppLexer',
                            ] + list(lexers.LEXERS.keys())))

        languages.bind(text=self.change_lang)

        menu = BoxLayout(size_hint_y=None, height='15pt')
        fnt_size = Spinner(text='20',
                           values=list(map(str, list(range(10, 45, 5)))))
        fnt_size.bind(text=self._update_size)

        fonts = [
            file for file in LabelBase._font_dirs_files
            if file.endswith('.ttf')
        ]

        fnt_name = Spinner(text='Courier New',
                           option_cls=Fnt_SpinnerOption,
                           values=fonts)
        fnt_name.bind(text=self._update_font)
        mnu_file = Spinner(text='File',
                           values=('Open', 'SaveAs', 'Save', 'Close'))
        mnu_file.bind(text=self._file_menu_selected)
        key_bindings = Spinner(text='Key bindings',
                               values=('Default key bindings',
                                       'Emacs key bindings'))
        key_bindings.bind(text=self._bindings_selected)

        run_button = Button(text='Run')
        run_button.bind(on_press=self.compile)

        menu.add_widget(mnu_file)
        menu.add_widget(fnt_size)
        menu.add_widget(run_button)
        self.b.add_widget(menu)

        self.codeinput = CodeInputWithBindings(
            lexer=CppLexer(),
            font_size=20,
            text=self.current_code,
            key_bindings='default',
        )
        self.output = ScrolllabelLabel(text="SECTION: OUTPUT\n")

        self.text_input_box = CodeInputWithBindings(text='',
                                                    multiline=False,
                                                    height='0dp')
        self.text_input_box.bind(on_text_validate=self.on_enter)
        self.text_input_box.size_hint_y = None

        self.b.add_widget(self.codeinput)
        self.b.add_widget(self.text_input_box)
        self.b.add_widget(self.output)

        return self.b
Example #7
 def get_tokens_unprocessed(self, text):
     for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self.EXTRA_KEYWORDS:
             yield index, Keyword, value
         elif token is Name and value in self.FUNC_KEYWORDS:
             yield index, Name.Function, value
         elif token is Name and value in self.TYPE_KEYWORDS:
             yield index, Name.Class, value
         elif token is Name and value in self.CONST_KEYWORDS:
             yield index, Name.Constant, value
         else:
             yield index, token, value
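The lexer-extension pattern above needs the surrounding subclass to define its keyword sets; a minimal self-contained sketch (the class name and keyword set are illustrative):

from pygments.lexers.c_cpp import CppLexer
from pygments.token import Name, Keyword

class MyDialectLexer(CppLexer):
    # Illustrative extra words; a real dialect lexer lists its own here.
    EXTRA_KEYWORDS = {'kernel', 'device'}

    def get_tokens_unprocessed(self, text):
        for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value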
Example #8
 def get_tokens_unprocessed(self, text):
     for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
         if value in self.structure:
             yield index, Name.Builtin, value
         elif value in self.operators:
             yield index, Operator, value
         elif value in self.variables:
             yield index, Keyword.Reserved, value
         elif value in self.suppress_highlight:
             yield index, Name, value
         elif value in self.functions:
             yield index, Name.Function, value
         else:
             yield index, token, value
Example #10
    def __init__(self, language="python"):
        """

        :param language: python, javascript, java or cpp
        """
        self.language = language
        if self.language == "python":
            self.lexer = PythonLexer()
        elif self.language == "javascript":
            self.lexer = JavascriptLexer()
        elif self.language == "cpp":
            self.lexer = CppLexer()
        elif self.language == "java":
            self.lexer = JavaLexer()
        else:
            raise NotImplementedError("unsupported language: %s" % language)
Example #11

def highlight_text():
    my_text.mark_set("range_start", "1.0")
    data = my_text.get("1.0", "end-1c")
    # A single CppLexer pass covers plain C sources as well; tag each
    # token's span by advancing the range_start/range_end marks.
    for token, content in lex(data, CppLexer()):
        my_text.mark_set("range_end", "range_start + %dc" % len(content))
        my_text.tag_add(str(token), "range_start", "range_end")
        my_text.mark_set("range_start", "range_end")

    my_text.tag_configure("Token.Keyword", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Constant", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Declaration", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Namespace", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Pseudo", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Reserved", foreground="#CC7A00")
    my_text.tag_configure("Token.Keyword.Type", foreground="#CC7A00")
    my_text.tag_configure("Token.Name", foreground="#003D99")
    my_text.tag_configure("Token.Name.Attribute", foreground="#003D99")
    my_text.tag_configure("Token.Name.Builtin", foreground="#003D99")
    my_text.tag_configure("Token.Name.Builtin.Pseudo", foreground="#003D99")
    my_text.tag_configure("Token.Name.Class", foreground="#003D99")
    my_text.tag_configure("Token.Name.Constant", foreground="#003D99")
    my_text.tag_configure("Token.Name.Exception", foreground="#003D99")
    my_text.tag_configure("Token.Name.Decorator", foreground="#003D99")
    my_text.tag_configure("Token.Name.Entity", foreground="#003D99")
    my_text.tag_configure("Token.Name.Label", foreground="#003D99")
    my_text.tag_configure("Token.Name.Namespace", foreground="#003D99")
    my_text.tag_configure("Token.Name.Other", foreground="#003D99")
    my_text.tag_configure("Token.Name.Tag", foreground="#003D99")
    my_text.tag_configure("Token.Name.Variable", foreground="#003D99")
    my_text.tag_configure("Token.Name.Function", foreground="#003D99")
    my_text.tag_configure("Token.Operator.Word", foreground="#CC7A00")
    my_text.tag_configure("Token.Comment", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Single", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Multiline", foreground="#6C666C")
    my_text.tag_configure("Token.Comment.Preproc", foreground="#003D99")
    my_text.tag_configure("Token.Literal", foreground="#248F24")
    my_text.tag_configure("Token.Literal.String", foreground="#248F24")
    my_text.tag_configure("Token.String", foreground="#248F24")
    my_text.tag_configure("Token.Generic", foreground="#4FFF00")
    my_text.tag_configure("Token.Generic.Heading", foreground="#4FFF00")
    my_text.tag_configure("Token.Generic.Subheading", foreground="#4FFF00")
    my_text.tag_configure("Token.Operator", foreground="#FF0000")
    my_text.tag_configure("Token.Operator.Word", foreground="#FF0000")
Example #12
 def POST(self):
     data = web.input()
     code = data.code
     language = data.lang
     if language == 'python':
         from pygments.lexers.python import PythonLexer
         lexer = PythonLexer()
     elif language == 'php':
         from pygments.lexers.php import PhpLexer
         lexer = PhpLexer()
     elif language == 'java':
         from pygments.lexers.jvm import JavaLexer
         lexer = JavaLexer()
     elif language == 'javascript':
         from pygments.lexers.javascript import JavascriptLexer
         lexer = JavascriptLexer()
     elif language == 'html':
         from pygments.lexers.html import HtmlLexer
         lexer = HtmlLexer()
     elif language == 'cpp':
         from pygments.lexers.c_cpp import CppLexer
         lexer = CppLexer()
     elif language == 'shell':
         from pygments.lexers.shell import ShellSessionLexer
         lexer = ShellSessionLexer()
     elif language == 'matlab':
         from pygments.lexers.matlab import MatlabLexer
         lexer = MatlabLexer()
     elif language == 'ruby':
         from pygments.lexers.ruby import RubyLexer
         lexer = RubyLexer()
     elif language == 'r':
         from pygments.lexers.r import RConsoleLexer
         lexer = RConsoleLexer()
     elif language == 'lisp':
         from pygments.lexers.lisp import SchemeLexer
         lexer = SchemeLexer()
     elif language == 'go':
         from pygments.lexers.go import GoLexer
         lexer = GoLexer()
     else:
         # Fall back to plain text so 'lexer' is always bound.
         from pygments.lexers.special import TextLexer
         lexer = TextLexer()
     formatter = html.HtmlFormatter(linenos=False,
                                    encoding='utf-8',
                                    nowrap=False)
     highlighted_snippet = highlight(code, lexer, formatter)
     return render.result(highlighted_snippet)
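The whole if/elif chain can also collapse into pygments' registry lookup, though note that some aliases resolve to different lexers than the explicit imports above (for instance 'shell' maps to BashLexer, not ShellSessionLexer):

from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

def lexer_for_language(language):
    try:
        return get_lexer_by_name(language)  # 'python', 'cpp', 'go', ...
    except ClassNotFound:
        return get_lexer_by_name('text')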
Example #13
 def get_tokens_unprocessed(self, text):
     for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
         if value in self.operators:
             yield index, Operator, value
         elif value in self.types:
             yield index, Keyword.Type, value
         elif value in self.fespaces:
             yield index, Name.Class, value
         elif value in self.preprocessor:
             yield index, Comment.Preproc, value
         elif value in self.keywords:
             yield index, Keyword.Reserved, value
         elif value in self.functions:
             yield index, Name.Function, value
         elif value in self.parameters:
             yield index, Keyword.Pseudo, value
         elif value in self.suppress_highlight:
             yield index, Name, value
         else:
             yield index, token, value
Example #15
    def print_symbol_sizes(self):
        """
        Prints top list of symbols
        :return:
        """
        if len(self.top_symbols) == 0:
            return

        demangled_symbols = zip(
            demangle([symbol for symbol, _ in self.top_symbols]),
            [size for _, size in self.top_symbols])
        max_digits = len(str(self.top_symbols[0][1]))
        fmt_string = click.style("** ", fg="green") + click.style("{: <" + str(max_digits) + "}", fg="yellow") + \
                     click.style(" : ", fg="green") + "{}"

        lexer = CppLexer()
        formatter = Terminal256Formatter()
        for symbol, size in demangled_symbols:
            print(
                fmt_string.format(sizeof_fmt(size),
                                  highlight(symbol, lexer,
                                            formatter).rstrip()))
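demangle and sizeof_fmt are helpers defined elsewhere in that tool; a plausible sizeof_fmt, shown only as an assumption about its behavior:

def sizeof_fmt(num, suffix="B"):
    # Human-readable byte count, e.g. 4096 -> '4.0KiB' (assumed behavior).
    for unit in ("", "Ki", "Mi", "Gi", "Ti"):
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, "Pi", suffix)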
Example #16
 def get_tokens_unprocessed(self, text):
     for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in self.constants:
                 yield index, Keyword.Constant, value
             elif value in self.functions:
                 yield index, Name.Function, value
             elif value in self.storage:
                 yield index, Keyword.Type, value
             else:
                 yield index, token, value
         elif token is Name.Function:
             if value in self.structure:
                 yield index, Name.Other, value
             else:
                 yield index, token, value
         elif token is Keyword:
             if value in self.storage:
                 yield index, Keyword.Type, value
             else:
                 yield index, token, value
         else:
             yield index, token, value
Example #18
import sys
import os
from sphinx.highlighting import lexers
from pygments.lexers.c_cpp import CppLexer

# 'startinline' is a PhpLexer option and 'linenos' a formatter option;
# CppLexer accepts and silently ignores unknown options, so the effect
# here is simply to register the lexer under the 'c++' alias.
lexers['c++'] = CppLexer(startinline=True, linenos=1)

extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'EOSIO'
copyright = u'2018, block.one'
version = '1.4'
release = '1.4.0'
language = 'ko'
html_title = 'EOSIO'

exclude_patterns = ['_build']
html_static_path = ['_static']

import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'

html_sidebars = {'**': ['logo-text.html', 'globaltoc.html', 'searchbox.html']}

extensions.append("guzzle_sphinx_theme")

html_theme_options = {}
Example #19
 def get_tokens_unprocessed(self, text):
     for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self.EXTRA_TYPES:
             yield index, Keyword.Type, value
         else:
             yield index, token, value
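Wired into highlight(), such a subclass behaves like any stock lexer (MyCppDialectLexer stands in for whatever the subclass above is actually called):

from pygments import highlight
from pygments.formatters import TerminalFormatter

print(highlight("size_t n = 0;", MyCppDialectLexer(), TerminalFormatter()))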
Example #20
from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import RtfFormatter
from pygments.style import Style
from pygments.styles import STYLE_MAP
import sys
import win32clipboard as clippy

# Stylizer settings
styles = list(STYLE_MAP.keys()) # Available styles
style = styles[6]               # NB: which style this picks depends on dict ordering
font = "Monaco"
fontsize = 24


# Get the snippet from argv and style it ('source' avoids shadowing the built-in input)
source = str(sys.argv[1])

output = highlight(source, CppLexer(), RtfFormatter(style=style, fontface=font, fontsize=fontsize))


# Copy to clipboard
CF_RTF = clippy.RegisterClipboardFormat("Rich Text Format")

output = bytearray(output, "utf8")

clippy.OpenClipboard(0)
clippy.EmptyClipboard()
clippy.SetClipboardData(CF_RTF, output)
clippy.CloseClipboard()
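Usage sketch (Windows only, given win32clipboard; the script name is illustrative): run python stylize.py "int main() { return 0; }" and then paste into any RTF-aware editor such as WordPad.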
Example #21
 def change_lang(self, instance, z):
     if z == 'CppLexer':
         lx = CppLexer()
     else:
         lx = lexers.get_lexer_by_name(lexers.LEXERS[z][2][0])
     self.codeinput.lexer = lx
Example #22
 def print(self):
     if self.project is None:
         raise RuntimeError("project is None!")
     print(
         highlight(self.project.unparse(), CppLexer(),
                   Terminal256Formatter()))
Example #23
 def fetch_tokens(self):
     if self._tokens is not None:
         return self._tokens
     code = self.fetch()
     lexer = CppLexer()
     # Cache the token list so repeated calls do not re-lex the code.
     self._tokens = list(lexer.get_tokens_unprocessed(code))
     return self._tokens