Esempio n. 1
0
def checkBadLoopCollect(code):
    """
    Look for bad loop like 'for i in range(len(list))'
    Documentation: https://youtu.be/OSGv2VnC0go?t=4m47s

    :param code: Python source text to scan.
    :return: PythonIdiom('badLoop') with one entry per match line.
    """
    # Token sequence matching "for <name> in range(len(<name>".
    # Raw strings avoid Python 3 invalid-escape warnings for \w and \(.
    sequence = [(Token.Keyword, r'^for$'),
                (Token.Name, r'^\w+$'),
                (Token.Operator.Word, r'^in$'),
                (Token.Name.Builtin, r'^range$|^xrange$'),
                (Token.Punctuation, r'^\($'),
                (Token.Name.Builtin, r'^len$'),
                (Token.Punctuation, r'^\($'),
                (Token.Name, r'^\w+$')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badLoopCollectIdiom = PythonIdiom('badLoop')

    # _findSeqInTokens consumes the shared token stream, so each call resumes
    # after the previous match; returned offsets are relative and accumulate.
    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux - 1
        badLoopCollectIdiom.addNew(lineNumber)
    log("badLoopCollect found in lines {0}".format(badLoopCollectIdiom.getLines()))

    return badLoopCollectIdiom
Esempio n. 2
0
def findDocstring(code):
    """Find the use of documentation in the functions, classes or script
    Documentation: https://www.python.org/dev/peps/pep-0257/
    """
    pyLexer = PythonLexer()
    pyLexer.add_filter('tokenmerge')

    # Keyword markers that set the context for the next docstring seen.
    classMarker = (Token.Keyword, '^class$')
    defMarker = (Token.Keyword, '^def$')

    docIdiom = PythonIdiom('docstring')
    counts = defaultdict(int)
    context = 'module'
    line = 1

    for tok in pygments.lex(code, pyLexer):
        ttype = tok[0]
        if _sameToken(tok, classMarker):
            context = 'class'
        elif _sameToken(tok, defMarker):
            context = 'function'
        elif ttype == Token.Literal.String.Doc:
            # Attribute the docstring to the most recent def/class (or the
            # module itself when none has been seen yet).
            counts[context] += 1
            docIdiom.addNew(line)
        line += _getNewLines(tok)

    for context in counts:
        log("type %s: %d found" % (context, counts[context]))
    log('DocString found in lines: ' + str(docIdiom.getLines()))
    return docIdiom
Esempio n. 3
0
def checkNotRange(code):
    """
    Check if there is: for xx in [0,1,2] instead of for xxx in (x)range
    Documentation: https://youtu.be/OSGv2VnC0go?t=3m4s

    :param code: Python source text to scan.
    :return: PythonIdiom('notRange') with one entry per match line.
    """
    # Token sequence for "for <name> in [<int>". Raw strings avoid Python 3
    # invalid-escape warnings; \d+ also catches multi-digit first elements
    # such as "for x in [10, 11]" (the original ^\d$ matched single digits
    # only).
    sequence = [(Token.Keyword, r'^for$'),
                (Token.Name, r'^\w+$'),
                (Token.Operator.Word, r'^in$'),
                (Token.Punctuation, r'^\[$'),
                (Token.Literal.Number.Integer, r'^\d+$')]

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    notRangeIdiom = PythonIdiom('notRange')

    # _findSeqInTokens consumes the shared token stream, so returned offsets
    # are relative to the previous match; accumulate them into line numbers.
    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux - 1
        notRangeIdiom.addNew(lineNumber)
    log("badForIn found in lines {0}".format(notRangeIdiom.getLines()))
    return notRangeIdiom
Esempio n. 4
0
def basicStructure(code):
    """Lex *code* with a merged-token Python lexer and print every token.

    Debug helper: dumps the (token type, text) stream the checker
    functions consume. The unused ``sequence`` local was removed.
    """
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    for token in tokens:
        # print(token) is valid on both Python 2 and 3 for a single
        # argument; the original "print token" was Python-2-only syntax.
        print(token)
Esempio n. 5
0
def basicStructure(code):
    """Lex *code* with a merged-token Python lexer and print every token.

    Debug helper: dumps the (token type, text) stream the checker
    functions consume. The unused ``sequence`` local was removed.
    """
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    for token in tokens:
        # print(token) is valid on both Python 2 and 3 for a single
        # argument; the original "print token" was Python-2-only syntax.
        print(token)
Esempio n. 6
0
def findBadUseImport(code):
    """
    Find when use from foo import *
    Documentation: http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#importing
                   https://docs.python.org/2/howto/doanddont.html#from-module-import
    """
    # Token sequence matching "from <module> import *". Raw strings fix the
    # invalid escape '\*' (a DeprecationWarning on Python 3, and fragile).
    sequence = [(Token.Keyword.Namespace, r'^from$'),
                (Token.Name.Namespace, r'.*'),
                (Token.Keyword.Namespace, r'^import$'),
                (Token.Operator, r'\*')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badUseImport = PythonIdiom('badImport')

    # _findSeqInTokens consumes the shared token stream, so returned offsets
    # are relative to the previous match; accumulate them into line numbers.
    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux - 1
        badUseImport.addNew(lineNumber)
    log("badUseImport found in lines {0}".format(badUseImport.getLines()))

    return badUseImport
Esempio n. 7
0
def redent(s):
    """
    Shamelessly stolen from infobob(#python bot) code
    https://code.launchpad.net/~pound-python/infobat/infobob
    """
    # Run the source through the redent filter; NullFormatter emits the
    # token text back as plain text.
    lex = PythonLexer()
    lex.add_filter(_RedentFilter())
    return highlight(s, lex, NullFormatter())
Esempio n. 8
0
def python_prettify(code, style):
    """Highlight *code* as HTML with visible spaces and line numbers.

    :param code: Python source text.
    :param style: pygments ``linenos`` mode (e.g. 'table' or 'inline').
    :return: a safe HTML fragment ready for template insertion.
    """
    lexer = PythonLexer()
    # Fix: the HTML entity for a non-breaking space is "&nbsp;" — the
    # original "&nbsp" (missing semicolon) is rendered literally by strict
    # parsers and breaks the visible-whitespace output.
    lexer.add_filter(VisibleWhitespaceFilter(spaces="&nbsp;"))
    pretty_code = highlight(
        code, lexer, HtmlFormatter(
            linenos=style, linenostart=0))
    return format_html('{}', mark_safe(pretty_code))
Esempio n. 9
0
def findUseMapFilterReduce(code):
    """
    Find the use of map, filter and reduce builtins in the code.
    A better option is the use of generators and list comprehensions
    Documentation: Fluent Python page 142
                   https://docs.python.org/2/library/functions.html#map
                   https://docs.python.org/2/library/functions.html#filter
                   https://docs.python.org/2/library/functions.html#reduce
    """
    mapIdiom = PythonIdiom('map')
    filterIdiom = PythonIdiom('filter')
    reduceIdiom = PythonIdiom('reduce')

    # (token pattern, counter key, idiom) — checked in this order per token.
    targets = ((Token.Name.Builtin, '^filter$'), 'filter', filterIdiom), \
              ((Token.Name.Builtin, '^reduce$'), 'reduce', reduceIdiom), \
              ((Token.Name.Builtin, '^map$'), 'map', mapIdiom)
    tokensFound = {'filter': 0,
                   'map': 0,
                   'reduce': 0}

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    line = 1
    for tok in pygments.lex(code, lexer):
        # The line counter advances before the match test, mirroring how the
        # newline tokens are attributed by the lexer.
        line += _getNewLines(tok)
        for pattern, key, idiom in targets:
            if _sameToken(tok, pattern):
                tokensFound[key] += 1
                idiom.addNew(line)
                break
    log('filter found in lines: ' + str(filterIdiom.getLines()))
    log('map found in lines: ' + str(mapIdiom.getLines()))
    log('reduce found in lines: ' + str(reduceIdiom.getLines()))
    return [mapIdiom, filterIdiom, reduceIdiom]
# Sample source used to demonstrate the to_snake_case filter below.
code = """
for i in range(1, 11):
    print("Hello world!")

if x and y:
    print("yes")

if x or y:
    print("dunno")

globalVariable = 42

def helloWorld():
    print("Hello world!")

helloWorld()

print(globalVariable)
"""

# Print the sample unfiltered first, for comparison.
print(highlight(code, PythonLexer(), TerminalFormatter()))

print("-----------------------")

lexer = PythonLexer()

# add the filter (original comment was Czech: "pridani filtru")
lexer.add_filter(to_snake_case())

print(highlight(code, lexer, TerminalFormatter()))
Esempio n. 11
0
                    if (not indents) and seen_def:
                        yield token.Text, "\n"

                decorator_started = False

                for t, v in indents:
                    yield t, v
                indents = []

                seen_def = True  # print leading new lines

            if def_started or decorator_started:
                yield ttype, value

            if (ttype is token.Punctuation) and (value == ':'):
                def_started = False


if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter

    lex = PythonLexer()
    lex.add_filter(PythonAPIFilter())

    # Run the filter over this file and two sibling modules.
    for f in [__file__, "../views.py", '../admin.py']:
        # "with" closes the handle; the original open(f).read() leaked it.
        with open(f, 'r') as fh:
            code = fh.read()
        # print(...) with a single argument is valid on Python 2 and 3;
        # the original "print expr" statements were Python-2-only syntax.
        print("---------- %s ----------" % f)
        print(highlight(code, lex, NullFormatter()))
Esempio n. 12
0
def generate_plots(results_filename, plot_filename, github_url):
    """Render a benchmark-results JSON file into a standalone HTML report.

    :param results_filename: path to a JSON file with keys 'name', 'xlabel',
        'baseline', 'description', 'implementations' and 'results'.
    :param plot_filename: output path for the generated HTML.
    :param github_url: passed through to the HTML template.
    :return: the parsed results dict.
    """
    # The HTML template lives next to this module.
    with open(os.path.join(os.path.dirname(__file__), 'plot.tmpl.html')) as f:
        html_template = Template(f.read())

    with open(results_filename, 'r') as f:
        results = json.load(f)

    name = results['name']
    xlabel = results['xlabel']
    baseline = results['baseline']

    # Page header: example name plus its markdown-rendered description.
    sections = [
        layouts.row(Div(text='''<h1>Example: %s</h1>
            <p><b>Description</b>: %s</p>''' % \
                (results['name'], markdown.render(results['description'])), width=WIDTH)),
    ]
    # Implementations
    sections.append(
        layouts.row(Div(text='<h2>Implementations</h2>', width=WIDTH)))
    source_tabs = []
    for impl in results['implementations']:
        # Highlight each implementation's source; codetagify makes NOTE and
        # SPEEDTIP comments stand out in the rendered code.
        lexer = PythonLexer()
        lexer.add_filter('codetagify', codetags=['NOTE', 'SPEEDTIP'])
        highlighted = highlight(impl['source'], lexer, HtmlFormatter())
        source_tabs.append((impl['name'],
                            Div(text=markdown.render(impl['description']),
                                width=WIDTH), Div(text=highlighted,
                                                  width=WIDTH)))

    # One tab per implementation: description column above the source.
    tabs = Tabs(tabs=[
        Panel(child=layouts.column(st[1], st[2]), title=st[0])
        for st in source_tabs
    ],
                width=WIDTH)
    sections.append(layouts.row(tabs))

    # Benchmarks
    sections.append(layouts.row(Div(text='<h2>Benchmarks</h2>')))
    for category_results in results['results']:
        category = category_results['category']
        # Remove the category key so make_plot only sees data series.
        del category_results['category']

        plot_title = name + ': ' + ', '.join(category)

        # Three views of the same data: relative speedup, throughput, raw.
        speedup_p = make_plot(category_results,
                              title=plot_title,
                              xlabel=xlabel,
                              ylabel='Speedup over %s' % baseline,
                              baseline=baseline,
                              ycolname='speedup',
                              yaxis_format='%1.1f')
        throughput_p = make_plot(category_results,
                                 title=plot_title,
                                 xlabel=xlabel,
                                 ylabel='%s / sec' % xlabel,
                                 baseline=baseline,
                                 ycolname='throughput',
                                 yaxis_format='%1.0e')
        raw_p = make_plot(category_results,
                          title=plot_title,
                          xlabel=xlabel,
                          ylabel='Execution time (usec)',
                          baseline=baseline,
                          ycolname='times',
                          yaxis_format='%1.0f')

        tabs = Tabs(tabs=[
            Panel(child=speedup_p, title='Speedup'),
            Panel(child=throughput_p, title='Throughput'),
            Panel(child=raw_p, title='Raw times')
        ],
                    width=WIDTH)
        sections.append(layouts.row(tabs))

    # Assemble the final page; pygments CSS is injected via the template.
    html = file_html(
        layouts.column(sections),
        resources=CDN,
        title='Example: %s' % results['name'],
        template=html_template,
        template_variables=dict(
            pygments_css=HtmlFormatter().get_style_defs('.highlight'),
            github_url=github_url))

    with open(plot_filename, 'w') as f:
        f.write(html)

    return results
Esempio n. 13
0
# Sample source used to demonstrate the CamelCaseFilter below.
code = """
for i in range(1, 11):
    print("Hello world!")

if x and y:
    print("yes")

if x or y:
    print("dunno")

globalVariable = 42

def helloWorld():
    print("Hello world!")

helloWorld()

print(globalVariable)
"""

# Print the sample unfiltered first, for comparison.
print(highlight(code, PythonLexer(), TerminalFormatter()))

print("-----------------------")

lexer = PythonLexer()

# add the filter (original comment was Czech: "pridani filtru")
lexer.add_filter(CamelCaseFilter())

print(highlight(code, lexer, TerminalFormatter()))
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from pygments.filters import NameHighlightFilter

# Sample source; "xor" and "goto" are deliberately not valid Python —
# they exist only so NameHighlightFilter has something to flag.
code = """
for i in range(1, 11):
    print("Hello world!")
if x and y:
    print("yes")
if x or y:
    print("dunno")
if x xor y:
    print("different")
goto 10
"""

# Print the sample unfiltered first, for comparison.
print(highlight(code, PythonLexer(), TerminalFormatter()))

print("-----------------------")

lexer = PythonLexer()

# add the filter (original comment was Czech: "pridani filtru")
lexer.add_filter(NameHighlightFilter(names=['xor', 'goto']))

print(highlight(code, lexer, TerminalFormatter()))
Esempio n. 15
0
# a list of builtin themes.
#
html_theme = 'alabaster'

# Alabaster theme options; commented-out entries are kept as tuning notes.
html_theme_options = {
    'description': 'Structural Optimization with FEniCS and dolfin-adjoint',
    'fixed_sidebar': True,
    'font_family':
    '-apple-system,system-ui,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif',
    'code_font_size': '0.85em',
    # 'caption_font_size': 0.5,
    # 'font_size': '05'
    # 'pre_bg': '#444444'
    'page_width': '60%'
}

pygments_style = 'pastie'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

from pygments.lexers import PythonLexer

# Render tabs as four spaces in highlighted code blocks.
l = PythonLexer()
l.add_filter('whitespace', tabs="    ")

latex_elements = {'preamble': r'''
\usepackage{relsize}'''}
Esempio n. 16
0
                        if (not indents) and seen_def:
                            yield token.Text, "\n"

                    decorator_started = False

                    for t, v in indents:
                        yield t, v
                    indents = []

                    seen_def = True # print leading new lines

            if def_started or decorator_started:
                yield ttype, value

            if  (ttype is token.Punctuation) and (value == ':'):
                def_started = False

if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter

    lex = PythonLexer()
    lex.add_filter(PythonAPIFilter())

    # Run the filter over this file and two sibling modules.
    for f in [__file__, "../views.py", '../admin.py']:
        # "with" closes the handle; the original open(f).read() leaked it.
        with open(f, 'r') as fh:
            code = fh.read()
        # print(...) with a single argument is valid on Python 2 and 3;
        # the original "print expr" statements were Python-2-only syntax.
        print("---------- %s ----------" % f)
        print(highlight(code, lex, NullFormatter()))
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from pygments.filters import NameHighlightFilter

# Sample source; "xor" and "goto" are deliberately not valid Python —
# they exist only so NameHighlightFilter has something to flag.
code = """
for i in range(1, 11):
    print("Hello world!")
if x and y:
    print("yes")
if x or y:
    print("dunno")
if x xor y:
    print("different")
goto 10
"""


# Print the sample unfiltered first, for comparison.
print(highlight(code, PythonLexer(), TerminalFormatter()))

print("-----------------------")

lexer = PythonLexer()

# add the filter (original comment was Czech: "pridani filtru")
lexer.add_filter(NameHighlightFilter(names=["xor", "goto"]))

print(highlight(code, lexer, TerminalFormatter()))
def initWithURL_(_self, _sel, _url):
    """Objective-C method swizzle: intercept help-viewer URL loads.

    When the URL uses the custom myfile:// scheme, render the referenced
    script as syntax-highlighted HTML (with the current search term marked)
    and load that HTML instead. Presumably installed over
    PA2QuickHelpContentViewController.initWithURL_ in Pythonista —
    see originalinitWithURL_ at the bottom. (NOTE(review): nsurl and
    search_term are defined elsewhere in this module.)
    """
    from objc_util import ObjCInstance
    import os
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    from pygments.styles import get_style_by_name
    import re
    global search_term
    self = ObjCInstance(_self)  # PA2QuickHelpContentViewController
    url = ObjCInstance(_url)
    if 'myfile://' in str(url):
        t = str(url)
        i = t.find('myfile://')
        # Drop the leading "my" so "myfile://..." becomes "file://..."
        t = t[i + 2:]
        url = nsurl(t)  # file:///...
        # Extract the script path between the Documents folder and the '#'
        # fragment; '~' was used to encode spaces in the original link.
        tx = '/Pythonista3/Documents/'
        i = t.find(tx) + len(tx)
        j = t.find('#')
        path = t[i:j]
        path = path.replace('~', ' ')
        self.setTitle_(path)

        fpath = os.path.expanduser('~/Documents/' + path)

        with open(fpath, mode='rt', encoding='utf-8', errors='ignore') as fil:
            code = fil.read()

        # Syntax-highlight code
        # from omz code at https://forum.omz-software.com/topic/1950/syntax-highlight-python-code-on-screen-while-running
        html_formatter = HtmlFormatter(style='colorful')
        l = PythonLexer()
        l.add_filter('whitespace', tabs=' ', tabsize=2)
        highlighted_code = highlight(code, l, html_formatter)
        styles = html_formatter.get_style_defs()

        # change html code to highlight searched term with yellow background
        styles = styles + '\n' + '.search { background-color: #ffff00 } /* search term */'
        # Exact-case pass first...
        highlighted_code = highlighted_code.replace(
            search_term,
            '</span><span class="search">' + search_term + '</span>')

        # add class to searched term independantly of case
        src_str = re.compile('(' + search_term + ')', re.IGNORECASE)
        # @Olaf: use \1 to refer to matched text grouped by () in the regex
        highlighted_code = src_str.sub(
            r'</span><span class="search">\1</span>', highlighted_code)

        # meta tag UTF-8 is needed to display emojis if present in a script
        html = '<html><head><meta charset="UTF-8"><style>%s</style></head><body> %s </body></html>' % (
            styles, highlighted_code)

        fpath = os.path.expanduser('~/Documents/find_in_files_via_help.html')
        # segmentation error crash if write file in text mode
        #with open(fpath, mode='wb') as fil:
        #	fil.write(html.encode())
        with open(fpath, mode='wt', encoding='utf8') as fil:
            fil.write(html)
        url = nsurl('file://' + fpath)
    # Delegate to the original (pre-swizzle) initializer with the final URL.
    rtnval = self.originalinitWithURL_(url)
    return rtnval.ptr
Esempio n. 19
0
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from pygments.filters import VisibleWhitespaceFilter

# Sample source mixing tab and space indentation, so the filter's
# tab-visualisation is easy to see in the second printout.
code = """
for i in range(1, 11):
\tprint("Hello world!")
if x and y:
        print("yes")
if x or y:
\tprint("dunno")
"""


# Print the sample unfiltered first, for comparison.
print(highlight(code, PythonLexer(), TerminalFormatter()))

print("-----------------------")

# Same source again, now with tabs rendered visibly.
lexer = PythonLexer()
lexer.add_filter(VisibleWhitespaceFilter(tabs=True))

print(highlight(code, lexer, TerminalFormatter()))
def helloWorld():
    print("Hello world!")

helloWorld()

print(globalVariable)
"""


print(highlight(code, PythonLexer(), TerminalFormatter()))

input()
print("-----------------------")

lexer = PythonLexer()

# pridani filtru
lexer.add_filter(to_snake_case())

print(highlight(code, lexer, TerminalFormatter()))

input()
print("-----------------------")

lexer = PythonLexer()

# pridani filtru
lexer.add_filter(to_snake_case(convert_all_names=True))

print(highlight(code, lexer, TerminalFormatter()))
Esempio n. 21
0
def redent(s):
    """Run *s* through the _RedentFilter and return the resulting text.

    NullFormatter emits plain token text, so the output is the filtered
    source itself (presumably re-indented — behavior is defined entirely
    by _RedentFilter; confirm against its definition).
    """
    lexer = PythonLexer()
    lexer.add_filter(_RedentFilter())
    return highlight(s, lexer, NullFormatter())