Code Example #1
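This method, evidently from Leo's PythonTokenBeautifier class (the class instantiated in Examples #2 and #3), beautifies the body text of a single outline node. It parses the body before and after beautification and replaces it only when the two parse trees compare equal, so a faulty run can never corrupt the code.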
    def prettyPrintNode(self, p):
        '''The driver for beautification: beautify a single node.'''
        # Assumes the module-level imports: ast, time, tokenize,
        # and leo.core.leoGlobals as g.
        if not should_beautify(p):
            # @nobeautify is in effect.
            return
        if not p.b:
            # Pretty printing might add text!
            return
        if not p.b.strip():
            # Do this *after* we are sure @beautify is in effect.
            self.replace_body(p, '')
            return
        t1 = time.time()
        # Replace Leonine syntax with special comments.
        comment_string, s0 = comment_leo_lines(p)
        try:
            s1 = g.toEncodedString(s0)
            node1 = ast.parse(s1, filename='before', mode='exec')
        except IndentationError:
            self.skip_message('IndentationError', p)
            return
        except SyntaxError:
            self.skip_message('SyntaxError', p)
            return
        except Exception:
            g.es_exception()
            self.skip_message('Exception', p)
            return
        t2 = time.time()
        # Tokenize the original source.
        readlines = g.ReadLinesClass(s0).next
        tokens = list(tokenize.generate_tokens(readlines))
        t3 = time.time()
        # Beautify the token stream.
        s2 = self.run(tokens)
        t4 = time.time()
        try:
            s2_e = g.toEncodedString(s2)
            # 'after': this parse checks the *beautified* source.
            node2 = ast.parse(s2_e, filename='after', mode='exec')
            ok = compare_ast(node1, node2)
        except Exception:
            g.es_exception()
            # Report s2, not s2_e: s2_e is unbound if toEncodedString failed.
            g.trace('Error in %s...\n%s' % (p.h, s2))
            self.skip_message('BeautifierError', p)
            return
        if not ok:
            self.skip_message('BeautifierError', p)
            return
        t5 = time.time()
        # Restore the tags after the compare.
        s3 = uncomment_leo_lines(comment_string, p, s2)
        self.replace_body(p, s3)
        # Update the stats.
        self.n_input_tokens += len(tokens)
        self.n_output_tokens += len(self.code_list)
        self.n_strings += len(s3)
        self.parse_time += (t2 - t1)
        self.tokenize_time += (t3 - t2)
        self.beautify_time += (t4 - t3)
        self.check_time += (t5 - t4)
        self.total_time += (t5 - t1)
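compare_ast is not shown on this page. Below is a minimal sketch of an equivalent safety check, using only the standard library; the real Leo helper may normalize strings and numbers differently.

import ast

def compare_ast_sketch(node1, node2):
    # By default ast.dump() omits line/column attributes, so the dumps
    # are equal exactly when the trees have the same shape and values.
    return ast.dump(node1) == ast.dump(node2)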
Code Example #2
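This standalone driver applies the beautifier to a file on disk. It uses the same safety check as Example #1: the file is rewritten only if the beautified source parses to an equivalent AST.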
def beautify(options, path):
    '''Beautify the file with the given path.'''
    # Assumes the module-level imports: ast, tokenize,
    # and leo.core.leoGlobals as g.
    fn = g.shortFileName(path)
    # readFileIntoString returns (contents, encoding).
    s, e = g.readFileIntoString(path)
    if not s:
        return
    print('beautifying %s' % fn)
    s1 = g.toEncodedString(s)
    node1 = ast.parse(s1, filename='before', mode='exec')
    readlines = g.ReadLinesClass(s).next
    tokens = list(tokenize.generate_tokens(readlines))
    beautifier = PythonTokenBeautifier(c=None)
    beautifier.delete_blank_lines = not options.keep
    s2 = beautifier.run(tokens)
    s2_e = g.toEncodedString(s2)
    node2 = ast.parse(s2_e, filename='after', mode='exec')
    # Rewrite the file only if the beautified source parses to an
    # equivalent tree.
    if compare_ast(node1, node2):
        with open(path, 'wb') as f:
            f.write(s2_e)
    else:
        print('failed to beautify %s' % fn)
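A minimal sketch of how beautify() might be driven from the command line. The --keep flag and the main() wrapper are hypothetical; only the options.keep attribute is actually required by beautify().

import argparse

def main():
    # Hypothetical CLI; options.keep mirrors the attribute read above.
    parser = argparse.ArgumentParser(description='Beautify Python files.')
    parser.add_argument('--keep', action='store_true',
                        help='keep blank lines in the output')
    parser.add_argument('paths', nargs='+', help='files to beautify')
    options = parser.parse_args()
    for path in options.paths:
        beautify(options, path)

if __name__ == '__main__':
    main()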
Code Example #3
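This test helper runs the beautifier over the script generated from a Leo node, verifies the result with the same AST comparison, accumulates timing statistics, and dumps the inputs, tokens, and output selected by the settings dict.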
def test_beautifier(c, h, p, settings):
    '''Test Leo's beautifier code.'''
    # Assumes the module-level imports: ast, time, tokenize,
    # and leo.core.leoGlobals as g.
    if not p:
        g.trace('not found: %s' % h)
        return
    s = g.getScript(c,
                    p,
                    useSelectedText=False,
                    forcePythonSentinels=True,
                    useSentinels=False)
    g.trace(h.strip())
    # time.clock() was removed in Python 3.8; time.time() matches Example #1.
    t1 = time.time()
    s1 = g.toEncodedString(s)
    node1 = ast.parse(s1, filename='before', mode='exec')
    t2 = time.time()
    readlines = g.ReadLinesClass(s).next
    tokens = list(tokenize.generate_tokens(readlines))
    t3 = time.time()
    beautifier = PythonTokenBeautifier(c)
    keep_blank_lines = settings.get('tidy-keep-blank-lines')
    if keep_blank_lines is not None:
        beautifier.delete_blank_lines = not keep_blank_lines
    s2 = beautifier.run(tokens)
    t4 = time.time()
    try:
        s2_e = g.toEncodedString(s2)
        node2 = ast.parse(s2_e, filename='after', mode='exec')
        ok = compare_ast(node1, node2)
    except Exception:
        g.es_exception()
        ok = False
    t5 = time.time()
    # Update the stats.
    beautifier.n_input_tokens += len(tokens)
    beautifier.n_output_tokens += len(beautifier.code_list)
    beautifier.n_strings += len(s2)
    beautifier.parse_time += (t2 - t1)
    beautifier.tokenize_time += (t3 - t2)
    beautifier.beautify_time += (t4 - t3)
    beautifier.check_time += (t5 - t4)
    beautifier.total_time += (t5 - t1)
    if settings.get('input_string'):
        print('==================== input_string')
        for i, z in enumerate(g.splitLines(s)):
            print('%4s %s' % (i + 1, z.rstrip()))
    if settings.get('input_lines'):
        print('==================== input_lines')
        dump_tokens(tokens, verbose=False)
    if settings.get('input_tokens'):
        print('==================== input_tokens')
        dump_tokens(tokens, verbose=True)
    if settings.get('output_tokens'):
        print('==================== code_list')
        for i, z in enumerate(beautifier.code_list):
            print('%4s %s' % (i, z))
    if settings.get('output_string'):
        print('==================== output_string')
        for i, z in enumerate(g.splitLines(s2)):
            if z == '\n':
                print('%4s' % (i + 1))
            elif z.rstrip():
                print('%4s %s' % (i + 1, z.rstrip()))
            else:
                print('%4s %r' % (i + 1, str(z)))
    if settings.get('stats'):
        beautifier.print_stats()
    if not ok:
        print('*************** fail: %s ***************' % (h))
    return beautifier
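A sketch of one possible invocation. The values are hypothetical: c must be a live Leo commander, and p a position found from the headline h (e.g. via g.findNodeAnywhere(c, h)); the settings keys match those queried above.

# Hypothetical invocation; c, h, and p come from a running Leo session.
# p = g.findNodeAnywhere(c, h)
settings = {
    'tidy-keep-blank-lines': True,  # forwarded to delete_blank_lines
    'input_string': False,
    'input_lines': False,
    'input_tokens': False,
    'output_tokens': False,
    'output_string': True,
    'stats': True,
}
beautifier = test_beautifier(c, h, p, settings)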