Example #1
class PygmentsPreviewer(Previewer):
    # All supported MIME types
    MIMETYPES = ('text/css', 'text/x-python', 'text/x-ruby-script', 'text/x-java-source', 'text/x-c',
                 'application/javascript', 'text/x-fortran', 'text/x-csharp', 'text/php',
                 'text/x-php')

    # Python's mimetypes lib and Pygments do not quite agree on some MIME types
    CUSTOM_LEXERS = {
        'text/x-c': CppLexer(),
        'text/x-java-source': JavaLexer(),
        'text/x-ruby-script': RubyLexer(),
        'text/php': PhpLexer()
    }

    @classmethod
    def can_preview(cls, attachment_file):
        return attachment_file.content_type in cls.MIMETYPES

    @classmethod
    def generate_content(cls, attachment):
        mime_type = attachment.file.content_type

        lexer = cls.CUSTOM_LEXERS.get(mime_type)
        if lexer is None:
            lexer = get_lexer_for_mimetype(mime_type)

        with attachment.file.open() as f:
            html_formatter = HtmlFormatter(style='tango', linenos='inline', prestyles='mono')
            html_code = highlight(f.read(), lexer, html_formatter)

        css_code = html_formatter.get_style_defs('.highlight')

        return render_template('previewer_code:pygments_preview.html', attachment=attachment,
                               html_code=html_code, css_code=css_code)
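
The class above relies on its project's Previewer plumbing, but the Pygments calls it makes are standard. Below is a minimal, self-contained sketch of the same MIME-type-to-HTML flow; the sample source string and MIME type are illustrative.

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_for_mimetype

source = 'puts "hello"\n'                          # illustrative input
lexer = get_lexer_for_mimetype('text/x-ruby')      # raises pygments.util.ClassNotFound for unknown types
formatter = HtmlFormatter(style='tango', linenos='inline')
html_code = highlight(source, lexer, formatter)
css_code = formatter.get_style_defs('.highlight')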
Example #2
 def highlightString(src):
     # self.currentExt holds the current file's extension ('php', 'py', ...), set elsewhere.
     try:
         if self.currentExt == 'php':
             from pygments.lexers import PhpLexer
             return highlight(src, PhpLexer(), HtmlFormatter())
         elif self.currentExt == 'py':
             from pygments.lexers import PythonLexer
             return highlight(src, PythonLexer(), HtmlFormatter())
         elif self.currentExt == 'rb':
             from pygments.lexers import RubyLexer
             return highlight(src, RubyLexer(), HtmlFormatter())
         elif self.currentExt == 'pl':
             from pygments.lexers import PerlLexer
             return highlight(src, PerlLexer(), HtmlFormatter())
         elif self.currentExt == 'java':
             from pygments.lexers import JavaLexer
             return highlight(src, JavaLexer(), HtmlFormatter())
         elif self.currentExt == 'cs':
             from pygments.lexers import CSharpLexer
             return highlight(src, CSharpLexer(), HtmlFormatter())
         else:
             from pygments.lexers import JavascriptLexer
             return highlight(src, JavascriptLexer(), HtmlFormatter())
     except Exception:
         return "File could not be highlighted"
Example #3
def ruby2html(ruby_file):
    with open(ruby_file, encoding='utf-8') as rbf:
        code = rbf.read()
    html = highlight(code, RubyLexer(), HtmlFormatter())
    # Swap the final extension for .html (keeps any dots earlier in the name).
    html_name = ruby_file.rsplit('.', 1)[0] + '.html'
    with open(html_name, 'w', encoding='utf-8') as hf:
        hf.write(html)
Example #4
def main(inputFileName):

    outputFileName = "%s.html" % inputFileName

    # RelabelNegativeCommentsFilter and HtmlPageFormatter are custom classes
    # defined elsewhere in the same project.
    rubyLexer = RubyLexer()
    rubyLexer.add_filter(RelabelNegativeCommentsFilter())
    htmlPageFormatter = HtmlPageFormatter(title=inputFileName)

    with open(inputFileName, "r") as inputFile:
        code = inputFile.read()

    print("pygmentizing %s into %s ..." % (inputFileName, outputFileName))

    with open(outputFileName, "w") as outputFile:
        highlight(code, rubyLexer, htmlPageFormatter, outfile=outputFile)
Example #5
def process(inputFileName, relativeBaseDir=""):

    #E the output file name is the input file name with ".html" on the end
    outputFileName = "%s.html" % inputFileName
    #E use the Ruby lexer provided by Pygments to parse Ruby input files
    rubyLexer = RubyLexer()
    # RelabelExtremeCommentsFilter and HtmlPageFormatter are custom classes
    # defined elsewhere in the same project.
    rubyLexer.add_filter(RelabelExtremeCommentsFilter())
    htmlPageFormatter = HtmlPageFormatter(title=inputFileName, relativeBaseDir=relativeBaseDir)

    with open(inputFileName, "r") as inputFile:
        code = inputFile.read()

    print("pygmentizing %s into %s ..." % (inputFileName, outputFileName))

    with open(outputFileName, "w") as outputFile:
        highlight(code, rubyLexer, htmlPageFormatter, outfile=outputFile)
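
HtmlPageFormatter and the Relabel*CommentsFilter classes used above are project-specific. If all that is needed is a standalone HTML page, the stock HtmlFormatter can produce one directly with full=True; a minimal sketch with illustrative file names:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import RubyLexer

with open("example.rb", "r", encoding="utf-8") as f:   # illustrative input path
    code = f.read()

# full=True wraps the highlighted code in a complete HTML document with inline CSS.
formatter = HtmlFormatter(full=True, title="example.rb")
with open("example.rb.html", "w", encoding="utf-8") as out:
    highlight(code, RubyLexer(), formatter, outfile=out)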
Example #6
 def recognize(cls, location):
     """
     Yield one or more Package manifest objects given a file ``location`` pointing to a
     package archive, manifest or similar.
     """
     with io.open(location, encoding='utf-8') as loc:
         file_contents = loc.read()
     formatted_file_contents = highlight(
         file_contents, RubyLexer(), ChefMetadataFormatter())
     package_data = json.loads(formatted_file_contents)
     return build_package(cls, package_data)
Example #7
def parse(location):
    """
    Return a Package object from a metadata.json file or a metadata.rb file or None.
    """
    if is_metadata_json(location):
        with io.open(location, encoding='utf-8') as loc:
            package_data = json.load(loc, object_pairs_hook=OrderedDict)
        return build_package(package_data)

    if is_metadata_rb(location):
        with io.open(location, encoding='utf-8') as loc:
            file_contents = loc.read()
        formatted_file_contents = highlight(
            file_contents, RubyLexer(), ChefMetadataFormatter())
        package_data = json.loads(formatted_file_contents, object_pairs_hook=OrderedDict)
        return build_package(package_data)
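
ChefMetadataFormatter in the two examples above is a custom Pygments formatter that turns the token stream of a metadata.rb file into JSON. Its implementation is not shown here; the sketch below only illustrates the general shape of a custom formatter, not the actual scancode code.

import json

from pygments.formatter import Formatter

class TokenDumpFormatter(Formatter):
    """Illustrative formatter: dump (token type, value) pairs as JSON."""

    def format(self, tokensource, outfile):
        pairs = [(str(ttype), value) for ttype, value in tokensource]
        json.dump(pairs, outfile)

It is driven the same way as in the examples: highlight(file_contents, RubyLexer(), TokenDumpFormatter()) returns the JSON string.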
Example #8
 def setUp(self):
     self.lexer = RubyLexer()
     self.maxDiff = None
Example #9
class RubyTest(unittest.TestCase):

    def setUp(self):
        self.lexer = RubyLexer()
        self.maxDiff = None

    def testRangeSyntax1(self):
        fragment = u'1..3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Operator, u'..'),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax2(self):
        fragment = u'1...3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Operator, u'...'),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax3(self):
        fragment = u'1 .. 3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Text, u' '),
            (Operator, u'..'),
            (Text, u' '),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testInterpolationNestedCurly(self):
        fragment = (
            u'"A#{ (3..5).group_by { |x| x/2}.map '
            u'do |k,v| "#{k}" end.join }" + "Z"\n')

        tokens = [
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'A'),
            (Token.Literal.String.Interpol, u'#{'),
            (Token.Text, u' '),
            (Token.Punctuation, u'('),
            (Token.Literal.Number.Integer, u'3'),
            (Token.Operator, u'..'),
            (Token.Literal.Number.Integer, u'5'),
            (Token.Punctuation, u')'),
            (Token.Operator, u'.'),
            (Token.Name, u'group_by'),
            (Token.Text, u' '),
            (Token.Literal.String.Interpol, u'{'),
            (Token.Text, u' '),
            (Token.Operator, u'|'),
            (Token.Name, u'x'),
            (Token.Operator, u'|'),
            (Token.Text, u' '),
            (Token.Name, u'x'),
            (Token.Operator, u'/'),
            (Token.Literal.Number.Integer, u'2'),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Operator, u'.'),
            (Token.Name, u'map'),
            (Token.Text, u' '),
            (Token.Keyword, u'do'),
            (Token.Text, u' '),
            (Token.Operator, u'|'),
            (Token.Name, u'k'),
            (Token.Punctuation, u','),
            (Token.Name, u'v'),
            (Token.Operator, u'|'),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Interpol, u'#{'),
            (Token.Name, u'k'),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u' '),
            (Token.Keyword, u'end'),
            (Token.Operator, u'.'),
            (Token.Name, u'join'),
            (Token.Text, u' '),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u' '),
            (Token.Operator, u'+'),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'Z'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testOperatorMethods(self):
        fragment = u'x.==4\n'
        tokens = [
            (Token.Name, u'x'),
            (Token.Operator, u'.'),
            (Token.Name.Operator, u'=='),
            (Token.Literal.Number.Integer, u'4'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testEscapedBracestring(self):
        fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
        tokens = [
            (Token.Name, u'str'),
            (Token.Operator, u'.'),
            (Token.Name, u'gsub'),
            (Token.Punctuation, u'('),
            (Token.Literal.String.Regex, u'%r{'),
            (Token.Literal.String.Regex, u'\\\\'),
            (Token.Literal.String.Regex, u'\\\\'),
            (Token.Literal.String.Regex, u'}'),
            (Token.Punctuation, u','),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'/'),
            (Token.Literal.String.Double, u'"'),
            (Token.Punctuation, u')'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
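
When one of these assertions fails after a lexer change, the expected token list can be regenerated by inspecting what the lexer actually emits, for example:

from pygments.lexers import RubyLexer

for token_type, value in RubyLexer().get_tokens('1 .. 3\n'):
    print(token_type, repr(value))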
Example #10
def lexer():
    yield RubyLexer()
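
On its own this generator does nothing; in the Pygments test suite such a function serves as a pytest fixture. The sketch below shows how it is typically wired up; the decorator placement and the test function are illustrative, and the expected tokens are copied from the unittest examples above.

import pytest

from pygments.lexers import RubyLexer
from pygments.token import Number, Operator, Text

@pytest.fixture
def lexer():
    yield RubyLexer()

def test_range_syntax(lexer):
    # pytest injects the RubyLexer instance yielded by the fixture above.
    assert list(lexer.get_tokens('1..3\n')) == [
        (Number.Integer, '1'),
        (Operator, '..'),
        (Number.Integer, '3'),
        (Text, '\n'),
    ]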
Example #11
 def setUp(self):
     self.lexer = RubyLexer()
     self.maxDiff = None
Example #12
class RubyTest(unittest.TestCase):
    def setUp(self):
        self.lexer = RubyLexer()
        self.maxDiff = None

    def testRangeSyntax1(self):
        fragment = u'1..3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Operator, u'..'),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax2(self):
        fragment = u'1...3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Operator, u'...'),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax3(self):
        fragment = u'1 .. 3\n'
        tokens = [
            (Number.Integer, u'1'),
            (Text, u' '),
            (Operator, u'..'),
            (Text, u' '),
            (Number.Integer, u'3'),
            (Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testInterpolationNestedCurly(self):
        fragment = (u'"A#{ (3..5).group_by { |x| x/2}.map '
                    u'do |k,v| "#{k}" end.join }" + "Z"\n')

        tokens = [
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'A'),
            (Token.Literal.String.Interpol, u'#{'),
            (Token.Text, u' '),
            (Token.Punctuation, u'('),
            (Token.Literal.Number.Integer, u'3'),
            (Token.Operator, u'..'),
            (Token.Literal.Number.Integer, u'5'),
            (Token.Punctuation, u')'),
            (Token.Operator, u'.'),
            (Token.Name, u'group_by'),
            (Token.Text, u' '),
            (Token.Literal.String.Interpol, u'{'),
            (Token.Text, u' '),
            (Token.Operator, u'|'),
            (Token.Name, u'x'),
            (Token.Operator, u'|'),
            (Token.Text, u' '),
            (Token.Name, u'x'),
            (Token.Operator, u'/'),
            (Token.Literal.Number.Integer, u'2'),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Operator, u'.'),
            (Token.Name, u'map'),
            (Token.Text, u' '),
            (Token.Keyword, u'do'),
            (Token.Text, u' '),
            (Token.Operator, u'|'),
            (Token.Name, u'k'),
            (Token.Punctuation, u','),
            (Token.Name, u'v'),
            (Token.Operator, u'|'),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Interpol, u'#{'),
            (Token.Name, u'k'),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u' '),
            (Token.Keyword, u'end'),
            (Token.Operator, u'.'),
            (Token.Name, u'join'),
            (Token.Text, u' '),
            (Token.Literal.String.Interpol, u'}'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u' '),
            (Token.Operator, u'+'),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'Z'),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testOperatorMethods(self):
        fragment = u'x.==4\n'
        tokens = [
            (Token.Name, u'x'),
            (Token.Operator, u'.'),
            (Token.Name.Operator, u'=='),
            (Token.Literal.Number.Integer, u'4'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testEscapedBracestring(self):
        fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
        tokens = [
            (Token.Name, u'str'),
            (Token.Operator, u'.'),
            (Token.Name, u'gsub'),
            (Token.Punctuation, u'('),
            (Token.Literal.String.Regex, u'%r{'),
            (Token.Literal.String.Regex, u'\\\\'),
            (Token.Literal.String.Regex, u'\\\\'),
            (Token.Literal.String.Regex, u'}'),
            (Token.Punctuation, u','),
            (Token.Text, u' '),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u'/'),
            (Token.Literal.String.Double, u'"'),
            (Token.Punctuation, u')'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
Example #13
class RubyTest(unittest.TestCase):
    def setUp(self):
        self.lexer = RubyLexer()
        self.maxDiff = None

    def testRangeSyntax1(self):
        fragment = u"1..3\n"
        tokens = [(Number.Integer, u"1"), (Operator, u".."), (Number.Integer, u"3"), (Text, u"\n")]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax2(self):
        fragment = u"1...3\n"
        tokens = [(Number.Integer, u"1"), (Operator, u"..."), (Number.Integer, u"3"), (Text, u"\n")]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testRangeSyntax3(self):
        fragment = u"1 .. 3\n"
        tokens = [
            (Number.Integer, u"1"),
            (Text, u" "),
            (Operator, u".."),
            (Text, u" "),
            (Number.Integer, u"3"),
            (Text, u"\n"),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testInterpolationNestedCurly(self):
        fragment = u'"A#{ (3..5).group_by { |x| x/2}.map ' u'do |k,v| "#{k}" end.join }" + "Z"\n'

        tokens = [
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u"A"),
            (Token.Literal.String.Interpol, u"#{"),
            (Token.Text, u" "),
            (Token.Punctuation, u"("),
            (Token.Literal.Number.Integer, u"3"),
            (Token.Operator, u".."),
            (Token.Literal.Number.Integer, u"5"),
            (Token.Punctuation, u")"),
            (Token.Operator, u"."),
            (Token.Name, u"group_by"),
            (Token.Text, u" "),
            (Token.Literal.String.Interpol, u"{"),
            (Token.Text, u" "),
            (Token.Operator, u"|"),
            (Token.Name, u"x"),
            (Token.Operator, u"|"),
            (Token.Text, u" "),
            (Token.Name, u"x"),
            (Token.Operator, u"/"),
            (Token.Literal.Number.Integer, u"2"),
            (Token.Literal.String.Interpol, u"}"),
            (Token.Operator, u"."),
            (Token.Name, u"map"),
            (Token.Text, u" "),
            (Token.Keyword, u"do"),
            (Token.Text, u" "),
            (Token.Operator, u"|"),
            (Token.Name, u"k"),
            (Token.Punctuation, u","),
            (Token.Name, u"v"),
            (Token.Operator, u"|"),
            (Token.Text, u" "),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Interpol, u"#{"),
            (Token.Name, u"k"),
            (Token.Literal.String.Interpol, u"}"),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u" "),
            (Token.Keyword, u"end"),
            (Token.Operator, u"."),
            (Token.Name, u"join"),
            (Token.Text, u" "),
            (Token.Literal.String.Interpol, u"}"),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u" "),
            (Token.Operator, u"+"),
            (Token.Text, u" "),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u"Z"),
            (Token.Literal.String.Double, u'"'),
            (Token.Text, u"\n"),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testOperatorMethods(self):
        fragment = u"x.==4\n"
        tokens = [
            (Token.Name, u"x"),
            (Token.Operator, u"."),
            (Token.Name.Operator, u"=="),
            (Token.Literal.Number.Integer, u"4"),
            (Token.Text, u"\n"),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def testEscapedBracestring(self):
        fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
        tokens = [
            (Token.Name, u"str"),
            (Token.Operator, u"."),
            (Token.Name, u"gsub"),
            (Token.Punctuation, u"("),
            (Token.Literal.String.Regex, u"%r{"),
            (Token.Literal.String.Regex, u"\\\\"),
            (Token.Literal.String.Regex, u"\\\\"),
            (Token.Literal.String.Regex, u"}"),
            (Token.Punctuation, u","),
            (Token.Text, u" "),
            (Token.Literal.String.Double, u'"'),
            (Token.Literal.String.Double, u"/"),
            (Token.Literal.String.Double, u'"'),
            (Token.Punctuation, u")"),
            (Token.Text, u"\n"),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))