Example #1
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

# get_lexer, PreLexer and MyHtmlFormatter are helpers defined elsewhere
# in the catonmat parser package.
def lang_pre_handler2(lexer, lang, code, other=''):
    try:
        # syntax-highlight the code block with Pygments, then run the
        # resulting HTML through PreLexer to get a token stream
        lang_lexer = get_lexer_by_name(lang, stripall=True)
        if other:
            html_formatter = MyHtmlFormatter(other)
        else:
            html_formatter = HtmlFormatter()
        token_stream = get_lexer(highlight(code, lang_lexer, html_formatter), PreLexer)
    except ClassNotFound:
        # no Pygments lexer for this language: fall back to the plain,
        # unhighlighted <pre> token stream
        token_stream = lexer.pure_pre_token_stream(code)
    for token, value in token_stream:
        yield 0, token, value
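
For context, the Pygments calls that lang_pre_handler2 wraps (get_lexer_by_name, highlight, HtmlFormatter, and the ClassNotFound fallback) can be exercised on their own. Below is a minimal sketch assuming only that Pygments is installed; the helper name highlight_or_plain and the sample strings are made up for illustration:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

def highlight_or_plain(lang, code):
    # hypothetical helper for illustration: return Pygments-highlighted HTML,
    # or the raw code unchanged if no lexer exists for the language name
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
    except ClassNotFound:
        return code
    return highlight(code, lexer, HtmlFormatter())

print(highlight_or_plain('python', 'print("hello")'))
print(highlight_or_plain('no-such-lang', 'plain text'))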
Example #2
def get_tree(text):
    # get_lexer, DocumentLexer, build_parse_tree and filter_tree come from
    # the surrounding catonmat parser module.
    # normalize Windows/Mac line endings to Unix newlines before lexing
    text = text.replace("\r\n", "\n")
    text = text.replace("\r", "\n")

    token_stream = get_lexer(text, DocumentLexer)

    # first stage tree contains the full parse tree, including empty nodes
    # such as <p>       </p> and <p>   <br>   </p>.
    first_stage_tree = build_parse_tree(token_stream)

    # second stage tree clears up the empty text elements
    second_stage_tree = filter_tree(first_stage_tree)

    return second_stage_tree
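
A hypothetical call, assuming the catonmat parser package and its DocumentLexer are importable; the markup string is made up for illustration:

raw = "<p>Hello, <b>world</b>!</p>\r\n<p>   </p>"
tree = get_tree(raw)   # the empty <p> node is cleared out by the second stage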
Example #3
def parse_comment(text):
    from catonmat.parser.lexer import CommentLexer
    # StringIO, get_lexer, build_parse_tree, filter_tree and build_html come
    # from the surrounding catonmat parser module (module-level imports/helpers).
    # TODO: this function is a 1:1 copy of pageparser.py:parsepage(),
    #       merge them!
    text = text.replace("\r\n", "\n")
    text = text.replace("\r", "\n")

    token_stream = get_lexer(text, CommentLexer)

    # first stage tree contains the full parse tree, including empty nodes
    # such as <p>       </p> and <p>   <br>   </p>.
    first_stage_tree = build_parse_tree(token_stream)

    # second stage tree clears up the empty text elements
    second_stage_tree = filter_tree(first_stage_tree)

    # render the filtered tree to HTML and return it as a string
    buffer = StringIO()
    build_html(second_stage_tree, buffer)
    return buffer.getvalue()
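
And a similarly hypothetical call for parse_comment, again assuming the catonmat modules are available; the comment text is made up:

html = parse_comment("First line\r\nsecond line")
print(html)   # the comment body rendered as HTML, with newlines normalized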