def inline_nowiki_repl(self, stack, nowiki, nowiki_text=None, nowiki_text_pre=None, pre_args='', nowiki_text_code=None, nowiki_text_tt=None):
    """
    Append an inline nowiki sample to the element on top of the stack.

    The plain, ``code`` and ``tt`` variants all render as an inline
    ``moin_page.code`` element; the pre variant renders as ``blockcode``.
    """
    # The three inline variants are handled identically; first match wins.
    for sample in (nowiki_text, nowiki_text_code, nowiki_text_tt):
        if sample is not None:
            stack.top_append(moin_page.code(children=[sample]))
            return
    # Truthy check (not "is not None"): empty backtick nowiki samples are dropped.
    if nowiki_text_pre:
        # TODO: pre_args parsing
        stack.top_append(moin_page.blockcode(children=[nowiki_text_pre]))
def __call__(self, data, contenttype=None, arguments=None):
    """
    Decode *data*, run it through pygments with this converter's lexer,
    and return a moin_page DOM tree whose body is a highlighted blockcode.
    """
    decoded = decode_data(data, contenttype)
    source = u'\n'.join(normalize_split_text(decoded))
    code_elem = moin_page.blockcode(attrib={moin_page.class_: 'highlight'})
    # TreeFormatter appends the highlighted tokens directly into code_elem.
    pygments.highlight(source, self.lexer, TreeFormatter(), code_elem)
    doc_body = moin_page.body(children=(code_elem, ))
    return moin_page.page(children=(doc_body, ))
def __call__(self, content, arguments=None):
    """Parse the text and return DOM tree."""
    code_elem = moin_page.blockcode()
    for raw_line in content:
        # Separate lines with '\n'; len() reflects children already appended,
        # so no separator is emitted before the first line.
        if len(code_elem):
            code_elem.append('\n')
        code_elem.append(raw_line.expandtabs())
    doc_body = moin_page.body(children=(code_elem, ))
    return moin_page.page(children=(doc_body, ))
def block_nowiki_repl(self, iter_content, stack, nowiki):
    """Handles a complete nowiki block.

    Reads the block's first line to decide how to interpret the body:
    an interpreter line (e.g. ``#!creole args``) dispatches to a parser,
    otherwise the whole block becomes a plain blockcode element.
    """
    stack.clear()
    try:
        # Python 2 iterator protocol; iter_content tracks line numbers.
        firstline = iter_content.next()
    except StopIteration:
        # Empty block: emit an empty blockcode.
        stack.push(moin_page.blockcode())
        return
    # Stop directly if we got an end marker in the first line
    match = self.nowiki_end_re.match(firstline)
    if match and not match.group('escape'):
        stack.push(moin_page.blockcode())
        return
    # Wrap the remaining block lines, preserving the current line number.
    lines = _Iter(self.block_nowiki_lines(iter_content), startno=iter_content.lineno)
    match = self.nowiki_interpret_re.match(firstline)
    if match:
        name = match.group('nowiki_name')
        args = match.group('nowiki_args')
        if args:
            args = parse_arguments(args)
        # Parse it directly if the type is ourself
        if not name or name == 'creole':
            body = self.parse_block(lines, args)
            elem = moin_page.page(children=(body, ))
            stack.top_append(elem)
        else:
            # Delegate to the named parser (e.g. highlight, csv, wiki).
            stack.top_append(self.parser(name, args, lines))
    else:
        # No interpreter line: the first line is literal content too.
        elem = moin_page.blockcode(children=(firstline, ))
        stack.top_append(elem)
        for line in lines:
            elem.append('\n')
            elem.append(line)
def __call__(self, data, contenttype=None, arguments=None):
    """
    Decode *data* and return a DOM tree rendering it as a plain
    blockcode, one line per input line with tabs expanded.
    """
    text_lines = normalize_split_text(decode_data(data, contenttype))
    code_elem = moin_page.blockcode()
    for raw_line in text_lines:
        # len() is non-zero once the first line is in, so '\n' separators
        # only go between lines, never before the first one.
        if len(code_elem):
            code_elem.append('\n')
        code_elem.append(raw_line.expandtabs())
    doc_body = moin_page.body(children=(code_elem, ))
    return moin_page.page(children=(doc_body, ))
def visit_literal_block(self, node):
    """
    Open a DOM node for an rST literal block.

    Without a ``parser`` attribute the block becomes plain blockcode;
    with one, it becomes a moin part delegating to the named format,
    carrying the parsed positional and keyword arguments.
    """
    parser = node.get('parser', u'')
    if not parser:
        self.open_moin_page_node(moin_page.blockcode())
        return
    arg_elems = []
    # Positional args: whitespace-delimited words (match keeps the
    # surrounding whitespace, as the groups are non-capturing).
    for positional in re.findall(r"(?:\s)\w+(?:\s|$)", parser):
        arg_elems.append(moin_page.argument(children=[positional]))
    # Keyword args: name=value pairs.
    for key, val in re.findall(r"(\w+)=(\w+)", parser):
        arg_elems.append(moin_page.argument(attrib={moin_page.name: key}, children=[val]))
    arguments = moin_page.arguments(children=arg_elems)
    self.open_moin_page_node(moin_page.part(
        children=[arguments],
        attrib={moin_page.content_type: "x-moin/format;name={0}".format(parser.split(' ')[0])}))
def handle_nowiki(self, elem, page):
    """{{{* where * may be #!wiki, #!csv, #!highlight python, "", etc., or an invalid argument.

    Replaces the children of *elem* with a DOM subtree produced by the
    converter or formatter named in the nowiki arguments; unknown names
    are flagged via self.invalid_args and rendered as plain text.
    """
    logging.debug("handle_nowiki elem: %r" % elem)
    marker_len, all_nowiki_args, content = elem._children
    nowiki_args = all_nowiki_args[0].strip()
    # remove all the old children of the element, new children will be added
    elem.remove_all()
    if not nowiki_args:
        # input similar to: {{{\ntext\n}}}\n
        blockcode = moin_page.blockcode(children=(content, ))
        elem.append(blockcode)
        return
    # Split "#!name optional args" into its name and the rest.
    if nowiki_args.startswith('#!') and len(nowiki_args) > 2:
        arguments = nowiki_args[2:].split(' ', 1)  # skip leading #!
        nowiki_name = arguments[0]
        optional_args = arguments[1] if len(arguments) > 1 else None
    else:
        nowiki_name = optional_args = None
    lexer = None
    if nowiki_name in set(('diff', 'cplusplus', 'python', 'java', 'pascal', 'irc')):
        # make old style markup similar to {{{#!python like new style {{{#!highlight python
        optional_args = nowiki_name if not optional_args else nowiki_name + ' ' + optional_args
        nowiki_name = 'highlight'
    if nowiki_name == u'highlight':
        # TODO: support moin 1.9 options like numbers=on start=222 step=10
        # NOTE(review): optional_args may be None for bare "#!highlight"
        # with no lexer name — .split() would raise; confirm callers
        # guarantee an argument here.
        optional_args = optional_args.split()[0]  # ignore all parameters except lexer name
        try:
            lexer = pygments.lexers.get_lexer_by_name(optional_args)
        except ClassNotFound:
            try:
                # Second chance: the argument may be a mimetype.
                lexer = pygments.lexers.get_lexer_for_mimetype(optional_args)
            except ClassNotFound:
                # Unknown lexer: flag the bad args, fall back to plain text.
                self.invalid_args(elem, all_nowiki_args)
                lexer = pygments.lexers.get_lexer_by_name('text')
    if lexer:
        blockcode = moin_page.blockcode(attrib={moin_page.class_: 'highlight'})
        pygments.highlight(content, lexer, TreeFormatter(), blockcode)
        elem.append(blockcode)
        return
    if nowiki_name in ('csv', 'text/csv'):
        # TODO: support moin 1.9 options: quotechar, show, hide, autofilter, name, link, static_cols, etc
        delim = None
        if optional_args:
            # Explicit delimiter=X wins; otherwise a single-character
            # first positional argument is taken as the delimiter.
            m = re.search('delimiter=(.?)', optional_args)
            if m and m.group(1):
                delim = m.group(1)
            if not delim:
                delim = optional_args.split()[0]  # ignore all parameters except a delimiter in first position
                if len(delim) > 1:
                    delim = None
        sep = delim or u';'
        content = content.split('\n')
        head = content[0].split(sep)
        rows = [x.split(sep) for x in content[1:]]
        csv_builder = TableMixin()
        table = csv_builder.build_dom_table(rows, head=head, cls='moin-csv-table moin-sortable')
        elem.append(table)
        return
    if nowiki_name in ('wiki', 'text/x.moin.wiki',):
        from .moinwiki_in import Converter as moinwiki_converter
        moinwiki = moinwiki_converter()
        lines = normalize_split_text(content)
        lines = _Iter(lines)
        # reparse arguments from original: {{{#!wiki solid/orange (style="color: red;")
        wiki_args = parse_arguments(all_nowiki_args[0][2:])
        if len(wiki_args.positional) > 1:
            # Extra positional args become CSS classes on the wrapper.
            wiki_args.keyword['class'] = u' '.join(wiki_args.positional[1:])
        del wiki_args.positional[:]
        body = moinwiki.parse_block(lines, wiki_args)
        page = moin_page.page(children=(body, ))
        elem.append(page)
        return
    if nowiki_name in ('creole', 'text/x.moin.creole'):
        from .creole_in import Converter as creole_converter
        creole = creole_converter()
        lines = normalize_split_text(content)
        lines = _Iter(lines)
        body = creole.parse_block(lines, optional_args)
        page = moin_page.page(children=(body, ))
        elem.append(page)
        return
    if nowiki_name in ('rst', 'text/x-rst'):
        from .rst_in import Converter as rst_converter
        rst = rst_converter()
        page = rst(content, contenttype=u'text/x-rst;charset=utf-8')
        elem.append(page)
        return
    if nowiki_name in ('docbook', 'application/docbook+xml'):
        from .docbook_in import Converter as docbook_converter
        docbook = docbook_converter()
        page = docbook(content, contenttype=u'application/docbook+xml;charset=utf-8')
        elem.append(page)
        return
    if nowiki_name in ('markdown', 'text/x-markdown'):
        from .markdown_in import Converter as markdown_converter
        markdown = markdown_converter()
        page = markdown(content, contenttype=u'text/x-markdown;charset=utf-8')
        elem.append(page)
        return
    if nowiki_name in ('mediawiki', 'text/x-mediawiki'):
        from .mediawiki_in import Converter as mediawiki_converter
        mediawiki = mediawiki_converter()
        page = mediawiki(content, optional_args)
        elem.append(page)
        return
    if nowiki_name in ('html', 'HTML', 'text/html'):
        from .html_in import Converter as html_converter
        html = html_converter()
        page = html(content, optional_args)
        elem.append(page)
        return
    # Unknown parser name: flag the bad args and render as plain text.
    self.invalid_args(elem, all_nowiki_args)
    lexer = pygments.lexers.get_lexer_by_name('text')
    blockcode = moin_page.blockcode(attrib={moin_page.class_: 'highlight'})
    pygments.highlight(content, lexer, TreeFormatter(), blockcode)
    elem.append(blockcode)
    return