def __init__(self, request, page): self.parser = Parser("", request) # Format the empty string, making it set its internal data (ugh!). out = cStringIO.StringIO() backup = sys.stdout, request.write sys.stdout, request.write = out, out.write self.parser.format(page.formatter) sys.stdout, request.write = backup self.include_re = re.compile("\[\[Include(?:\(.*?\))?\]\]") self.macro = macro.Macro(self.parser) # This is really deep and cool black magic. Basically, it creates # a local copy of the function macro.Include.execute that behaves # exactly like the original one, but with a different "Page" # global. This allows us to follow changes in the Include macro # without much trouble. from MoinMoin.macro.Include import execute func_globals = {} func_globals.update(execute.func_globals) class IncludePage(Page): incparser = self def send_page(self, request, msg=None, **keywords): request.write(self.incparser._parse(self.get_raw_body())) func_globals["Page"] = IncludePage self.execute = new.function(execute.func_code, func_globals, execute.func_name, execute.func_defaults)
class IncludeParser:
    """Wiki parser wrapper that intercepts [[Include]] macros.

    Builds an internal Parser primed with the page's formatter and a
    locally-patched copy of macro.Include.execute whose "Page" global
    routes included pages back through this parser.
    """

    def __init__(self, request, page):
        """Set up the internal parser and the patched Include macro.

        @param request: current MoinMoin request object
        @param page: page whose formatter is used to prime the parser
        """
        self.parser = Parser("", request)
        # Format the empty string, making it set its internal data (ugh!).
        # Parser.format() writes through sys.stdout/request.write, so both
        # are redirected into a scratch buffer and restored afterwards.
        out = cStringIO.StringIO()
        backup = sys.stdout, request.write
        sys.stdout, request.write = out, out.write
        self.parser.format(page.formatter)
        sys.stdout, request.write = backup
        # Matches [[Include]] and [[Include(...)]] markup. Raw string so
        # the backslash escapes are passed to re verbatim (same pattern
        # as before, but no invalid-escape-sequence warning).
        self.include_re = re.compile(r"\[\[Include(?:\(.*?\))?\]\]")
        self.macro = macro.Macro(self.parser)
        # This is really deep and cool black magic. Basically, it creates
        # a local copy of the function macro.Include.execute that behaves
        # exactly like the original one, but with a different "Page"
        # global. This allows us to follow changes in the Include macro
        # without much trouble.
        from MoinMoin.macro.Include import execute
        func_globals = {}
        func_globals.update(execute.func_globals)
        class IncludePage(Page):
            # Bound to the enclosing IncludeParser so included pages are
            # rendered through its _parse (defined elsewhere in class).
            incparser = self
            def send_page(self, request, msg=None, **keywords):
                request.write(self.incparser._parse(self.get_raw_body()))
        func_globals["Page"] = IncludePage
        # Rebuild execute() with the patched globals (Python 2 'new' module).
        self.execute = new.function(execute.func_code, func_globals,
                                    execute.func_name, execute.func_defaults)
def format_wikitext(request, data, parser=None):
    """Render wiki markup *data* through the request's formatter.

    @param request: current request; its page and formatter are wired
                    together before parsing
    @param data: raw wiki markup to render
    @param parser: optional existing parser to reuse; a fresh Parser is
                   created when omitted
    @return: rendered output with surrounding whitespace stripped
    """
    # The parser reaches the page and formatter through the request,
    # so point them at each other first.
    request.page.formatter = request.formatter
    request.formatter.page = request.page

    if not parser:
        parser = Parser(data, request)
    else:
        # Reuse the caller-supplied parser, retargeting it at new text.
        parser.raw = data
        parser.request = request

    # Do not store pagelinks for values in metadata listings
    plstore = getattr(request.formatter, '_store_pagelinks', 0)
    request.formatter._store_pagelinks = 0
    parser.formatter = request.formatter

    # No line anchors of any type to table cells
    request.page.formatter.in_p = 1
    parser._line_anchordef = lambda: ''

    # Do not parse macros from revision pages. For some reason,
    # it spawns multiple requests, which are not finished properly,
    # thus littering a number of readlocks. Besides, the macros do not
    # return anything useful anyway for pages they don't recognize
    if '?action=recall' in request.page.page_name:
        parser._macro_repl = lambda x: x

    out = parser.scan(data, inhibit_p=True)
    # Restore the caller's pagelink-storing flag before returning.
    request.formatter._store_pagelinks = plstore
    return out.strip()
def format(self, formatter):
    """Render the section body inside a classed <div>.

    Picks the parser matching the section's declared format, switches
    the request content language for the duration of the rendering,
    and restores it afterwards.
    """
    self.header = SectionHeader(self.request, self.text)
    parser_class = self.importParser(self.sectionFormat())
    body_text = self.text[self.header.length():]
    section_parser = parser_class(body_text, self.request)

    # Render under the section's own content language, restoring the
    # previous one no matter what the parser does.
    previous_language = self.request.content_lang
    self.request.setContentLanguage(self.sectionLanguage())
    try:
        # XXX Should use formatter.section when its available
        self.request.write(formatter.rawHTML(
            '<div class="%s">\n' % self.sectionClass()))
        section_parser.format(formatter)
        self.request.write(formatter.rawHTML('</div>\n'))
    finally:
        self.request.setContentLanguage(previous_language)
def __init__(self, raw, request, **kw):
    """Initialize the DocBook parser on top of XsltParser.

    Records the paths of the DocBook stylesheet and its pre-compiled
    form under the configured docbook_html_dir, and embeds a wiki
    parser for post-processing the XSLT output.
    """
    XsltParser.__init__(self, raw, request)

    # Stylesheet locations relative to the configured directory.
    stylesheet_dir = request.cfg.docbook_html_dir
    self.db_xsl = os.path.join(stylesheet_dir, 'docbook.xsl')
    self.db_compiled_xsl = os.path.join(stylesheet_dir, 'db_compiled.dat')

    self.wikiParser = WikiParser(raw=self.raw, request=self.request,
                                 pretty_url=1)
    self.key = 'docbook'
def moin2doku(pagename, text, randomID=None):
    """Render MoinMoin markup *text* for page *pagename* and return it
    as a unicode string.

    Uses the module-level request/formatter/Parser/Page objects.

    @param pagename: name of the page being converted
    @param text: raw MoinMoin markup
    @param randomID: optional id forwarded to the formatter (needed for
                     macros)
    @return: rendered output decoded from UTF-8 (unicode)
    """
    parser = Parser(text, request)
    formatter.setRandomID(randomID)  # this needed for macros
    request.formatter = formatter
    p = Page(request, pagename)
    formatter.setPage(p)

    output = StringIO.StringIO()
    # wrap sys.stdout as RequestCLI has no interface to say where to output
    stdout = sys.stdout
    sys.stdout = output
    try:
        parser.format(formatter)
    finally:
        # Always restore stdout, even if the parser raises; otherwise a
        # single failing page leaves the process writing into our buffer.
        sys.stdout = stdout

    return unicode(output.getvalue().decode('utf-8'))
def do_macro(request, args, **kw):
    """Build the MetaTable HTML for the pages matched by *args*.

    @param request: current request object
    @param args: raw macro argument string, parsed by metatable_parseargs
    @param kw: rendering options; recognized keys include 'silent'
               (terse no-match message) and 'editlink' (footer edit link,
               default True)
    @return: rendered HTML as a single string
    """
    formatter = request.formatter
    _ = request.getText
    out = list()

    # Note, metatable_parseargs deals with permissions
    pagelist, metakeys, styles = metatable_parseargs(request, args,
                                                     get_all_keys=True)

    # No data -> bail out quickly, Scotty
    if not pagelist:
        out.append(formatter.linebreak() + u'<div class="metatable">' +
                   formatter.table(1))
        if kw.get('silent'):
            out.extend(t_cell(request, request.page,
                              ["%s" % _("No matches")]))
        else:
            out.extend(t_cell(request, request.page,
                              ["%s '%s'" % (_("No matches for"), args)]))
        out.append(formatter.table(0) + u'</div>')
        return "".join(out)

    parser = Parser('', request)
    # Caller-supplied keyword options override the raw argument string.
    # (Portable replacement for dict({...}.items() + kw.items()), which
    # only works on Python 2; also drops an unused 'pagename' local.)
    options = {'args': args}
    options.update(kw)
    divfmt = {'class': "metatable",
              'data-options': quote(json.dumps(options))}

    out.append(formatter.div(1, **divfmt))

    # We're sure the user has the access to the page, so don't check
    out.extend(construct_table(request, pagelist, metakeys,
                               checkAccess=False, styles=styles,
                               options=options, parser=parser))

    def action_link(action, linktext, args):
        # Footer link invoking the given meta action on the current page.
        req_url = request.script_root + "/" + \
            url_escape(request.page.page_name) + \
            '?action=' + action + '&args=' + url_escape(args)
        return '<a href="%s" class="meta_footer_link">[%s]</a>\n' % \
            (req_url, _(linktext))

    # If the user has no write access to this page, omit editlink
    if kw.get('editlink', True):
        out.append(action_link('MetaEdit', 'edit', args))

    out.append(action_link('metaCSV', 'csv', args))
    out.append(action_link('metaPackage', 'zip', args))
    out.append(formatter.div(0))

    return "".join(out)
def do(self, text, output):
    """Render *text* with the minimal request/page stubs, then convert
    the rendered result back and assert it round-trips to the input."""
    text = text.lstrip('\n')
    output = output.strip('\n')

    mini_request = MinimalRequest(self.request)
    mini_page = MinimalPage()
    fmt = Formatter(mini_request)
    fmt.setPage(mini_page)
    Parser(text, mini_request).format(fmt)
    rendered = ''.join(mini_request.result).strip('\n')

    # Direct comparison against the expected output is disabled; only
    # the round-trip equality below is enforced.
    #assert rendered == output
    converted = self.do_convert_real([mini_request, mini_page.page_name,
                                      rendered])
    assert text == converted
def formatMarkup(self, request, text, percent):
    """ Formats the text using the wiki parser/formatter.

    This raises an exception if a text needs itself to be translated,
    this could possibly happen with macros.

    @param request: the request object
    @param text: the text to format
    @param percent: True if result is used as left-side of a % operator and
                    thus any GENERATED % needs to be escaped as %%.
    """
    logging.debug("formatting: %r" % text)

    from MoinMoin.Page import Page
    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
    # The percent variant of the HTML formatter escapes generated '%'
    # as '%%' so the result survives a later % interpolation.
    if percent:
        from MoinMoin.formatter.text_html_percent import Formatter
    else:
        from MoinMoin.formatter.text_html import Formatter

    # Capture all rendering output in a buffer via request.redirect.
    out = StringIO()
    request.redirect(out)
    parser = WikiParser(text, request, line_anchors=False)
    formatter = Formatter(request, terse=True)
    # Save any pre-existing request.formatter so it can be restored.
    reqformatter = None
    if hasattr(request, "formatter"):
        reqformatter = request.formatter
    request.formatter = formatter
    # Placeholder page name for rendering i18n strings.
    p = Page(request, "$$$$i18n$$$$")
    formatter.setPage(p)
    parser.format(formatter)
    text = out.getvalue()
    # Undo the formatter swap: delete the attribute if we created it,
    # otherwise put the original formatter back.
    if reqformatter is None:
        del request.formatter
    else:
        request.formatter = reqformatter
    request.redirect()
    text = text.strip()
    return text
def formatMarkup(self, request, text, percent):
    """ Formats the text using the wiki parser/formatter.

    This raises an exception if a text needs itself to be translated,
    this could possibly happen with macros.

    @param request: the request object
    @param text: the text to format
    @param percent: True if result is used as left-side of a % operator and
                    thus any GENERATED % needs to be escaped as %%.
    """
    logging.debug("formatting: %r" % text)

    from MoinMoin.Page import Page
    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
    if percent:
        from MoinMoin.formatter.text_html_percent import Formatter
    else:
        from MoinMoin.formatter.text_html import Formatter

    # Collect all rendering output into a buffer.
    buf = StringIO()
    request.redirect(buf)

    wiki_parser = WikiParser(text, request, line_anchors=False)
    html_formatter = Formatter(request, terse=True)

    # Remember whatever formatter the request had before we swap ours in.
    saved_formatter = getattr(request, 'formatter', None)
    request.formatter = html_formatter

    dummy_page = Page(request, "$$$$i18n$$$$")
    html_formatter.setPage(dummy_page)
    wiki_parser.format(html_formatter)
    text = buf.getvalue()

    # Undo the swap: drop the attribute if there was none, else restore.
    if saved_formatter is None:
        del request.formatter
    else:
        request.formatter = saved_formatter
    request.redirect()

    return text.strip()
def t_cell(macro, value, head=0):
    """Write one table cell for *value* to the macro's request.

    @param macro: macro object providing .request and .formatter
    @param value: cell content; decoded from config.charset if not
                  already unicode
    @param head: when true, render the value as a page link (an optional
                 '?query' suffix becomes the link's querystr) instead of
                 parsing it as wiki markup
    """
    out = macro.request
    style = dict()
    style['class'] = 'meta_cell'

    out.write(macro.formatter.table_cell(1, attrs=style))

    if not isinstance(value, unicode):
        value = unicode(value, config.charset)
    value = value.strip()

    if head:
        kw = {}
        if '?' in value:
            # Split only on the FIRST '?': the query part may itself
            # contain further '?' characters, and a bare split('?')
            # would raise ValueError on two-or-more occurrences.
            value, query = value.split('?', 1)
            kw['querystr'] = query
        out.write(macro.formatter.pagelink(1, value, **kw))
        out.write(macro.formatter.text(value))
        out.write(macro.formatter.pagelink(0))
    else:
        out.page.formatter = out.formatter
        parser = Parser(value, out)
        # No line anchors of any type to table cells
        out.page.formatter.in_p = 1
        parser._line_anchordef = lambda: ''
        # Using StringIO in order to strip the output
        value = StringIO.StringIO()
        out.redirect(value)
        # Produces output on a single table cell
        out.page.format(parser)
        out.redirect()
        out.write(value.getvalue().strip())
def t_cell(macro, value):
    """Write one table cell containing *value* rendered as wiki markup."""
    req = macro.request

    cell_attrs = dict()
    cell_attrs['class'] = 'meta_cell'
    req.write(macro.formatter.table_cell(1, attrs=cell_attrs))

    stripped = value.strip()

    req.page.formatter = req.formatter
    cell_parser = Parser(stripped, req)
    # No line anchors of any type to table cells
    req.page.formatter.in_p = 1
    cell_parser._line_anchordef = lambda: ''

    # Using StringIO in order to strip the output
    buf = StringIO.StringIO()
    req.redirect(buf)
    # Produces output on a single table cell
    req.page.format(cell_parser)
    req.redirect()

    req.write(buf.getvalue().strip())
class Cfg:
    # Minimal stand-in for the wiki configuration object; just the
    # attributes touched when running the parser from the command line.
    _site_plugin_lists = {}
    _plugin_modules = []
    bang_meta = False


class Request:
    # Minimal request stub for driving Parser/Formatter outside the wiki.
    getText = None
    form = None
    cfg = Cfg()
    pragma = {}

    def write(self, text):
        # Emit formatter output straight to stdout (Python 2 print
        # statement; trailing comma suppresses the newline).
        print text,


class Page:
    # Minimal page stub — presumably only these two attributes are read
    # by the parser/formatter during a CLI run; confirm before extending.
    hilite_re = None
    page_name = 'arst'


if __name__ == '__main__':
    # Read wiki markup from the file named on the command line, or from
    # stdin when no argument is given, and format it to stdout.
    req = Request()
    if len(sys.argv) == 1:
        contents = sys.stdin.read()
    else:
        contents = open(sys.argv[1]).read()
    p = Parser(contents, req)
    f = Formatter(req)
    f.page = Page()
    p.format(f)
def execute(pagename, request):
    """Render the MyPages management view for the logged-in user.

    Requires a valid login and an existing homepage; offers a form for
    creating access-controlled sub pages under the user's homepage.

    @param pagename: name of the page the action was invoked on
    @param request: current request object
    """
    from MoinMoin import wikiutil
    from MoinMoin.Page import Page

    _ = request.getText
    thispage = Page(request, pagename)

    if request.user.valid:
        username = request.user.name
    else:
        username = ''

    if not username:
        request.theme.add_msg(_('Please log in first.'), "error")
        return thispage.send_page()

    userhomewiki = request.cfg.user_homewiki
    if userhomewiki != 'Self' and userhomewiki != request.cfg.interwikiname:
        # User's homepage lives on another wiki: send them there.
        interwiki = wikiutil.getInterwikiHomePage(request, username=username)
        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_interwiki(request, *interwiki)
        wikiurl = wikiutil.mapURL(request, wikiurl)
        homepageurl = wikiutil.join_wiki(wikiurl, wikitail)
        # NOTE(review): there is no return after this redirect, so the
        # rest of the function still runs — confirm this is intended.
        request.http_redirect('%s?action=MyPages' % homepageurl)

    homepage = Page(request, username)
    if not homepage.exists():
        request.theme.add_msg(_('Please first create a homepage before creating additional pages.'), "error")
        return homepage.send_page()

    # Wiki-markup body of the management page; %(username)s placeholders
    # are filled from locals() below.
    pagecontent = _("""\
You can add some additional sub pages to your already existing homepage here.

You can choose how open to other readers or writers those pages shall be,
access is controlled by group membership of the corresponding group page.

Just enter the sub page's name and click on the button to create a new page.

Before creating access protected pages, make sure the corresponding group page
exists and has the appropriate members in it. Use HomepageGroupsTemplate for creating
the group pages.

||'''Add a new personal page:'''||'''Related access control list group:'''||
||<<NewPage(HomepageReadWritePageTemplate,read-write page,%(username)s)>>||[[%(username)s/ReadWriteGroup]]||
||<<NewPage(HomepageReadPageTemplate,read-only page,%(username)s)>>||[[%(username)s/ReadGroup]]||
||<<NewPage(HomepagePrivatePageTemplate,private page,%(username)s)>>||%(username)s only||

""")
    pagecontent = pagecontent % locals()
    pagecontent = pagecontent.replace('\n', '\r\n')

    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser

    request.emit_http_headers()
    # This action generate data using the user language
    request.setContentLanguage(request.lang)
    request.theme.send_title(_('MyPages management'), page=homepage)

    parser = WikiParser(pagecontent, request)
    # Render against a throwaway page object.
    p = Page(request, "$$$")
    request.formatter.setPage(p)
    parser.format(request.formatter)

    # Start content - IMPORTANT - without content div, there is no direction support!
    request.write(request.formatter.startContent("content"))
    request.write(request.formatter.endContent())
    request.theme.send_footer(homepage.page_name)
    request.theme.send_closing_html()
def __init__(self, raw, request, **kw):
    """Initialize by delegating straight to ParserBase.

    @param raw: raw page text to parse
    @param request: current request object
    @param kw: extra keyword arguments, passed through unchanged
    """
    ParserBase.__init__(self, raw, request, **kw)
def _line_anchordef(self):
    """Return a line anchor definition, suppressed inside <dd> content.

    Delegates to wikiParser._line_anchordef unless self.in_dd is set
    (presumably while the parser is inside a definition-list item —
    confirm against the enclosing parser state).
    """
    return '' if self.in_dd else wikiParser._line_anchordef(self)
class Parser(XsltParser):
    """ Send XML file formatted via XSLT. """
    # Enable caching of the rendered result.
    caching = 1
    Dependencies = Dependencies

    def __init__(self, raw, request, **kw):
        XsltParser.__init__(self, raw, request)

        # relative path to docbook.xsl and compiled_xsl
        docbook_html_directory = request.cfg.docbook_html_dir
        self.db_xsl = os.path.join(docbook_html_directory, 'docbook.xsl')
        self.db_compiled_xsl = os.path.join(docbook_html_directory, 'db_compiled.dat')

        # Embedded wiki parser used by parse_result() to resolve
        # attachments and WikiNames in the XSLT output.
        self.wikiParser = WikiParser(raw=self.raw, request=self.request, pretty_url=1)

        self.key = 'docbook'

    def format(self, formatter):
        # Share the active formatter with the embedded wiki parser before
        # delegating the rendering to the XSLT machinery.
        self.wikiParser.formatter = formatter
        XsltParser.format(self, formatter)

    def append_stylesheet(self):
        """" virtual function, for docbook parser """
        abs_db_xsl = os.path.abspath(self.db_xsl)
        abs_db_compiled_xsl = os.path.abspath(self.db_compiled_xsl)

        # same as path.exists, but also test if it is a file
        if not os.path.isfile(abs_db_compiled_xsl):
            _compile_xsl(abs_db_xsl, abs_db_compiled_xsl)

        assert os.path.isfile(abs_db_compiled_xsl)

        self.processor.appendStylesheetInstance(
            cPickle.load(file(abs_db_compiled_xsl, 'rb')))

    def parse_result(self, result):
        """ additional parsing to the resulting XSLT'ed result (resultString) before saving

            will do:
                BASIC CLEAN UP   : remove unnecessary HTML tags
                RESOLVE IMG SRC  : fix src to find attachment
                RESOLVE WikiNames: if a word is a valid wikiname & a valid
                                   wikipage, replace word with hyperlink
        """
        # BASIC CLEAN UP
        # remove from beginning until end of body tag
        found = re.search('<body.*?>', result)
        if found:
            result = result[found.end():]

        # remove everything after & including </body>
        found = result.rfind('</body>')
        if found != -1:
            result = result[:found]

        # RESOLVE IMG SRC
        found = re.finditer('<img.*?>', result)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if splitResult[index].startswith('<img'):
                    found = re.search('src="(?P<source>.*?)"',
                                      splitResult[index])
                    imageSrc = found.group('source')
                    imageAlt = None  # save alt
                    found = re.search('alt="(?P<alt>.*?)"', splitResult[index])
                    if found:
                        imageAlt = found.group('alt')
                    # Rewrite the src to point at the page attachment.
                    splitResult[index] = self.wikiParser.attachment(
                        ('attachment:' + imageSrc, ""))
                    if imageAlt:  # restore alt
                        splitResult[index] = re.sub(
                            'alt=".*?"', 'alt="%s"' % imageAlt,
                            splitResult[index])
            result = ''.join(splitResult)

        # RESOLVE WikiNames
        #    if a word is a valid wikiname & a valid wikipage,
        #    replace word with hyperlink
        found = re.finditer(self.wikiParser.word_rule, result,
                            re.UNICODE | re.VERBOSE)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if (re.match(self.wikiParser.word_rule, splitResult[index],
                             re.UNICODE | re.VERBOSE)
                        and Page.Page(
                            self.request, splitResult[index]).exists()):
                    splitResult[index] = self.wikiParser._word_repl(
                        splitResult[index])
            result = ''.join(splitResult)

        # remove stuff that fail HTML 4.01 Strict verification
        # remove unsupported attributes
        result = re.sub(' target=".*?"| type=".*?"', '', result)
        result = re.sub('<hr .*?>', '<hr>', result)

        # remove <p>...</p> inside <a>...</a> or <caption>...</caption>
        found = re.finditer('<a href=".*?</a>|<caption>.*?</caption>',
                            result)  # XXX re.DOTALL)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if (splitResult[index].startswith('<a href="')
                        or splitResult[index].startswith('<caption>')):
                    splitResult[index] = splitResult[index].replace(
                        '<p>', '').replace('</p>', '')
            result = ''.join(splitResult)

        return result
class Parser(XsltParser):
    """ Send XML file formatted via XSLT. """
    # Enable caching of the rendered result.
    caching = 1
    Dependencies = Dependencies

    def __init__(self, raw, request, **kw):
        XsltParser.__init__(self, raw, request)

        # relative path to docbook.xsl and compiled_xsl
        docbook_html_directory = request.cfg.docbook_html_dir
        self.db_xsl = os.path.join(docbook_html_directory, 'docbook.xsl')
        self.db_compiled_xsl = os.path.join(docbook_html_directory, 'db_compiled.dat')

        # Embedded wiki parser used by parse_result() to resolve
        # attachments and WikiNames in the XSLT output.
        self.wikiParser = WikiParser(raw=self.raw, request=self.request, pretty_url=1)

        self.key = 'docbook'

    def format(self, formatter, **kw):
        # Share the active formatter with the embedded wiki parser before
        # delegating the rendering to the XSLT machinery.
        self.wikiParser.formatter = formatter
        XsltParser.format(self, formatter)

    def append_stylesheet(self):
        """" virtual function, for docbook parser """
        abs_db_xsl = os.path.abspath(self.db_xsl)
        abs_db_compiled_xsl = os.path.abspath(self.db_compiled_xsl)

        # same as path.exists, but also test if it is a file
        if not os.path.isfile(abs_db_compiled_xsl):
            _compile_xsl(abs_db_xsl, abs_db_compiled_xsl)

        assert os.path.isfile(abs_db_compiled_xsl)

        self.processor.appendStylesheetInstance(cPickle.load(file(abs_db_compiled_xsl, 'rb')))

    def parse_result(self, result):
        """ additional parsing to the resulting XSLT'ed result (resultString) before saving

            will do:
                BASIC CLEAN UP   : remove unnecessary HTML tags
                RESOLVE IMG SRC  : fix src to find attachment
                RESOLVE WikiNames: if a word is a valid wikiname & a valid
                                   wikipage, replace word with hyperlink
        """
        # BASIC CLEAN UP
        # remove from beginning until end of body tag
        found = re.search('<body.*?>', result)
        if found:
            result = result[found.end():]

        # remove everything after & including </body>
        found = result.rfind('</body>')
        if found != -1:
            result = result[:found]

        # RESOLVE IMG SRC
        found = re.finditer('<img.*?>', result)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if splitResult[index].startswith('<img'):
                    found = re.search('src="(?P<source>.*?)"', splitResult[index])
                    imageSrc = found.group('source')
                    imageAlt = None  # save alt
                    found = re.search('alt="(?P<alt>.*?)"', splitResult[index])
                    if found:
                        imageAlt = found.group('alt')
                    # Rewrite the src to point at the page attachment.
                    splitResult[index] = self.wikiParser.attachment(('attachment:' + imageSrc, ""))
                    if imageAlt:  # restore alt
                        splitResult[index] = re.sub('alt=".*?"', 'alt="%s"' % imageAlt, splitResult[index])
            result = ''.join(splitResult)

        # RESOLVE WikiNames
        #    if a word is a valid wikiname & a valid wikipage,
        #    replace word with hyperlink
        found = re.finditer(self.wikiParser.word_rule, result, re.UNICODE|re.VERBOSE)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if (re.match(self.wikiParser.word_rule, splitResult[index], re.UNICODE|re.VERBOSE)
                        and Page.Page(self.request, splitResult[index]).exists()):
                    splitResult[index] = self.wikiParser._word_repl(splitResult[index])
            result = ''.join(splitResult)

        # remove stuff that fail HTML 4.01 Strict verification
        # remove unsupported attributes
        result = re.sub(' target=".*?"| type=".*?"', '', result)
        result = re.sub('<hr .*?>', '<hr>', result)

        # remove <p>...</p> inside <a>...</a> or <caption>...</caption>
        found = re.finditer('<a href=".*?</a>|<caption>.*?</caption>', result)  # XXX re.DOTALL)
        if found:
            splitResult = _splitResult(found, result)
            for index in range(len(splitResult)):
                if (splitResult[index].startswith('<a href="')
                        or splitResult[index].startswith('<caption>')):
                    splitResult[index] = splitResult[index].replace('<p>', '').replace('</p>', '')
            result = ''.join(splitResult)

        return result
def execute(pagename, request):
    """Render the MyPages management view for the logged-in user.

    Requires a valid login and an existing homepage; offers a form for
    creating access-controlled sub pages under the user's homepage.

    @param pagename: name of the page the action was invoked on
    @param request: current request object
    """
    from MoinMoin import wikiutil
    from MoinMoin.Page import Page

    _ = request.getText
    thispage = Page(request, pagename)

    if request.user.valid:
        username = request.user.name
    else:
        username = ''

    if not username:
        request.theme.add_msg(_('Please log in first.'), "error")
        return thispage.send_page()

    userhomewiki = request.cfg.user_homewiki
    if userhomewiki != 'Self' and userhomewiki != request.cfg.interwikiname:
        # User's homepage lives on another wiki: send them there.
        interwiki = wikiutil.getInterwikiHomePage(request, username=username)
        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_interwiki(
            request, *interwiki)
        wikiurl = wikiutil.mapURL(request, wikiurl)
        homepageurl = wikiutil.join_wiki(wikiurl, wikitail)
        # NOTE(review): there is no return after this redirect, so the
        # rest of the function still runs — confirm this is intended.
        request.http_redirect('%s?action=MyPages' % homepageurl)

    homepage = Page(request, username)
    if not homepage.exists():
        request.theme.add_msg(
            _('Please first create a homepage before creating additional pages.'
              ), "error")
        return homepage.send_page()

    # Wiki-markup body of the management page; %(username)s placeholders
    # are filled from locals() below.
    pagecontent = _("""\
You can add some additional sub pages to your already existing homepage here.

You can choose how open to other readers or writers those pages shall be,
access is controlled by group membership of the corresponding group page.

Just enter the sub page's name and click on the button to create a new page.

Before creating access protected pages, make sure the corresponding group page
exists and has the appropriate members in it. Use HomepageGroupsTemplate for creating
the group pages.

||'''Add a new personal page:'''||'''Related access control list group:'''||
||<<NewPage(HomepageReadWritePageTemplate,read-write page,%(username)s)>>||[[%(username)s/ReadWriteGroup]]||
||<<NewPage(HomepageReadPageTemplate,read-only page,%(username)s)>>||[[%(username)s/ReadGroup]]||
||<<NewPage(HomepagePrivatePageTemplate,private page,%(username)s)>>||%(username)s only||

""")
    pagecontent = pagecontent % locals()
    pagecontent = pagecontent.replace('\n', '\r\n')

    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser

    # This action generate data using the user language
    request.setContentLanguage(request.lang)
    request.theme.send_title(_('MyPages management'), page=homepage)

    parser = WikiParser(pagecontent, request)
    # Render against a throwaway page object.
    p = Page(request, "$$$")
    request.formatter.setPage(p)
    parser.format(request.formatter)

    # Start content - IMPORTANT - without content div, there is no direction support!
    request.write(request.formatter.startContent("content"))
    request.write(request.formatter.endContent())
    request.theme.send_footer(homepage.page_name)
    request.theme.send_closing_html()