def title(self, d):
    """ Assemble the title (now using breadcrumbs)

    @param d: parameter dictionary
    @rtype: string
    @return: title html
    """
    _ = self.request.getText
    content = []
    if d['title_link']:
        # having a link means we have a (linked) pagename ONLY as title, not a message title
        # XXX this method is rather ugly and should be improved
        # Build one <li> breadcrumb per parent page segment, each linking
        # to the accumulated parent path.
        curpage = ''
        segments = d['page_name'].split('/') # was: title_text
        for s in segments[:-1]:
            curpage += s
            content.append("<li>%s</li>" % Page(self.request, curpage).link_to(self.request, s))
            curpage += '/'
        # Last segment becomes a backlink that triggers a full-text search.
        # NOTE(review): d['title_link'] goes into href unescaped -- presumably
        # pre-escaped by the caller; confirm.
        content.append(('<li><a class="backlink" title="%(title)s" href="%(href)s">%(text)s</a></li>') % {
            'title': _('Click to do a full-text search for this title'),
            'href': d['title_link'],
            'text': wikiutil.escape(segments[-1]),
        })
    else:
        # message/action title: plain escaped text, no links
        content.append('<li>%s</li>' % wikiutil.escape(d['title_text']))
    html = '''
<ul id="pagelocation">
%s
</ul>
''' % "".join(content)
    return html
def renderInPage(self):
    """ Render macro in page context

    The parser should decide what to do if this
    macro is placed in a paragraph context.

    Emits a POST form targeting the 'newpage' action, carrying the
    parent page, template and name template as hidden fields.
    """
    # NOTE(review): f is assigned but never used in this method
    f = self.formatter
    _ = self.request.getText
    # a '%s' in the name template means the user must type the page name
    requires_input = '%s' in self.nametemplate

    # TODO: better abstract this using the formatter
    html = [
        u'<form class="macro" method="POST" action="%s"><div>' % self.request.href(self.formatter.page.page_name),
        u'<input type="hidden" name="action" value="newpage">',
        # escape(..., 1) also quotes double quotes, safe for attribute values
        u'<input type="hidden" name="parent" value="%s">' % wikiutil.escape(self.parent, 1),
        u'<input type="hidden" name="template" value="%s">' % wikiutil.escape(self.template, 1),
        u'<input type="hidden" name="nametemplate" value="%s">' % wikiutil.escape(self.nametemplate, 1),
    ]
    if requires_input:
        html += [
            u'<input type="text" name="pagename" size="30">',
        ]
    html += [
        u'<input type="submit" value="%s">' % wikiutil.escape(self.label, 1),
        u'</div></form>',
    ]
    return self.formatter.rawHTML('\n'.join(html))
def revert_pages(request, editor, timestamp):
    """Revert every readable page whose latest edit since `timestamp`
    was made by `editor`, reporting progress to the response.

    @param editor: url-quoted repr() of the editor's interwiki data
    @param timestamp: cutoff time in seconds (converted to usecs)
    """
    _ = request.getText
    editor = wikiutil.url_unquote(editor)
    cutoff_usecs = int(timestamp * 1000000)
    seen = {}
    to_revert = []
    # walk the global edit log newest-first; the first entry per page is
    # its most recent edit, so only that one decides whether we revert
    for entry in editlog.EditLog(request).reverse():
        if entry.ed_time_usecs < cutoff_usecs:
            break
        if not request.user.may.read(entry.pagename):
            continue
        if entry.pagename in seen:
            continue
        seen[entry.pagename] = 1
        if repr(entry.getInterwikiEditorData(request)) == editor:
            to_revert.append(entry.pagename)
    escaped_names = [wikiutil.escape(p) for p in to_revert]
    request.write("Pages to revert:<br>%s" % "<br>".join(escaped_names))
    for pagename in to_revert:
        request.write("Begin reverting %s ...<br>" % wikiutil.escape(pagename))
        msg = revert_page(request, pagename, editor)
        if msg:
            request.write("<p>%s: %s</p>" % (
                Page.Page(request, pagename).link_to(request), msg))
        request.write("Finished reverting %s.<br>" % wikiutil.escape(pagename))
def send_link_rel(request, pagename):
    """Write one <link rel="Appendix"> header line per attachment of the page."""
    for fname in _get_files(request, pagename):
        view_url = getAttachUrl(pagename, fname, request, do='view')
        markup = u'<link rel="Appendix" title="%s" href="%s">\n' % (
            wikiutil.escape(fname, 1), wikiutil.escape(view_url, 1))
        request.write(markup)
def explore_args(args, kwAllowed):
    """ explore args for positional and keyword parameters

    Splits a comma separated argument string into positional parameters
    and a keyword dict (keys restricted to `kwAllowed`); all values are
    HTML-escaped for the formatter.image call.

    @return: (positional list, its length, keyword dict, its length)
    """
    parts = [part.strip() for part in args.split(',')] if args else []
    kw = {}   # keyword arguments for the formatter.image call
    pp = []   # positional parameters
    kw_count = 0
    if not kwAllowed:
        # no keywords permitted at all: report everything as empty
        return pp, 0, kw, 0
    for part in parts:
        if '=' not in part:
            pp.append(wikiutil.escape(part, quote=1))
            continue
        key, value = part.split('=', 1)
        # a URL containing '=' must not be mistaken for a keyword; the
        # first such argument is assumed to be the image itself
        if key.lower() not in kwAllowed and not kw_count and _is_URL(part):
            pp.append(wikiutil.escape(part, quote=1))
            continue
        kw_count += 1
        kw[str(key.lower())] = wikiutil.escape(value, quote=1)
    return pp, len(pp), kw, len(kw)
def renderInText(self):
    """ Render macro in text context

    The parser should decide what to do if this
    macro is placed in a paragraph context.

    Builds a link to an action on the current page; the link text
    defaults to the (translated) action name.
    """
    _ = self.request.getText
    # Default to show page instead of an error message (too lazy to
    # do an error message now).
    action = self.args.get('action', 'show')
    # label: explicit 'text' argument, else the action name, translated
    label = _(self.args.get('text', action), formatted=False)
    # escape both pieces of user input before building markup
    action = wikiutil.escape(action, 1)
    label = wikiutil.escape(label, 1)
    formatter = self.macro.formatter
    target = '%s?action=%s' % (
        wikiutil.quoteWikinameURL(formatter.page.page_name), action)
    return wikiutil.link_tag(self.request, target, text=label,
                             formatter=formatter)
def send_viewfile(pagename, request):
    """ Send an inline view of the requested attachment to the browser.

    Images are rendered as an <img> tag, text files as an escaped
    <pre> block; any other type gets a note plus a download link.

    @param pagename: page the attachment belongs to
    @param request: current request (response is written to it)
    """
    _ = request.getText

    filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # error msg already sent in _access_file

    # NOTE(review): filename is interpolated unescaped into the heading --
    # upstream sanitization of attachment names assumed; confirm.
    request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')

    mimetype, enc = mimetypes.guess_type(filename)  # renamed: don't shadow builtin 'type'
    if mimetype:
        if mimetype[:5] == 'image':
            # cache-buster only needed when the webserver serves the file directly
            timestamp = htdocs_access(request) and "?%s" % time.time() or ''
            request.write('<img src="%s%s" alt="%s">' % (
                getAttachUrl(pagename, filename, request, escaped=1), timestamp,
                wikiutil.escape(filename, 1)))
            return
        elif mimetype[:4] == 'text':
            # TODO: should use formatter here!
            request.write("<pre>")
            # Try to decode file contents. It may return junk, but we
            # don't have enough information on attachments.
            # Fix: close the file handle instead of leaking it.
            f = open(fpath, 'r')
            try:
                content = f.read()
            finally:
                f.close()
            content = wikiutil.decodeUnknownInput(content)
            content = wikiutil.escape(content)
            request.write(content)
            request.write("</pre>")
            return
    request.write('<p>' + _("Unknown file type, cannot display this attachment inline.") + '</p>')
    request.write('<a href="%s">%s</a>' % (
        getAttachUrl(pagename, filename, request, escaped=1),
        wikiutil.escape(filename)))
def attachment_inlined(self, url, text, **kw):
    """Render an inlined attachment as highlighted {{attachment:...}} markup."""
    url = wikiutil.escape(url)
    text = wikiutil.escape(text)
    if url == text:
        body = '{{attachment:%s}}' % url
    else:
        body = '{{attachment:%s|%s}}' % (url, text)
    return '<span style="background-color:#ffff11">%s</span>' % body
def html_head(self, d):
    """ Assemble html head

    @param d: parameter dictionary
    @rtype: unicode
    @return: html head
    """
    html = [
        # NOTE(review): two conflicting viewport metas are emitted; browsers
        # honor the later one -- confirm the first is still wanted
        u'<meta name="viewport" content="user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, width=device-width" />',
        u'<meta name="viewport" content="width=device-width, initial-scale=1.0">',
        u'<title>%(title)s - %(sitename)s</title>' % {
            'title': wikiutil.escape(d['title']),
            'sitename': wikiutil.escape(d['sitename']),
        },
        self.externalScript('common'),
        self.headscript(d), # Should move to separate .js file
        #self.guiEditorScript(d),
        self.html_stylesheets(d),
        # hardcoded static assets (jQuery + Bootstrap) served from moin_static195
        #/moin_static195/common/js/jquery-1.8.2.min.js
        u'<script language="javascript" type="Text/javascript" src="/moin_static195/common/js/jquery-1.8.2.min.js"></script>',
        u'<script language="javascript" type="Text/javascript" src="/moin_static195/common/bootstrap/js/bootstrap.min.js"></script>',
        # u'<script language="javascript" type="Text/javascript" src="/moin_static195/common/bootstrap/js/bootstrap-dropdown.js"></script>',
        u'<link rel="stylesheet" type="text/css" charset="utf-8" href="/moin_static195/common/bootstrap/css/bootstrap.css">',
        self.rsslink(d),
        #self.universal_edit_button(d),
    ]
    return '\n'.join(html)
def _create_user(request):
    """ Create a new user profile from POSTed form data.

    Validates ticket, TextCha, name, uniqueness and password before
    encoding the password onto the new profile.

    @return: translated error message string on failure, else None
    """
    collab_mode = getattr(request.cfg, 'collab_mode', False)
    _ = request.getText
    form = request.form
    if request.method != 'POST':
        return
    # anti-CSRF ticket check; silently bail out on failure
    if not wikiutil.checkTicket(request, form.get('ticket', '')):
        return
    if not TextCha(request).check_answer_from_form():
        return _('TextCha: Wrong answer! Go back and try again...')
    # Create user profile
    theuser = user.User(request, auth_method="new-user")
    # Require non-empty name
    try:
        if collab_mode:
            # collab mode: the email address doubles as the user name
            # NOTE(review): default [''] is a list, not a string -- confirm
            # wikiutil.clean_input copes with that when 'email' is absent
            name = wikiutil.clean_input(form.get('email', ['']))
            theuser.name = name.strip()
        else:
            theuser.name = form['name']
    except KeyError:
        return _("Empty user name. Please enter a user name.")
    # Don't allow creating users with invalid names
    if not user.isValidName(request, theuser.name):
        return _("""Invalid user name {{{'%s'}}}.
Name may contain any Unicode alpha numeric character, with optional one
space between words. Group page name is not allowed.""", wiki=True) % wikiutil.escape(theuser.name)
    # Name required to be unique. Check if name belong to another user.
    if user.getUserId(request, theuser.name):
        return _("This user name already belongs to somebody else.")
    # try to get the password and pw repeat
    password = form.get('password1', '')
    password2 = form.get('password2', '')
    # Check if password is given and matches with password repeat
    if password != password2:
        return _("Passwords don't match!")
    if not password:
        return _("Please specify a password!")
    # optional site-configured password strength policy
    pw_checker = request.cfg.password_checker
    if pw_checker:
        pw_error = pw_checker(request, theuser.name, password)
        if pw_error:
            return _("Password not acceptable: %s") % wikiutil.escape(pw_error)
    # Encode password
    try:
        theuser.enc_password = user.encodePassword(request.cfg, password)
    except UnicodeError, err:
        # Should never happen
        return "Can't encode password: %s" % wikiutil.escape(str(err))
def _make_form_values(self, question, given_answer):
    """Build the escaped hidden-field values for the TextCha form.

    The question field carries the question, a timestamp and an HMAC-style
    signature so the server can verify it was not tampered with.

    @return: (question form value, answer form value)
    """
    now = time()
    signature = self._compute_signature(question, now)
    # %d truncates the float timestamp to whole seconds, matching what
    # _compute_signature is given
    question_value = "%s %d%s" % (
        wikiutil.escape(question, True), now, signature)
    answer_value = wikiutil.escape(given_answer, True)
    return question_value, answer_value
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed actions.

    @param context: request context (mutated: .page is set)
    @param pagename: normalized page name, may be empty
    @param action_name: action to dispatch, default 'show'
    @return: the context object
    """
    _ = context.getText
    cfg = context.cfg
    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        if '_' in pagename and not context.page.exists():
            # underscore fallback: if Foo_Bar doesn't exist but "Foo Bar"
            # does, redirect to the canonical (space) name
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)
    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
            'action_name': wikiutil.escape(action_name), }
    # Disallow non available actions
    # (capitalized actions are the user-invocable ones subject to ACLs)
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
            'action_name': wikiutil.escape(action_name), }
        if not context.user.valid:
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")
    if msg:
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            msg = _("You are not allowed to do %(action_name)s on this page.") % {
                'action_name': wikiutil.escape(action_name), }
            if not context.user.valid:
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)
    return context
def line2dict(self, line):
    """Return ';;' separated 'key=val' tuples as dict.

    The first field is stored under 'name'; every value is HTML-escaped.
    A malformed field (not exactly one '=') yields an empty dict.
    """
    fields = line.split(';;')
    result = {'name': wikiutil.escape(fields[0])}
    for field in fields[1:]:
        pieces = field.split('=')
        if len(pieces) != 2:
            # reject the whole line on any malformed entry
            return {}
        key, val = pieces
        result[wikiutil.escape(key)] = wikiutil.escape(val)
    return result
def _do_move(pagename, request):
    """ Show the "move attachment" dialog for the requested attachment.

    Builds a POST form (with CSRF ticket) asking for the new page name
    and new attachment name, then renders it as a dialog on the page.

    @return: error message string, or the result of send_page()
    """
    _ = request.getText

    pagename, filename, fpath = _access_file(pagename, request)
    if not request.user.may.delete(pagename):
        return _('You are not allowed to move attachments from this page.')
    if not filename:
        return # error msg already sent in _access_file

    # move file
    # escape(..., 1) quote-escapes values placed in HTML attributes
    d = {'action': action_name,
         'url': request.href(pagename),
         'do': 'attachment_move',
         'ticket': wikiutil.createTicket(request),
         'pagename': wikiutil.escape(pagename, 1),
         'attachment_name': wikiutil.escape(filename, 1),
         'move': _('Move'),
         'cancel': _('Cancel'),
         'newname_label': _("New page name"),
         'attachment_label': _("New attachment name"),
        }
    formhtml = '''
<form action="%(url)s" method="POST">
<input type="hidden" name="action" value="%(action)s">
<input type="hidden" name="do" value="%(do)s">
<input type="hidden" name="ticket" value="%(ticket)s">
<table>
    <tr>
        <td class="label"><label>%(newname_label)s</label></td>
        <td class="content">
            <input type="text" name="newpagename" value="%(pagename)s" size="80">
        </td>
    </tr>
    <tr>
        <td class="label"><label>%(attachment_label)s</label></td>
        <td class="content">
            <input type="text" name="newattachmentname" value="%(attachment_name)s" size="80">
        </td>
    </tr>
    <tr>
        <td></td>
        <td class="buttons">
            <input type="hidden" name="oldattachmentname" value="%(attachment_name)s">
            <input type="submit" name="move" value="%(move)s">
            <input type="submit" name="cancel" value="%(cancel)s">
        </td>
    </tr>
</table>
</form>''' % d
    thispage = Page(request, pagename)
    request.theme.add_msg(formhtml, "dialog")
    return thispage.send_page()
def send_link_rel(request, pagename):
    """Write one <link rel="Appendix"> per attachment, using CGI-style URLs.

    Skipped entirely when there are no attachments or when attachments
    are served directly by the webserver (htdocs access).
    """
    files = _get_files(request, pagename)
    if not files or htdocs_access(request):
        return
    script = request.getScriptname()
    quoted_page = wikiutil.quoteWikinameURL(pagename)
    for fname in files:
        url = "%s/%s?action=%s&do=view&target=%s" % (
            script, quoted_page, action_name,
            wikiutil.url_quote_plus(fname))
        request.write(u'<link rel="Appendix" title="%s" href="%s">\n' % (
            wikiutil.escape(fname), wikiutil.escape(url)))
def title_with_separators(self, d):
    """ Assemble the title using slashes, not <ul>

    @param d: parameter dictionary
    @rtype: string
    @return: title html
    """
    _ = self.request.getText
    # anonymous visitors just get the plain escaped title
    if not self.request.user.valid:
        return u'<span class="navbar-brand">%s</span>' % wikiutil.escape(d['title_text'])
    if d['title_text'] != d['page'].split_title():
        # action/message title, not a plain page view
        return wikiutil.escape(d['title_text'])
    # plain page view: last segment links to a full-text backlink search
    segments = d['page_name'].split('/')
    query = {'action': 'fullsearch', 'context': '180',
             'value': 'linkto:"%s"' % d['page_name'], }
    link = d['page'].link_to(self.request, segments[-1], querystr=query,
                             title=_('Click to do a full-text search for this title'),
                             rel='nofollow')
    if len(segments) <= 1:
        return link
    # parent segments become individual page links joined by slashes
    crumbs = []
    partial = ''
    for seg in segments[:-1]:
        partial += seg
        crumbs.append(Page(self.request, partial).link_to(self.request, seg, css_class=''))
        partial += '/'
    return u'<span class="pagepath">%s</span>/%s' % (u'/'.join(crumbs), link)
def sendForm(self):
    """Write the ViewDot options form (format, graphengine, dot file picker).

    Output is streamed directly to the request; radio groups are built
    from self.available_formats / self.available_graphengines and the
    attachment <select> lists every *.dot / *.gv attachment the user
    may read.
    """
    request = self.request
    _ = request.getText
    ## Begin form
    request.write(u'<form method="GET" action="%s">\n' % actionname(request))
    request.write(u'<input type=hidden name=action value="ViewDot">')
    request.write(u"<table>\n<tr>\n")
    # format
    request.write(u"<td>\n" + _('Output format') + u"<br>\n")
    for type in self.available_formats:
        # the currently selected format gets the checked attribute
        request.write(u'<input type="radio" name="format" ' +
                      u'value="%s"%s%s<br>\n' % (form_escape(type),
                                                 type == self.format and " checked>" or ">",
                                                 wikiutil.escape(type)))
    # graphengine
    request.write(u"<td>\n" + _('Output graphengine') + u"<br>\n")
    for type in self.available_graphengines:
        request.write(u'<input type="radio" name="graphengine" ' +
                      u'value="%s"%s%s<br>\n' % (form_escape(type),
                                                 type == self.graphengine and " checked>" or ">",
                                                 wikiutil.escape(type)))
    request.write(_("Dot file") + "<br>\n" +
                  u'<select name="attachment">\n')
    # Use request.rootpage, request.page has weird failure modes
    for page in request.rootpage.getPageList():
        # Page#getPageList filters out pages to which the user doesn't have
        # read access.
        files = AttachFile._get_files(request, page)
        for file in files:
            if file.endswith('.dot') or file.endswith('.gv'):
                request.write('<option label="%s" value="%s">%s</option>\n' % (
                    form_escape(file),
                    form_escape("attachment:%s/%s" % (page, file)),
                    wikiutil.escape("%s/%s" % (page, file))))
    request.write('</select>\n</table>\n')
    request.write(u'<input type=submit name=view ' +
                  'value="%s">\n' % form_escape(_('View')))
    request.write(u'<input type=submit name=help ' +
                  'value="%s"><br>\n' % form_escape(_('Inline')))
    request.write(u'</form>\n')
def getEditor(self, request):
    """ Return a HTML-safe string representing the user that did the edit. """
    span = '<span title="%s">%s</span>'
    kind, editor = self.getEditorData(request)
    if kind == 'homepage':
        # known wiki user: link to their homepage, hostname as tooltip
        return span % (wikiutil.escape(self.hostname),
                       editor.link_to(request))
    if kind == 'ip':
        # anonymous edit: show only the part before the first dot
        cut = editor.find('.')
        if cut == -1:
            cut = len(editor)
        title = ''
        if request.cfg.show_hosts:
            title = "%s=%s" % (self.addr, editor)
        return span % (wikiutil.escape(title or ''),
                       wikiutil.escape(editor[:cut]))
    # any other editor kind: plain escaped name, optional hostname tooltip
    title = ''
    if request.cfg.show_hosts:
        title = self.hostname
    return span % (wikiutil.escape(title or ''),
                   wikiutil.escape(editor))
def linkto(pagename, request, params=''):
    """ Return an <img> tag pointing at the hitcounts chart for `pagename`,
    or a plain text rendering when charts are not configured.

    @param params: extra, pre-encoded query parameters appended to the URL
    @rtype: string
    @return: html
    """
    _ = request.getText
    if not request.cfg.chart_options:
        # charts disabled: fall back to the text table
        # NOTE(review): replaces request.formatter as a side effect before
        # delegating -- confirm callers rely on / tolerate this
        request.formatter = Formatter(request)
        return text(pagename, request, params)
    if _debug:
        return draw(pagename, request)
    page = Page(request, pagename)
    # Create escaped query string from dict and params
    querystr = {'action': 'chart', 'type': 'hitcounts'}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += '&' + params
    # TODO: remove escape=0 in 2.0
    data = {'url': page.url(request, querystr, escape=0)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="hitcounts chart">') % data
    return result
def render(self, form=None):
    """ Checks if textchas are enabled and returns HTML for one,
    or an empty string if they are not enabled.

    @return: unicode result html
    """
    if not self.is_enabled():
        return u""
    # reuse the question/answer the user already submitted, if any
    question, given_answer = self._extract_form_values(form)
    if question is None:
        question = self.question
    question_form, given_answer_form = self._make_form_values(question, given_answer)
    return u"""
<div id="textcha">
<span id="textcha-question">%s</span>
<input type="hidden" name="textcha-question" value="%s">
<input id="textcha-answer" type="text" name="textcha-answer" value="%s" size="20" maxlength="80">
</div>
""" % (
        wikiutil.escape(question),
        question_form,
        given_answer_form,
    )
def getAttachUrl(pagename, filename, request, addts=0, escaped=0, do='get'):
    """ Get URL that points to attachment `filename` of page `pagename`.

    If 'addts' is true, a timestamp with the file's modification time
    is added, so that browsers reload a changed file.

    @param escaped: if true, HTML-escape the resulting URL
    @param do: sub-action for CGI delivery, limited to 'get' or 'view'
    @rtype: string
    @return: attachment URL
    """
    if htdocs_access(request):
        # direct file access via webserver
        timestamp = ''
        if addts:
            try:
                timestamp = '?ts=%s' % os.path.getmtime(
                    getFilename(request, pagename, filename))
            except (IOError, OSError):
                # fix: os.path.getmtime raises OSError (not IOError) on a
                # missing/unreadable file -- catch both so a vanished
                # attachment simply gets no cache-busting timestamp
                pass
        url = "%s/%s/attachments/%s%s" % (
            request.cfg.attachments['url'], wikiutil.quoteWikinameFS(pagename),
            wikiutil.url_quote(filename), timestamp)
    else:
        # send file via CGI
        if do not in ['get', 'view']:
            do = 'get'
        url = "%s/%s?action=%s&do=%s&target=%s" % (
            request.getScriptname(), wikiutil.quoteWikinameURL(pagename),
            action_name, do, wikiutil.url_quote_plus(filename))
    if escaped:
        url = wikiutil.escape(url)
    return url
def execute(macro, args):
    """ PageList macro: render a title search result as a page list.

    @param macro: macro instance (supplies request and formatter)
    @param args: raw macro argument string; 'needle' selects pages
    @return: html for the page list, or an error span
    """
    _ = macro._
    # case-insensitive title search
    case = 0

    # If called with empty or no argument, default to regex search for .+, the full page list.
    needle = wikiutil.get_unicode(macro.request, args, 'needle', u'regex:.+')

    # With whitespace argument, return same error message as FullSearch
    if not needle.strip():
        err = _('Please use a more selective search term instead of {{{"%s"}}}', wiki=True) % needle
        return '<span class="error">%s</span>' % err
    needle = needle.strip()

    # Return a title search for needle, sorted by name.
    try:
        results = search.searchPages(macro.request, needle,
                                     titlesearch=1, case=case,
                                     sort='page_name')
        # render with a fresh formatter marked as included content,
        # keeping the current heading depth
        fmt = macro.formatter.__class__(macro.request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        # exclude pages already linked from the current page
        ret = results.pageList(macro.request, fmt, paging=False,excluded=macro.formatter.page.getPageLinks(macro.request))
    except ValueError:
        # same error as in MoinMoin/action/fullsearch.py, keep it that way!
        ret = ''.join([macro.formatter.text('<<PageList('),
                       _('Your search query {{{"%s"}}} is invalid. Please refer to '
                         'HelpOnSearching for more information.', wiki=True,
                         percent=True) % wikiutil.escape(needle),
                       macro.formatter.text(')>>')])
    return ret
def package(self):
    """ Calls collectpackage() with the arguments specified.

    Validates the submitted package name, streams the collected
    package into memory and sends it as a zip download.

    @raise ActionError: when the form was not submitted or the
                        package name is invalid
    """
    _ = self.request.getText
    request = self.request
    values = request.values

    # Get new name from form and normalize.
    pagelist = values.get('pagelist', u'')
    packagename = values.get('packagename', u'')
    include_attachments = values.get('include_attachments', False)

    if not values.get('submit'):
        # no submit yet: show the form as a dialog instead
        request.theme.add_msg(self.makeform(), "dialog")
        raise ActionError

    target = wikiutil.taintfilename(packagename)
    if not target:
        error = self.makeform(_('Invalid filename "%s"!') % wikiutil.escape(packagename))
        request.theme.add_msg(error, "error")
        raise ActionError

    # collect the package into an in-memory buffer, then ship it
    filelike = cStringIO.StringIO()
    self.collectpackage(unpackLine(pagelist, ","), filelike,
                        target, include_attachments)
    request.headers['Content-Type'] = 'application/zip'
    request.headers['Content-Length'] = filelike.tell()
    request.headers['Content-Disposition'] = 'inline; filename="%s"' % target
    request.write(filelike.getvalue())
    filelike.close()
def pageinfo(self, page):
    """ Return html fragment with page meta data

    Since page information uses translated text, it uses the ui
    language and direction. It looks strange sometimes, but
    translated text using page direction looks worse.

    @param page: current page
    @rtype: unicode
    @return: page last edit information
    """
    _ = self.request.getText
    if not self.shouldShowPageinfo(page):
        return ''
    info = page.lastEditInfo()
    if not info:
        return ''
    # phrase depends on whether an editor name is available
    if info['editor']:
        info = _("last edited %(time)s by %(editor)s") % info
    else:
        info = _("last modified %(time)s") % info
    pagename = page.page_name
    if self.request.cfg.show_interwiki:
        # prefix the page name with this wiki's interwiki name
        pagename = "%s: %s" % (self.request.cfg.interwikiname, pagename)
    info = "%s (%s)" % (wikiutil.escape(pagename), info)
    return '<p id="pageinfo" class="info"%(lang)s>%(info)s</p>\n' % {
        'lang': self.ui_lang_attr(),
        'info': info,
    }
def title(self, d):
    """ Assemble the title (now using breadcrumbs)

    @param d: parameter dictionary
    @rtype: string
    @return: title html
    """
    _ = self.request.getText
    parts = []
    if d['title_text'] == d['page'].split_title():
        # just showing a page, no action: parents become breadcrumb links
        segments = d['page_name'].split('/')
        partial = ''
        for seg in segments[:-1]:
            partial += seg
            parts.append(Page(self.request, partial).link_to(self.request, seg))
            partial += '/'
        # last segment: backlink that triggers a full-text search
        # (we dont use d['title_link'] any more, but make it ourselves)
        query = {
            'action': 'fullsearch',
            'value': 'linkto:"%s"' % d['page_name'],
            'context': '180',
        }
        parts.append(d['page'].link_to(self.request, segments[-1],
                                       querystr=query,
                                       title=_('Click to do a full-text search for this title'),
                                       css_class='backlink', rel='nofollow'))
    else:
        parts.append(wikiutil.escape(d['title_text']))
    joined = u'<span class="sep">/</span>'.join(parts)
    return u'<span id="pagelocation">%s</span>' % joined
def _makeoption(self, item, selected, ntitem=None):
    """ create an option for a <select> form element

    @param item: string containing the item name to show
    @param selected: indicates whether the item should be default or not
    @param ntitem: optional distinct value attribute; defaults to item
    """
    assert(isinstance(item, basestring))
    flag = ' selected' if selected else ''
    value = item if ntitem is None else ntitem
    # value attribute is quote-escaped; display text is plain-escaped
    return '<option value="%s"%s>%s</option>' % (
        wikiutil.escape(value, True),
        flag,
        wikiutil.escape(item))
def auto_backlink(self, d):
    """ Assemble the title using slashes, not <ul>

    Renders a "parent: ..." breadcrumb line for subpages; top-level
    pages and action titles render without the parent prefix.

    @param d: parameter dictionary
    @rtype: string
    @return: title html
    """
    _ = self.request.getText
    if d['title_text'] == d['page'].split_title():
        # just showing a page, no action
        segments = d['page_name'].split('/')
        link_text = segments[-1]
        link_title = _('Click to do a full-text search for this title')
        # Rick: commented out the below line:
        #link_query = {'action': 'fullsearch', 'context': '180', 'value': 'linkto:"%s"' % d['page_name'], }
        # Rick: We also delete 'querystr=link_query, title=link_title,' from this section:
        link = d['page'].link_to(self.request, link_text,
                                 css_class='backlink', rel='nofollow')
        if len(segments) <= 1:
            # top-level page: no parent breadcrumb at all
            html = link
        else:
            # build one link per parent segment, accumulated path
            content = []
            curpage = ''
            for s in segments[:-1]:
                curpage += s
                content.append(Page(self.request, curpage).link_to(self.request, s))
                curpage += '/'
            path_html = u'<span class="sep"> / </span>'.join(content)
            # Rick: original: html = u'<span class="pagepath"> return to %s</span><span class="sep"> < </span>%s' % (path_html, link)
            # NOTE(review): 'link' is intentionally not included below -- only
            # the parent path is rendered; confirm this is still wanted
            html = u'<span class="pagepath_autobacklink">parent: %s</span><span class="sep"> / </span>' % (path_html)
    else:
        html = wikiutil.escape(d['title_text'])
    return u'<span id="pagelocation_autobacklink">%s</span>' % html
def title_with_separators(self, d):
    """ Assemble the title using slashes, not <ul>

    Renders the page location as a Bootstrap breadcrumb list.

    @param d: parameter dictionary
    @rtype: string
    @return: title html
    """
    _ = self.request.getText
    if d['title_text'] != d['page'].split_title():
        # action/message title: plain escaped text inside the breadcrumb
        html = wikiutil.escape(d['title_text'])
        return u'<ul class="breadcrumb">%s</ul>' % html
    # plain page view: last segment is a backlink search link
    segments = d['page_name'].split('/')
    query = {'action': 'fullsearch', 'context': '180',
             'value': 'linkto:"%s"' % d['page_name'], }
    link = d['page'].link_to(self.request, segments[-1], querystr=query,
                             title=_('Click to do a full-text search for this title'),
                             css_class='backlink', rel='nofollow')
    if len(segments) <= 1:
        html = link
    else:
        crumbs = []
        partial = ''
        for seg in segments[:-1]:
            partial += seg
            crumbs.append(Page(self.request, partial).link_to(self.request, seg))
            partial += '/'
        separator = u'<span class="divider">/</span></li><li>'
        html = u'<li>%s<span class="divider">/</span></li><li class="active">%s</li>' % (
            separator.join(crumbs), link)
    return u'<ul class="breadcrumb">%s</ul>' % html
def searchform(self, d):
    """ assemble HTML code for the search form

    @param d: parameter dictionary (mutated: search labels added)
    @rtype: unicode
    @return: search form html
    """
    _ = self.request.getText
    form = self.request.values
    updates = {
        'search_label': _('Search:'),
        'search_hint': _('Search'),
        # re-fill the box with the previous query, attribute-escaped
        'search_value': wikiutil.escape(form.get('value', ''), 1),
        'search_full_label': _('Text'),
        'search_title_label': _('Titles'),
        'url': self.request.href(d['page'].page_name)
    }
    d.update(updates)
    html = u'''
<li>
<div class="navbar-form">
<form class="form-search" role="search" id="searchform" method="get" action="%(url)s">
<input type="hidden" name="action" value="fullsearch">
<input type="hidden" name="context" value="180">
<div class="form-group">
<label class="sr-only" for="searchinput">%(search_label)s</label>
<input id="searchinput" type="text" class="form-control form-search" placeholder="%(search_hint)s" name="value" value="%(search_value)s">
</div>
</form>
</div>
</li>
''' % d
    return html
def linkto(pagename, request, params=''):
    """Return an <img> tag linking to the pagesize chart, or a system
    message when charts are not configured.

    @param params: extra, pre-encoded query parameters appended to the URL
    """
    _ = request.getText
    if not request.cfg.chart_options:
        fmt = request.formatter
        return (fmt.sysmsg(1) +
                fmt.text(_('Charts are not available!')) +
                fmt.sysmsg(0))
    if _debug:
        return draw(pagename, request)
    page = Page(request, pagename)
    # Create escaped query string from dict and params
    query = wikiutil.makeQueryString({'action': 'chart', 'type': 'pagesize'})
    query = wikiutil.escape(query)
    if params:
        query = '%s&%s' % (query, params)
    data = {'url': page.url(request, query)}
    data.update(request.cfg.chart_options)
    return ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
            ' alt="pagesize chart">') % data
def render(self, form=None):
    """ Checks if textchas are enabled and returns HTML for one,
    or an empty string if they are not enabled.

    @return: unicode result html
    """
    if not self.is_enabled():
        return u''
    # reuse any previously submitted question/answer (timestamp and
    # signature come back too, but only question/answer are re-rendered)
    question, given_answer, timestamp, signature = self._extract_form_values(
        form)
    if question is None:
        question = self.question
    question_form, given_answer_form = self._make_form_values(
        question, given_answer)
    return u"""
<div id="textcha">
<span id="textcha-question">%s</span>
<input type="hidden" name="textcha-question" value="%s">
<input id="textcha-answer" type="text" name="textcha-answer" value="%s" size="20" maxlength="80">
</div>
""" % (wikiutil.escape(question), question_form, given_answer_form)
def linkto(pagename, request, params=''):
    """Return an <img> tag linking to the useragents chart, or the
    plain text table when charts are not configured.

    @param params: extra, pre-encoded query parameters appended to the URL
    """
    _ = request.getText
    if not request.cfg.chart_options:
        # charts disabled: render the table fallback instead
        return text(pagename, request)
    if _debug:
        return draw(pagename, request)
    page = Page(request, pagename)
    # Create escaped query string from dict and params
    query = wikiutil.makeQueryString({'action': 'chart', 'type': 'useragents'})
    query = wikiutil.escape(query)
    if params:
        query = '%s&%s' % (query, params)
    data = {'url': page.url(request, query)}
    data.update(request.cfg.chart_options)
    return ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
            ' alt="useragents chart">') % data
def text(pagename, request):
    """Render user-agent statistics as an HTML table.

    Shows the top 10 agents by hit percentage plus an 'Others' row
    covering the remainder.
    """
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget

    _ = request.getText

    data = get_data(request)
    # float accumulator so percentages divide correctly
    total = sum((cnt for cnt, ua in data), 0.0)

    agents = TupleDataset()
    agents.columns = [
        Column('agent', label=_("User agent"), align='left'),
        Column('value', label='%', align='right'),
    ]

    cnt_printed = 0
    data = data[:10]  # top 10 agents only

    if total:
        for cnt, ua in data:
            try:
                ua = wikiutil.escape(unicode(ua))
                agents.addRow((ua, "%.2f" % (100.0 * cnt / total)))
                cnt_printed += cnt
            except UnicodeError:
                # skip agents whose name cannot be decoded
                pass
        if total > cnt_printed:
            agents.addRow((_('Others'), "%.2f" % (100 * (total - cnt_printed) / total)))

    table = DataBrowserWidget(request)
    table.setData(agents)
    return table.render(method="GET")
def page_list(request):
    """Render the FCKeditor 'Insert Page Link' dialog page.

    Performs a title search for the submitted pagename and writes a
    small HTML document with a <select> listing the matching pages.
    """
    from MoinMoin import search
    name = request.values.get("pagename", "")
    if name:
        # title search ('t:') for the given name
        searchresult = search.searchPages(request, 't:"%s"' % name)
        pages = [p.page_name for p in searchresult.hits]
    else:
        # no name given: list contains just the empty string
        pages = [name]
    # option text is escaped; everything else in the document is static
    request.write(
        '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
 <head>
  <title>Insert Page Link</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  <meta content="noindex,nofollow" name="robots">
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
  <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
   <tr>
    <td>
     <table cellSpacing="0" cellPadding="0" align="center" border="0">
      <tr>
       <td>
        <span fckLang="PageDlgName">Page name</span><br>
        <select id="txtName" size="1">
         %s
        </select>
       </td>
      </tr>
     </table>
    </td>
   </tr>
  </table>
 </body>
</html>
''' % "".join(["<option>%s</option>\n" % wikiutil.escape(p) for p in pages]))
def __unicode__(self):
    """Return self.text with HTML special characters escaped."""
    escaped = wikiutil.escape(self.text)
    return escaped
def render(self):
    """ Send the AnyWikiDraw applet page for editing the target drawing.

    Writes a full themed page containing the Java drawing applet,
    wired up with load/save URLs for the attachment container.
    Returns an error message string instead if permission or target
    checks fail.
    """
    request = self.request
    _ = request.getText
    pagename = self.pagename
    target = self.target
    # guard clauses: read permission and a non-empty target are required
    if not request.user.may.read(pagename):
        return _('You are not allowed to view attachments of this page.')
    if not target:
        return _("Empty target name given.")

    # drawing is stored as a container attachment; load URL only if it exists
    ci = AttachFile.ContainerItem(request, pagename, target)
    if ci.exists():
        drawurl = ci.member_url('drawing.svg')
    else:
        drawurl = ''
    pageurl = request.href(pagename)
    # save URL carries a ticket so the applet's POST can be validated
    saveurl = request.href(pagename, action=action_name, do='save',
                           target=target,
                           ticket=wikiutil.createTicket(request))
    # NOTE(review): helpurl is computed but never used below — dead code?
    helpurl = request.href("HelpOnActions/AttachFile")

    html = """
<p>
<applet code="org.anywikidraw.moinmoin.MoinMoinDrawingApplet.class"
        codebase="."
        archive="%(htdocs)s/applets/anywikidraw/lib/AnyWikiDrawForMoinMoin.jar"
        width="800" height="620">
    <!-- The following parameters are used to tell AnyWikiDraw how to communicate with MoinMoin. -->
    <param name="DrawingName" value="%(basename)s.svg">
    <param name="DrawingURL" value="%(drawurl)s">
    <param name="PageURL" value="%(pageurl)s">
    <param name="UploadURL" value="%(saveurl)s">
    <!-- The following parameters are used to configure the drawing applet -->
    <param name="Locale" value="en">
    <!-- The following parameters are used to configure Sun's Java Plug-In -->
    <param name="codebase_lookup" value="false">
    <param name="classloader_cache" value="false">
    <!-- The following makes trouble with FF3 on Ubuntu 9.04 as client and
         Apache2 / mod_wsgi on Debian Lenny as server, it seems to confuse
         .gz files with gzip content-encoding and fails miserably somehow:
         param name="java_arguments" value="-Djnlp.packEnabled=true" -->
    <param name="boxborder" value="false">
    <param name="centerimage" value="true">
    <strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
        htdocs=request.cfg.url_prefix_static,
        # escape(…, 1) also quotes attribute-value characters
        basename=wikiutil.escape(target, 1),
        drawurl=wikiutil.escape(drawurl, 1),
        pageurl=wikiutil.escape(pageurl, 1),
        saveurl=wikiutil.escape(saveurl, 1),
    )
    title = '%s %s:%s' % (_('Edit drawing'), pagename, target)
    # standard themed page: title, content div with raw applet HTML, footer
    request.theme.send_title(title, page=request.page, pagename=pagename)
    request.write(request.formatter.startContent("content"))
    request.write(request.formatter.rawHTML(html))
    request.write(request.formatter.endContent())
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
def execute(pagename, request, fieldname='value', titlesearch=0, statistic=0):
    """ Perform a wiki search (title, full-text or advanced) and render results.

    Reads the query and options from the request form, optionally builds
    a combined query string for advanced search, runs the search and
    writes the themed result page. Redirects directly to the page on a
    unique title-search hit ("quick jump").

    @param pagename: current page name (used for error page and footer)
    @param request: current request
    @param fieldname: form field carrying the search needle
    @param titlesearch: ignored — recomputed from the request below
    @param statistic: unused here
    """
    _ = request.getText
    titlesearch = checkTitleSearch(request)
    if titlesearch < 0:
        check_surge_protect(request, kick=True) # get rid of spammer
        return

    advancedsearch = isAdvancedSearch(request)

    form = request.values

    # context is relevant only for full search
    if titlesearch:
        context = 0
    elif advancedsearch:
        context = 180 # XXX: hardcoded context count for advancedsearch
    else:
        context = int(form.get('context', 0))

    # Get other form parameters
    needle = form.get(fieldname, '')
    case = int(form.get('case', 0))
    regex = int(form.get('regex', 0)) # no interface currently
    hitsFrom = int(form.get('from', 0))
    mtime = None
    msg = ''
    historysearch = 0

    # if advanced search is enabled we construct our own search query
    if advancedsearch:
        and_terms = form.get('and_terms', '').strip()
        or_terms = form.get('or_terms', '').strip()
        not_terms = form.get('not_terms', '').strip()
        #xor_terms = form.get('xor_terms', '').strip()
        categories = form.getlist('categories') or ['']
        timeframe = form.get('time', '').strip()
        language = form.getlist('language') or ['']
        mimetype = form.getlist('mimetype') or [0]
        excludeunderlay = form.get('excludeunderlay', 0)
        nosystemitems = form.get('nosystemitems', 0)
        historysearch = form.get('historysearch', 0)

        mtime = form.get('mtime', '')
        if mtime:
            mtime_parsed = None

            # get mtime from known date/time formats
            for fmt in (request.user.datetime_fmt, request.cfg.datetime_fmt,
                        request.user.date_fmt, request.cfg.date_fmt):
                try:
                    mtime_parsed = time.strptime(mtime, fmt)
                except ValueError:
                    continue
                else:
                    break

            if mtime_parsed:
                mtime = time.mktime(mtime_parsed)
            else:
                # didn't work, let's try parsedatetime
                cal = Calendar()
                mtime_parsed, parsed_what = cal.parse(mtime)
                # XXX it is unclear if usage of localtime here and in
                # parsedatetime module is correct.
                # time.localtime is the SERVER's local time and of no
                # relevance to the user (being somewhere in the world)
                # mktime is reverse function for localtime, so this maybe
                # fixes it again!?
                if parsed_what > 0 and mtime_parsed <= time.localtime():
                    mtime = time.mktime(mtime_parsed)
                else:
                    mtime_parsed = None # we don't use invalid stuff

            # show info
            if mtime_parsed:
                # XXX mtime_msg is not shown in some cases
                mtime_msg = _(
                    "(!) Only pages changed since '''%s''' are being displayed!",
                    wiki=True) % request.user.getFormattedDateTime(mtime)
            else:
                mtime_msg = _(
                    '/!\\ The modification date you entered was not '
                    'recognized and is therefore not considered for the '
                    'search results!', wiki=True)
        else:
            mtime_msg = None

        # assemble the combined query string from the advanced-search fields
        word_re = re.compile(r'(\"[\w\s]+"|\w+)')
        needle = ''
        if categories[0]:
            needle += 'category:%s ' % ','.join(categories)
        if language[0]:
            needle += 'language:%s ' % ','.join(language)
        if mimetype[0]:
            needle += 'mimetype:%s ' % ','.join(mimetype)
        if excludeunderlay:
            needle += '-domain:underlay '
        if nosystemitems:
            needle += '-domain:system '
        if and_terms:
            needle += '(%s) ' % and_terms
        if not_terms:
            # negate each single term of the NOT field
            needle += '(%s) ' % ' '.join(
                ['-%s' % t for t in word_re.findall(not_terms)])
        if or_terms:
            needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))

    # check for sensible search term
    stripped = needle.strip()
    if len(stripped) == 0:
        request.theme.add_msg(
            _(
                'Please use a more selective search term instead '
                'of {{{"%s"}}}', wiki=True) % wikiutil.escape(needle), "error")
        Page(request, pagename).send_page()
        return
    needle = stripped

    # Setup for type of search
    if titlesearch:
        title = _('Title Search: "%s"')
        sort = 'page_name'
    else:
        if advancedsearch:
            title = _('Advanced Search: "%s"')
        else:
            title = _('Full Text Search: "%s"')
        sort = 'weight'

    # search the pages
    from MoinMoin.search import searchPages, QueryParser, QueryError
    try:
        query = QueryParser(case=case, regex=regex,
                            titlesearch=titlesearch).parse_query(needle)
    except QueryError: # catch errors in the search query
        request.theme.add_msg(
            _(
                'Your search query {{{"%s"}}} is invalid. Please refer to '
                'HelpOnSearching for more information.', wiki=True,
                percent=True) % wikiutil.escape(needle), "error")
        Page(request, pagename).send_page()
        return

    results = searchPages(request, query, sort, mtime, historysearch)

    # directly show a single hit for title searches
    # this is the "quick jump" functionality if you don't remember
    # the pagename exactly, but just some parts of it
    if titlesearch and len(results.hits) == 1:
        page = results.hits[0]
        if not page.attachment: # we did not find an attachment
            page = Page(request, page.page_name)
            highlight = query.highlight_re()
            if highlight:
                querydict = {'highlight': highlight}
            else:
                querydict = {}
            url = page.url(request, querystr=querydict)
            request.http_redirect(url)
            return

    if not results.hits: # no hits?
        f = request.formatter
        querydict = dict(wikiutil.parseQueryString(request.query_string))
        querydict.update({'titlesearch': 0})

        # error message; for title searches, additionally offer a link that
        # reruns the query as a full-text search
        request.theme.add_msg(
            _(
                'Your search query {{{"%s"}}} didn\'t return any results. '
                'Please change some terms and refer to HelpOnSearching for '
                'more information.%s', wiki=True, percent=True) %
            (wikiutil.escape(needle), titlesearch and ''.join([
                '<br>',
                _('(!) Consider performing a', wiki=True), ' ',
                f.url(1, href=request.page.url(request, querydict, escape=0)),
                _('full-text search with your search terms'),
                f.url(0), '.',
            ]) or ''), "error")
        Page(request, pagename).send_page()
        return

    # This action generates data using the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(title % needle, pagename=pagename)

    # Start content (important for RTL support)
    request.write(request.formatter.startContent("content"))

    # Hints
    f = request.formatter
    hints = []

    if titlesearch:
        querydict = dict(wikiutil.parseQueryString(request.query_string))
        querydict.update({'titlesearch': 0})

        # hint: title search may miss results; offer full-text rerun link
        hints.append(''.join([
            _(
                "(!) You're performing a title search that might not include"
                ' all related results of your search query in this wiki. <<BR>>',
                wiki=True),
            ' ',
            f.url(1, href=request.page.url(request, querydict, escape=0)),
            f.text(
                _('Click here to perform a full-text search with your '
                  'search terms!')),
            f.url(0),
        ]))

    if advancedsearch and mtime_msg:
        hints.append(mtime_msg)

    if hints:
        request.write(searchHints(f, hints))

    # Search stats
    request.write(results.stats(request, request.formatter, hitsFrom))

    # Then search results
    info = not titlesearch
    if context:
        output = results.pageListWithContext(request, request.formatter,
                                             info=info, context=context,
                                             hitsFrom=hitsFrom, hitsInfo=1)
    else:
        output = results.pageList(request, request.formatter, info=info,
                                  hitsFrom=hitsFrom, hitsInfo=1)

    request.write(output)

    request.write(request.formatter.endContent())
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
def checkSpelling(page, request, own_form=1):
    """ Do spell checking, return a tuple with the result.

    Scans the raw page body against the system dictionary plus the
    wiki's local-spelling-words page.

    @param page: page to check
    @param request: current request
    @param own_form: if true, wrap the bad-word checkboxes in their own form
    @rtype: tuple
    @return: (badwords, badwords_re, msg) where badwords is a dict (empty)
             or list of misspelled words, badwords_re a compiled regex for
             highlighting (or None), msg the HTML status message
    """
    _ = request.getText

    # first check to see if we we're called with a "newwords" parameter
    if 'button_newwords' in request.form:
        _addLocalWords(request)

    # load words
    wordsdict = _loadDict(request)

    # words accepted locally for this wiki, from a dedicated wiki page
    localwords = {}
    lsw_page = Page(request, request.cfg.page_local_spelling_words)
    if lsw_page.exists():
        _loadWordsPage(request, localwords, lsw_page)

    # init status vars & load page
    request.clock.start('spellcheck')
    badwords = {}
    text = page.get_raw_body()

    # checker regex and matching substitute function
    word_re = re.compile(
        r'([%s]?[%s]+)' % (config.chars_upper, config.chars_lower),
        re.UNICODE)

    def checkword(match, wordsdict=wordsdict, badwords=badwords,
                  localwords=localwords,
                  num_re=re.compile(r'^\d+$', re.UNICODE)):
        # used via word_re.sub purely for its side effect of filling
        # badwords; always returns "" (the substitution result is discarded)
        word = match.group(1)
        if len(word) == 1:
            return ""
        # dictionaries hold encoded byte strings, so encode before lookup
        w_enc = word.encode(config.charset)
        wl_enc = word.lower().encode(config.charset)
        if not (w_enc in wordsdict or wl_enc in wordsdict
                or w_enc in localwords or wl_enc in localwords):
            if not num_re.match(word):  # pure numbers are not misspellings
                badwords[word] = 1
        return ""

    # do the checking
    for line in text.split('\n'):
        # skip empty lines and comment/PI lines starting with '#'
        if line == '' or line[0] == '#':
            continue
        word_re.sub(checkword, line)

    if badwords:
        # case-insensitive sort of the misspelled words (py2 cmp-style sort)
        badwords = badwords.keys()
        badwords.sort(lambda x, y: cmp(x.lower(), y.lower()))

        # build regex recognizing the bad words
        badwords_re = r'(^|(?<!\w))(%s)(?!\w)'
        badwords_re = badwords_re % ("|".join(
            [re.escape(bw) for bw in badwords]), )
        badwords_re = re.compile(badwords_re, re.UNICODE)

        lsw_msg = ''
        if localwords:
            lsw_msg = ' ' + _('(including %(localwords)d %(pagelink)s)') % {
                'localwords': len(localwords),
                'pagelink': lsw_page.link_to(request)
            }
        msg = _(
            'The following %(badwords)d words could not be found in the dictionary of '
            '%(totalwords)d words%(localwords)s and are highlighted below:') % {
                'badwords': len(badwords),
                'totalwords': len(wordsdict) + len(localwords),
                'localwords': lsw_msg
            } + "<br>"

        # figure out what this action is called
        action_name = os.path.splitext(os.path.basename(__file__))[0]

        # add a form containing the bad words
        if own_form:
            msg = msg + ('<form method="post" action="%s">\n'
                         '<input type="hidden" name="action" value="%s">\n'
                         ) % (request.href(page.page_name), action_name)

        # one checkbox per bad word, so the user can add them to the dict
        checkbox = '<input type="checkbox" name="newwords" value="%(word)s">%(word)s '
        msg = msg + (" ".join([
            checkbox % {
                'word': wikiutil.escape(w, True),
            } for w in badwords
        ]) + '<p><input type="submit" name="button_newwords" value="%s"></p>' %
                     _('Add checked words to dictionary'))
        if own_form:
            msg = msg + '</form>'
    else:
        badwords_re = None
        msg = _("No spelling errors found!")

    request.clock.stop('spellcheck')

    return badwords, badwords_re, msg
def format_page_edits(macro, lines, bookmark_usecs):
    """ Format one RecentChanges entry for a page's group of edit-log lines.

    @param macro: the calling macro (provides the request)
    @param lines: edit-log lines for one page, newest first (lines[0] is
                  the most recent edit, lines[-1] the oldest in the group)
    @param bookmark_usecs: the user's bookmark timestamp in microseconds
                           (falsy if no bookmark set)
    @return: rendered entry from the theme
    """
    request = macro.request
    _ = request.getText
    d = {} # dict for passing stuff to theme
    line = lines[0]
    pagename = line.pagename
    rev = int(line.rev)
    tnow = time.time()
    is_new = lines[-1].action == 'SAVENEW'
    is_renamed = lines[-1].action == 'SAVE/RENAME'
    # check whether this page is newer than the user's bookmark
    hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
    page = Page(request, pagename)

    # pick the status icon and wrap it in the appropriate link
    html_link = ''
    if not page.exists():
        img = request.theme.make_icon('deleted')
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain='standard'):
            # indicate page was deleted and show diff to last existing
            # revision of it
            html_link = page.link_to_raw(request, img,
                                         querystr={'action': 'diff'},
                                         rel='nofollow')
        else:
            # just indicate page was deleted
            html_link = img
    elif page.isConflict():
        img = request.theme.make_icon('conflict')
        html_link = page.link_to_raw(request, img,
                                     querystr={'action': 'edit'},
                                     rel='nofollow')
    elif hilite:
        # show special icons if change was after the user's bookmark
        if is_new:
            img = 'new'
        elif is_renamed:
            img = 'renamed'
        else:
            img = 'updated'
        img = request.theme.make_icon(img)
        # diff link is anchored at the bookmark date
        html_link = page.link_to_raw(request, img,
                                     querystr={'action': 'diff',
                                               'date': '%d' % bookmark_usecs},
                                     rel='nofollow')
    else:
        # show "DIFF" icon else
        img = request.theme.make_icon('diffrc')
        html_link = page.link_to_raw(request, img,
                                     querystr={'action': 'diff'},
                                     rel='nofollow')

    # print name of page, with a link to it
    force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH

    d['icon_html'] = html_link
    d['pagelink_html'] = page.link_to(request,
                                      text=page.split_title(force=force_split))

    # print time of change
    d['time_html'] = None
    if request.cfg.changed_time_fmt:
        tdiff = long(tnow - wikiutil.version2timestamp(
            long(line.ed_time_usecs))) / 60 # has to be long for py 2.2.x
        if tdiff < 100:
            # recent change: show relative minutes instead of a timestamp
            d['time_html'] = _("%(mins)dm ago") % {'mins': tdiff}
        else:
            d['time_html'] = time.strftime(request.cfg.changed_time_fmt,
                                           line.time_tuple)

    # print editor name or IP
    d['editors'] = None
    if request.cfg.show_names:
        if len(lines) > 1:
            # several edits: group edit indices (1-based) per editor
            counters = {}
            for idx in range(len(lines)):
                name = lines[idx].getEditor(request)
                if not name in counters:
                    counters[name] = []
                counters[name].append(idx+1)
            poslist = [(v, k) for k, v in counters.items()]
            poslist.sort()
            d['editors'] = []
            for positions, name in poslist:
                d['editors'].append("%s [%s]" % (
                    name, util.rangelist(positions)))
        else:
            d['editors'] = [line.getEditor(request)]

    # collect the (escaped) edit comments, keyed by 1-based edit index
    comments = []
    for idx in range(len(lines)):
        comment = format_comment(request, lines[idx])
        if comment:
            comments.append((idx+1, wikiutil.escape(comment)))

    d['changecount'] = len(lines)
    d['comments'] = comments

    img = request.theme.make_icon('info')
    d['info_html'] = page.link_to_raw(request, img,
                                      querystr={'action': 'info'},
                                      rel='nofollow')

    return request.theme.recentchanges_entry(d)
def item_slide_title(self):
    """ Return the escaped title of the current slide. """
    slide_title = self.page.titleAt(self.slideNumber)
    return wikiutil.escape(slide_title)
def item_date(self):
    """ Return the escaped value of the page's 'date' pragma ('' if unset). """
    date_value = self.request.getPragma('date', defval='')
    return wikiutil.escape(date_value)
def do_user_browser(request):
    """ Browser for SystemAdmin macro.

    Builds a data table of all user accounts with group membership,
    contact links, and per-account enable/disable and mail-account-data
    POST forms. Returns the rendered widget HTML ('' if no users).
    """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups
                           if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show groups that are defined in wikipages linking to it
        # after show groups from other backends.
        grouppage_links = ', '.join(
            [Page(request, group_name).link_to(request)
             for group_name in wiki_groups] + other_groups)

        # link to the user's homepage if it exists, else plain escaped name
        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text=_('Disable user')
        if account.disabled:
            text=_('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action',
                              value='userprofile'))
        # ticket protects the userprofile POST against CSRF
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket",
                              value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name',
                              value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        # NOTE(review): inner unicode() already yields unicode — the outer
        # call looks redundant
        enable_disable_link = unicode(unicode(ret))

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action',
                              value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email',
                              value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail',
                              value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass',
                              value=_('Mail account data')))
        recoverpass_link = unicode(unicode(ret))

        # mailto: link for the email column (empty string if no address)
        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email,
                                                css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        # xmpp: link for the jabber column (empty string if no JID)
        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid,
                                                 css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget
        browser = DataBrowserWidget(request)
        browser.setData(data, sort_columns=[0])
        return browser.render()

    # No data
    return ''
def formatLink(self, url, text, **attributes):
    """ Render an HTML anchor with escaped url and link text.

    Extra keyword arguments are converted to HTML attributes via
    self.formatAttributes.
    """
    values = {
        'url': wikiutil.escape(url),
        'attributes': self.formatAttributes(attributes),
        'text': wikiutil.escape(text),
    }
    return '<a href="%(url)s"%(attributes)s>%(text)s</a>' % values
def sendEditor(self, **kw):
    """ Send the editor form page.

    @keyword preview: if given, show this text in preview mode
    @keyword staytop: don't go to #preview
    @keyword comment: comment field (when preview is true)
    """
    from MoinMoin import i18n
    from MoinMoin.action import SpellCheck

    request = self.request
    form = request.form
    _ = self._

    raw_body = ''
    msg = None
    conflict_msg = None
    edit_lock_message = None
    preview = kw.get('preview', None)
    staytop = kw.get('staytop', 0)

    # check edit permissions
    if not request.user.may.write(self.page_name):
        msg = _('You are not allowed to edit this page.')
    elif not self.isWritable():
        msg = _('Page is immutable!')
    elif self.rev:
        # Trying to edit an old version, this is not possible via
        # the web interface, but catch it just in case...
        msg = _('Cannot edit old revisions!')
    else:
        # try to acquire edit lock
        ok, edit_lock_message = self.lock.acquire()
        if not ok:
            # failed to get the lock
            if preview is not None:
                edit_lock_message = _(
                    'The lock you held timed out. Be prepared for editing conflicts!'
                ) + "<br>" + edit_lock_message
            else:
                msg = edit_lock_message

    # Did one of the prechecks fail?
    if msg:
        request.theme.add_msg(msg, "error")
        self.send_page()
        return

    # Emit http_headers after checks (send_page)
    request.disableHttpCaching(level=2)

    # check if we want to load a draft
    use_draft = None
    if 'button_load_draft' in form:
        wanted_draft_timestamp = int(form.get('draft_ts', '0'))
        if wanted_draft_timestamp:
            draft = self._load_draft()
            if draft is not None:
                draft_timestamp, draft_rev, draft_text = draft
                # only load the draft the user actually saw the button for
                if draft_timestamp == wanted_draft_timestamp:
                    use_draft = draft_text

    # Check for draft / normal / preview submit
    if use_draft is not None:
        title = _('Draft of "%(pagename)s"')
        # Propagate original revision
        rev = int(form['draft_rev'])
        self.set_raw_body(use_draft, modified=1)
        preview = use_draft
    elif preview is None:
        title = _('Edit "%(pagename)s"')
    else:
        title = _('Preview of "%(pagename)s"')
        # Propagate original revision
        rev = request.rev
        self.set_raw_body(preview, modified=1)

    # send header stuff
    lock_timeout = self.lock.timeout / 60
    lock_page = wikiutil.escape(self.page_name, quote=1)
    # '#' placeholders below are filled client-side by the countdown script
    lock_expire = _("Your edit lock on %(lock_page)s has expired!") % {
        'lock_page': lock_page}
    lock_mins = _("Your edit lock on %(lock_page)s will expire in # minutes.") % {
        'lock_page': lock_page}
    lock_secs = _("Your edit lock on %(lock_page)s will expire in # seconds.") % {
        'lock_page': lock_page}

    # get request parameters
    try:
        text_rows = int(form['rows'])
    except StandardError:
        text_rows = self.cfg.edit_rows
        if request.user.valid:
            text_rows = int(request.user.edit_rows)

    if preview is not None:
        # Check for editing conflicts
        if not self.exists():
            # page does not exist, are we creating it?
            if rev:
                conflict_msg = _('Someone else deleted this page while you were editing!')
        elif rev != self.current_rev():
            conflict_msg = _('Someone else changed this page while you were editing!')
            if self.mergeEditConflict(rev):
                conflict_msg = _(
                    """Someone else saved this page while you were editing!
Please review the page and save then. Do not save this page as it is!""")
                rev = self.current_rev()
        if conflict_msg:
            # We don't show preview when in conflict
            preview = None

    elif self.exists():
        # revision of existing page
        rev = self.current_rev()
    else:
        # page creation
        rev = 0
    self.setConflict(bool(conflict_msg))

    # Page editing is done using user language
    request.setContentLanguage(request.lang)

    # Get the text body for the editor field.
    # TODO: what about deleted pages? show the text of the last revision
    # or use the template?
    if preview is not None:
        raw_body = self.get_raw_body()
        if use_draft:
            request.write(_("[Content loaded from draft]"), '<br>')
    elif self.exists():
        # If the page exists, we get the text from the page.
        # TODO: maybe warn if template argument was ignored because the
        # page exists?
        raw_body = self.get_raw_body()
    elif 'template' in request.values:
        # If the page does not exist, we try to get the content from the
        # template parameter.
        template_page = wikiutil.unquoteWikiname(request.values['template'])
        template_page_escaped = wikiutil.escape(template_page)
        if request.user.may.read(template_page):
            raw_body = Page(request, template_page).get_raw_body()
            if raw_body:
                request.write(_("[Content of new page loaded from %s]") % (
                    template_page_escaped, ), '<br>')
            else:
                request.write(_("[Template %s not found]") % (
                    template_page_escaped, ), '<br>')
        else:
            request.write(_("[You may not read %s]") % (
                template_page_escaped, ), '<br>')

    # Make backup on previews - but not for new empty pages
    if not use_draft and preview and raw_body:
        self._save_draft(raw_body, rev)

    # offer to load a stored draft if it differs from the current body
    draft_message = None
    loadable_draft = False
    if preview is None:
        draft = self._load_draft()
        if draft is not None:
            draft_timestamp, draft_rev, draft_text = draft
            if draft_text != raw_body:
                loadable_draft = True
                page_rev = rev
                draft_timestamp_str = request.user.getFormattedDateTime(draft_timestamp)
                draft_message = _(u"'''<<BR>>Your draft based on revision %(draft_rev)d (saved %(draft_timestamp_str)s) can be loaded instead of the current revision %(page_rev)d by using the load draft button - in case you lost your last edit somehow without saving it.''' A draft gets saved for you when you do a preview, cancel an edit or unsuccessfully save.", wiki=True, percent=True) % locals()

    # Setup status message
    status = [kw.get('msg', ''), conflict_msg, edit_lock_message, draft_message]
    status = [msg for msg in status if msg]
    status = ' '.join(status)
    status = Status(request, content=status)
    request.theme.add_msg(status, "error")

    # send page title; include the lock countdown script only while locked
    request.theme.send_title(
        title % {'pagename': self.split_title(), },
        page=self,
        html_head=self.lock.locktype and (
            PageEditor._countdown_js % {
                'countdown_script': request.theme.externalScript('countdown'),
                'lock_timeout': lock_timeout,
                'lock_expire': lock_expire,
                'lock_mins': lock_mins,
                'lock_secs': lock_secs,
            }) or '',
        editor_mode=1,
        allow_doubleclick=1,
    )

    request.write(request.formatter.startContent("content"))

    # Generate default content for new pages
    if not raw_body:
        raw_body = _('Describe %s here.') % (self.page_name, )

    # send form
    request.write('<form id="editor" method="post" action="%s#preview">' % (
        request.href(self.page_name)))

    # yet another weird workaround for broken IE6 (it expands the text
    # editor area to the right after you begin to type...). IE sucks...
    # http://fplanque.net/2003/Articles/iecsstextarea/
    request.write('<fieldset style="border:none;padding:0;">')

    request.write(unicode(html.INPUT(type="hidden", name="action", value="edit")))

    # Send revision of the page our edit is based on
    request.write('<input type="hidden" name="rev" value="%d">' % (rev, ))

    # Add src format (e.g. 'wiki') into a hidden form field, so that
    # we can load the correct converter after POSTing.
    request.write('<input type="hidden" name="format" value="%s">' %
                  self.pi['format'])

    # Create and send a ticket, so we can check the POST
    request.write('<input type="hidden" name="ticket" value="%s">' %
                  wikiutil.createTicket(request))

    # Save backto in a hidden input
    backto = request.values.get('backto')
    if backto:
        request.write(unicode(html.INPUT(type="hidden", name="backto", value=backto)))

    # button bar
    button_spellcheck = '<input class="button" type="submit" name="button_spellcheck" value="%s">' % _('Check Spelling')

    save_button_text = _('Save Changes')
    cancel_button_text = _('Cancel')

    if self.cfg.page_license_enabled:
        request.write('<p><em>', _(
"""By hitting '''%(save_button_text)s''' you put your changes under the %(license_link)s.
If you don't want that, hit '''%(cancel_button_text)s''' to cancel your changes.""",
            wiki=True) % {
                'save_button_text': save_button_text,
                'cancel_button_text': cancel_button_text,
                'license_link': wikiutil.getLocalizedPage(
                    request, self.cfg.page_license_page).link_to(request),
            }, '</em></p>')

    request.write('''
<input class="button" type="submit" name="button_save" value="%s">
<input class="button" type="submit" name="button_preview" value="%s">
<input class="button" type="submit" name="button_switch" value="%s">
''' % (save_button_text, _('Preview'), _('Text mode'), ))

    if loadable_draft:
        request.write('''
<input class="button" type="submit" name="button_load_draft" value="%s" onClick="flgChange = false;">
<input type="hidden" name="draft_ts" value="%d">
<input type="hidden" name="draft_rev" value="%d">
''' % (_('Load Draft'), draft_timestamp, draft_rev))

    request.write('''
%s
<input class="button" type="submit" name="button_cancel" value="%s">
<input type="hidden" name="editor" value="gui">
''' % (button_spellcheck, cancel_button_text, ))

    if self.cfg.mail_enabled:
        # "trivial change" checkbox (top); the JS keeps both trivial
        # checkboxes in sync
        request.write('''
<script type="text/javascript">
    function toggle_trivial(CheckedBox) {
        TrivialBoxes = document.getElementsByName("trivial");
        for (var i = 0; i < TrivialBoxes.length; i++)
            TrivialBoxes[i].checked = CheckedBox.checked;
    }
</script>
<input type="checkbox" name="trivial" id="chktrivialtop" value="1" %(checked)s onclick="toggle_trivial(this)">
<label for="chktrivialtop">%(label)s</label> ''' % {
            'checked': ('', 'checked')[form.get('trivial', '0') == '1'],
            'label': _("Trivial change"),
        })

    from MoinMoin.security.textcha import TextCha
    request.write(TextCha(request).render())

    self.sendconfirmleaving() # TODO update state of flgChange to make this work, see PageEditor

    # Add textarea with page text
    lang = self.pi.get('language', request.cfg.language_default)
    contentlangdirection = i18n.getDirection(lang) # 'ltr' or 'rtl'
    uilanguage = request.lang
    url_prefix_static = request.cfg.url_prefix_static
    url_prefix_local = request.cfg.url_prefix_local
    wikipage = wikiutil.quoteWikinameURL(self.page_name)
    fckbasepath = request.cfg.url_prefix_fckeditor
    wikiurl = request.script_root + '/'
    themepath = '%s/%s' % (url_prefix_static, request.theme.name)
    smileypath = themepath + '/img'
    # auto-generating a list for SmileyImages does NOT work from here!
    text_rows = int(request.user.edit_rows)
    if not text_rows:
        # if no specific value is given for editor height, but 0, we
        # compute the rows from the raw_body line count plus some
        # extra rows for adding new text in the editor. Maybe this helps
        # with the "double slider" usability issue, esp. for devices like
        # the iphone where you can't operate both sliders.
        current_rows = len(raw_body.split('\n'))
        text_rows = max(10, int(current_rows * 1.5))
    editor_size = text_rows * 22 # 22 height_pixels/line
    word_rule = self.word_rule()
    # emit the FCKeditor bootstrap; the editor's initial value is appended
    # right after this script fragment (note the dangling "oFCKeditor.Value=")
    request.write("""
<script type="text/javascript" src="%(fckbasepath)s/fckeditor.js"></script>
<script type="text/javascript">
<!--
    var oFCKeditor = new FCKeditor( 'savetext', '100%%', %(editor_size)s, 'MoinDefault' ) ;
    oFCKeditor.BasePath= '%(fckbasepath)s/' ;
    oFCKeditor.Config['WikiBasePath'] = '%(wikiurl)s' ;
    oFCKeditor.Config['WikiPage'] = '%(wikipage)s' ;
    oFCKeditor.Config['PluginsPath'] = '%(url_prefix_local)s/applets/moinFCKplugins/' ;
    oFCKeditor.Config['CustomConfigurationsPath'] = '%(url_prefix_local)s/applets/moinfckconfig.js' ;
    oFCKeditor.Config['WordRule'] = %(word_rule)s ;
    oFCKeditor.Config['SmileyPath'] = '%(smileypath)s/' ;
    oFCKeditor.Config['EditorAreaCSS'] = '%(themepath)s/css/common.css' ;
    oFCKeditor.Config['SkinPath'] = '%(fckbasepath)s/editor/skins/silver/' ;
    oFCKeditor.Config['AutoDetectLanguage'] = false ;
    oFCKeditor.Config['DefaultLanguage'] = '%(uilanguage)s' ;
    oFCKeditor.Config['ContentLangDirection'] = '%(contentlangdirection)s' ;
    oFCKeditor.Value= """ % locals())

    from MoinMoin.formatter.text_gedit import Formatter
    self.formatter = Formatter(request)
    self.formatter.page = self
    output = request.redirectedOutput(self.send_page_content, request,
                                      raw_body,
                                      format=self.pi['format'],
                                      do_cache=False)
    # repr() turns the rendered HTML into a quoted JS-safe string literal;
    # strip the leading 'u' of the py2 unicode repr
    output = repr(output)
    if output[0] == 'u':
        output = output[1:]
    request.write(output)
    request.write(""" ;
    oFCKeditor.Create() ;
//-->
</script>
""")
    request.write("<p>")
    request.write(_("Comment:"),
                  ' <input id="editor-comment" type="text" name="comment" value="%s" size="80" maxlength="200">' % (
                      wikiutil.escape(kw.get('comment', ''), 1), ))
    request.write("</p>")

    # Category selection
    filterfn = self.cfg.cache.page_category_regexact.search
    cat_pages = request.rootpage.getPageList(filter=filterfn)
    cat_pages.sort()
    cat_pages = [wikiutil.pagelinkmarkup(p) for p in cat_pages]
    cat_pages.insert(0, ('', _('<No addition>')))
    request.write("<p>")
    request.write(_('Add to: %(category)s') % {
        'category': unicode(web.makeSelection('category', cat_pages)),
    })
    if self.cfg.mail_enabled:
        # "trivial change" checkbox (bottom), kept in sync via toggle_trivial
        request.write('''
<input type="checkbox" name="trivial" id="chktrivial" value="1" %(checked)s onclick="toggle_trivial(this)">
<label for="chktrivial">%(label)s</label>
''' % {
            'checked': ('', 'checked')[form.get('trivial', '0') == '1'],
            'label': _("Trivial change"),
        })

    request.write('''
<input type="checkbox" name="rstrip" id="chkrstrip" value="1" %(checked)s>
<label for="chkrstrip">%(label)s</label>
</p> ''' % {
        'checked': ('', 'checked')[form.get('rstrip', '0') == '1'],
        'label': _('Remove trailing whitespace from each line')
    })
    request.write("</p>")

    # run the spell checker on previews if one of its buttons was pressed
    badwords_re = None
    if preview is not None:
        if 'button_spellcheck' in form or 'button_newwords' in form:
            badwords, badwords_re, msg = SpellCheck.checkSpelling(
                self, request, own_form=0)
            request.write("<p>%s</p>" % msg)
    request.write('</fieldset>')
    request.write("</form>")

    if preview is not None:
        if staytop:
            content_id = 'previewbelow'
        else:
            content_id = 'preview'
        # render the preview with misspelled words highlighted (if any)
        self.send_page(content_id=content_id, content_only=1,
                       hilite_re=badwords_re)

    request.write(request.formatter.endContent()) # end content div
    request.theme.send_footer(self.page_name)
    request.theme.send_closing_html()
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed actions.

    @param context: request context object (supplies getText, cfg, user, theme)
    @param pagename: already-normalized page name (may be empty)
    @param action_name: name of the action to dispatch (default 'show')
    @return: the context object (after the action handler or error page ran)
    """
    _ = context.getText
    cfg = context.cfg

    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        # Legacy-URL convenience: if "Foo_Bar" does not exist but
        # "Foo Bar" does, redirect to the canonical (space) name.
        if '_' in pagename and not context.page.exists():
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)

    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
            'action_name': wikiutil.escape(action_name), }
    # Disallow non available actions.  Only capitalized action names are
    # subject to the availability check (lowercase ones are built-ins).
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
            'action_name': wikiutil.escape(action_name), }
        # Log the denied attempt; include the user name only when logged in.
        if context.user.valid:
            log_attempt(action_name + '/action unavailable', False,
                        context.request, context.user.name,
                        pagename=pagename)
        else:
            log_attempt(action_name + '/action unavailable', False,
                        context.request, pagename=pagename)
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")

    if msg:
        # Validation failed: show the error on the current page.
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            # Action exists but no handler is available for this user/page.
            msg = _(
                "You are not allowed to do %(action_name)s on this page.") % {
                    'action_name': wikiutil.escape(action_name), }
            if context.user.valid:
                log_attempt(action_name + '/no handler', False,
                            context.request, context.user.name,
                            pagename=pagename)
            else:
                log_attempt(action_name + '/no handler', False,
                            context.request, pagename=pagename)
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)

    return context
def escapedText(self, text, **kw): return wikiutil.escape(text)
def collectpackage(self, pagelist, fileobject, pkgname="", include_attachments=False): """ Expects a list of pages as an argument, and fileobject to be an open file object, which a zipfile will get written to. @param pagelist: pages to package @param fileobject: open file object to write to @param pkgname: optional file name, to prevent self packaging @rtype: string or None @return: error message, if one happened @rtype: boolean @param include_attachments: True if you want attachments collected """ _ = self.request.getText COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED pages = [] for pagename in pagelist: pagename = wikiutil.normalize_pagename(pagename, self.request.cfg) if pagename: page = Page(self.request, pagename) if page.exists() and self.request.user.may.read(pagename): pages.append(page) if not pages: return (_('No pages like "%s"!') % wikiutil.escape(pagelist)) # Set zipfile output zf = zipfile.ZipFile(fileobject, "w", COMPRESSION_LEVEL) cnt = 0 userid = user.getUserIdentification(self.request) script = [ packLine(['MoinMoinPackage', '1']), ] for page in pages: cnt += 1 files = _get_files(self.request, page.page_name) script.append( packLine([ "AddRevision", str(cnt), page.page_name, userid, "Created by the PackagePages action." ])) timestamp = wikiutil.version2timestamp(page.mtime_usecs()) zi = zipfile.ZipInfo( filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6]) zi.compress_type = COMPRESSION_LEVEL zf.writestr(zi, page.get_raw_body().encode("utf-8")) if include_attachments: for attname in files: if attname != pkgname: cnt += 1 zipname = "%d_attachment" % cnt script.append( packLine([ "AddAttachment", zipname, attname, page.page_name, userid, "Created by the PackagePages action." ])) filename = AttachFile.getFilename( self.request, page.page_name, attname) zf.write(filename, zipname) script += [packLine(['Print', 'Thank you for using PackagePages!'])] zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8")) zf.close()
def execute(pagename, request):
    """ Send recent changes as an RSS document """
    if not wikixml.ok:
        # XML support missing; answer in plain text instead of RSS.
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return
    if request.isSpiderAgent: # reduce bot cpu usage
        return ''

    cfg = request.cfg
    _ = request.getText

    # get params -- start from configured defaults, then let URL parameters
    # override them (each in its own try so one bad value doesn't kill the rest)
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern
    try:
        max_items = min(int(request.values.get('items', max_items)),
                        items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get('unique', unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get('diffs', diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get('lines', max_lines)),
                        lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get('show_att', show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get('page', page_pattern)
    except ValueError:
        pass

    # if we are just interested in a specific page, using the local edit-log
    # of that page is much faster than the global one - esp. if the page was
    # NOT recently changed and the global edit-log is rather big.
    kw = dict(rootpagename=page_pattern) if is_single_page_match(
        page_pattern) else {}
    log = editlog.EditLog(request, **kw)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    # Walk the edit log newest-first, keeping at most max_items readable,
    # pattern-matching entries (deduplicated per page when unique is set).
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((not show_att and not line.action.startswith('SAVE')) or
            ((line.pagename in pages) and unique) or
            not match_page(line.pagename, page_pattern)):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            # first accepted entry is the newest -> overall last-modified time
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    # etag encodes everything that influences the generated document
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs,
                                     unique, max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root
        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream; the leading XML comment documents the supported
        # URL parameters for feed consumers
        handler.startDocument()
        handler.write(
            u'<!--\n'
            u' Add an "items=nnn" URL parameter to get more than the \n'
            u' default %(def_max_items)d items. You cannot get more than \n'
            u' %(items_limit)d items though.\n'
            u' \n'
            u' Add "unique=1" to get a list of changes where page names are unique,\n'
            u' i.e. where only the latest change of each page is reflected.\n'
            u' \n'
            u' Add "diffs=1" to add change diffs to the description of each items.\n'
            u' \n'
            u' Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u' \n'
            u' Add "lines=nnn" to change maximum number of diff/body lines \n'
            u' to show. Cannot be more than %(lines_limit)d.\n'
            u' \n'
            u' Add "show_att=1" to show items related to attachments.\n'
            u' \n'
            u' Add "page=pattern" to show feed only for specific pages.\n'
            u' Pattern can be empty (it would match to all pages), \n'
            u' can start with circumflex (it would be interpreted as \n'
            u' regular expression in this case), end with slash (for \n'
            u' getting feed for page tree) or point to specific page (if \n'
            u' none of the above can be applied).\n'
            u' \n'
            u' Current settings: items=%(max_items)i, unique=%(unique)i, \n'
            u' diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n'
            u' show_att=%(show_att)i\n'
            u'-->\n' % locals())

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description',
                           u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        # rdf:Seq of item links (RSS 1.0 table of contents)
        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={
                (handler.xmlns['rdf'], 'resource'): link,
            })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
            })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            # follow recorded renames so links point at the current page name
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={
                (handler.xmlns['rdf'], 'about'): rdflink,
            })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'): # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                # NOTE(review): att_exists is computed but not used in the
                # visible code below -- verify whether it can be removed.
                att_exists = AttachFile.exists(request, cur_pagename,
                                               filename)

                if action == 'ATTNEW':
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request, cur_pagename, filename,
                                       do='view'))
                    comment = _(u"Upload of attachment '%(filename)s'.") % {
                        'filename': filename
                    }
                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            full_url(request, page,
                                     querystr={'action': 'AttachFile'}))
                    comment = _(u"Attachment '%(filename)s' deleted.") % {
                        'filename': filename
                    }
                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request, cur_pagename, filename,
                                       do='view'))
                    comment = _(u"Drawing '%(filename)s' saved.") % {
                        'filename': filename
                    }
            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {
                        'rev': to_rev}) + "<br />" \
                        + _("Comment:") + " " + comment
                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': item.extra}) + "<br />" \
                        + _("Comment:") + " " + comment
                    # keep the rename chain pointing at the final page name
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename
                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode(
                            'link',
                            full_url(request, page, querystr={
                                'action': 'recall',
                                'rev': str(item_rev)
                            }))
                    else:
                        handler.simpleNode(
                            'link',
                            full_url(request, page, querystr={
                                'action': 'diff',
                                'rev1': str(item_rev),
                                'rev2': str(item_rev - 1)
                            }))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename,
                                     rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request, cur_pagename,
                                                  item_rev - 1, cur_pagename,
                                                  item_rev, ignorews=1)
                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']
                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)
                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

            if not ddiffs:
                handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else: # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"
                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch
                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request, page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request, page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
def item_title(self): return wikiutil.escape(self.page.page_name)
def get_form_html(self, buttons_html):
    """Build the HTML form for the rename-page confirmation dialog.

    @param buttons_html: pre-rendered submit/cancel buttons markup
    @return: HTML form string; includes subpage/link-rename checkboxes
             when the page has subpages, otherwise a simpler form
    """
    _ = self._
    form = values_to_form(self.request.values)
    if self.subpages:
        subpages = ' '.join(map(wikiutil.escape, self.subpages))

        d = {
            'subpage': subpages,
            'subpages_checked': ('', 'checked')[form.get('subpages_checked', ['0'])[0] == '1'],
            'subpage_label': _('Rename all /subpages too?'),
            'links_label': _('Rename links to page too?'),
            # NOTE(review): links_checked reads the 'subpages_checked' form
            # key (not a 'links_checked' one) and inverts it -- confirm this
            # coupling is intentional.
            'links_checked': ('checked', '')[form.get('subpages_checked', ['0'])[0] == '1'],
            'pagename': wikiutil.escape(self.pagename, True),
            'newname_label': _("New name"),
            'comment_label': _("Optional reason for the renaming"),
            'buttons_html': buttons_html,
            'querytext': _('Really rename this page?')
        }

        return '''
<strong>%(querytext)s</strong>
<br>
<br>
<table>
 <tr>
  <dd>
   %(subpage_label)s<input type="checkbox" name="rename_subpages" value="1" %(subpages_checked)s>
  </dd>
  <dd>
   <class="label"><subpage> %(subpage)s</subpage>
  </dd>
 </tr>
</table>
<table>
 <tr>
  <td class="label"><label>%(newname_label)s</label></td>
  <td class="content">
   <input type="text" name="newpagename" value="%(pagename)s" size="80">
  </td>
 </tr>
 <tr>
  <td class="label"><label>%(comment_label)s</label></td>
  <td class="content">
   <input type="text" name="comment" size="80" maxlength="200">
  </td>
 </tr>
 <tr>
  <td>
   <dd>
    %(links_label)s<input type="checkbox" name="rename_links" value="1" %(links_checked)s>
   </dd>
  </td>
 </tr>
 <tr>
  <td></td>
  <td class="buttons">
   %(buttons_html)s
  </td>
 </tr>
</table>
''' % d
    else:
        # No subpages: omit the subpage-rename controls.
        d = {
            'pagename': wikiutil.escape(self.pagename, True),
            'newname_label': _("New name"),
            'comment_label': _("Optional reason for the renaming"),
            'links_label': _('Rename links to page too?'),
            'links_checked': ('checked', '')[form.get('subpages_checked', ['0'])[0] == '1'],
            'buttons_html': buttons_html,
        }
        return '''
<table>
 <tr>
  <td class="label"><label>%(newname_label)s</label></td>
  <td class="content">
   <input type="text" name="newpagename" value="%(pagename)s" size="80">
  </td>
 </tr>
 <tr>
  <td class="label"><label>%(comment_label)s</label></td>
  <td class="content">
   <input type="text" name="comment" size="80" maxlength="200">
  </td>
 </tr>
 <tr>
  <td>
   <dd>
    %(links_label)s<input type="checkbox" name="rename_links" value="1" %(links_checked)s>
   </dd>
  </td>
 </tr>
 <tr>
  <td></td>
  <td class="buttons">
   %(buttons_html)s
  </td>
 </tr>
</table>
''' % d
def item_slide_link_base(self): return wikiutil.escape(self.pageURL) + '?action=%s&n=' % self.name
def escape(x): if x is None: return x else: return wikiutil.escape(x)
def item_author(self): return wikiutil.escape(self.request.getPragma('author', defval=''))
def diff(request, old, new, old_top='', new_top='', old_bottom='',
         new_bottom='', old_top_class='', new_top_class='',
         old_bottom_class='', new_bottom_class=''):
    """ Find changes between old and new and return
        HTML markup visualising them.

    @param old: old text [unicode]
    @param new: new text [unicode]
    @param old_top: Custom html for adding ontop of old revision column (optional)
    @param old_bottom: Custom html for adding at bottom of old revision column (optional)
    @param new_top: Custom html for adding ontop of new revision column (optional)
    @param new_bottom: Custom html for adding at bottom of new revision column (optional)
    @param old_top_class: Custom class for <td> with old_top content (optional)
    @param new_top_class: Custom class for <td> with new_top content (optional)
    @param old_bottom_class: Custom class for <td> with old_bottom content (optional)
    @param new_bottom_class: Custom class for <td> with new_bottom content (optional)
    """
    _ = request.getText
    t_line = _("Line") + " %d"

    seq1 = old.splitlines()
    seq2 = new.splitlines()

    seqobj = difflib.SequenceMatcher(None, seq1, seq2)
    linematch = seqobj.get_matching_blocks()

    result = """
<table class="diff">
"""
    if old_top or new_top:
        result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (
            old_top_class, old_top, new_top_class, new_top)

    if len(seq1) == len(seq2) and linematch[0] == (0, 0, len(seq1)):
        # No differences.
        result += '<tr><td class="diff-same" colspan="2">' + _(
            "No differences found!") + '</td></tr>'
    else:
        # Legend row explaining the colour coding.
        result += """
<tr>
<td class="diff-removed"><span>%s</span></td>
<td class="diff-added"><span>%s</span></td>
</tr>
""" % (_('Deletions are marked like this.'),
       _('Additions are marked like this.'), )

        lastmatch = (0, 0)

        # Print all differences
        for match in linematch:
            # Starts of pages identical?
            if lastmatch == match[0:2]:
                lastmatch = (match[0] + match[2], match[1] + match[2])
                continue
            llineno, rlineno = lastmatch[0] + 1, lastmatch[1] + 1
            # Header row with the line numbers where this hunk starts.
            result += """
<tr class="diff-title">
<td>%s:</td>
<td>%s:</td>
</tr>
""" % (request.formatter.line_anchorlink(1, llineno) +
       request.formatter.text(t_line % llineno) +
       request.formatter.line_anchorlink(0),
       request.formatter.line_anchorlink(1, rlineno) +
       request.formatter.text(t_line % rlineno) +
       request.formatter.line_anchorlink(0))

            # Collect the changed lines of each side into one text pane.
            leftpane = ''
            rightpane = ''
            linecount = max(match[0] - lastmatch[0], match[1] - lastmatch[1])
            for line in range(linecount):
                if line < match[0] - lastmatch[0]:
                    if line > 0:
                        leftpane += '\n'
                    leftpane += seq1[lastmatch[0] + line]
                if line < match[1] - lastmatch[1]:
                    if line > 0:
                        rightpane += '\n'
                    rightpane += seq2[lastmatch[1] + line]

            charobj = difflib.SequenceMatcher(None, leftpane, rightpane)
            charmatch = charobj.get_matching_blocks()

            if charobj.ratio() < 0.5:
                # Insufficient similarity: show whole panes as changed.
                if leftpane:
                    leftresult = """<span>%s</span>""" % indent(
                        escape(leftpane))
                else:
                    leftresult = ''

                if rightpane:
                    rightresult = """<span>%s</span>""" % indent(
                        escape(rightpane))
                else:
                    rightresult = ''
            else:
                # Some similarities; markup changes at character level.
                charlast = (0, 0)

                leftresult = ''
                rightresult = ''
                for thismatch in charmatch:
                    if thismatch[0] - charlast[0] != 0:
                        leftresult += """<span>%s</span>""" % indent(
                            escape(leftpane[charlast[0]:thismatch[0]]))
                    if thismatch[1] - charlast[1] != 0:
                        rightresult += """<span>%s</span>""" % indent(
                            escape(rightpane[charlast[1]:thismatch[1]]))
                    leftresult += escape(
                        leftpane[thismatch[0]:thismatch[0] + thismatch[2]])
                    rightresult += escape(
                        rightpane[thismatch[1]:thismatch[1] + thismatch[2]])
                    charlast = (thismatch[0] + thismatch[2],
                                thismatch[1] + thismatch[2])

            leftpane = '<br>'.join([indent(x) for x in leftresult.splitlines()])
            rightpane = '<br>'.join(
                [indent(x) for x in rightresult.splitlines()])

            # removed width="50%%"
            result += """
<tr>
<td class="diff-removed">%s</td>
<td class="diff-added">%s</td>
</tr>
""" % (leftpane, rightpane)

            lastmatch = (match[0] + match[2], match[1] + match[2])

    if old_bottom or new_bottom:
        # BUGFIX: the bottom row previously re-emitted old_top/new_top with
        # the *_top_class values, so custom bottom content never appeared and
        # the top content was duplicated.  Use the bottom variables here.
        result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (
            old_bottom_class, old_bottom, new_bottom_class, new_bottom)
    result += '</table>\n'
    return result
def save(self, editor, newtext, rev, **kw): BLACKLISTPAGES = ["BadContent", "LocalBadContent"] if not editor.page_name in BLACKLISTPAGES: request = editor.request # Start timing of antispam operation request.clock.start('antispam') blacklist = [] latest_mtime = 0 for pn in BLACKLISTPAGES: do_update = ( pn != "LocalBadContent" and request.cfg.interwikiname != 'MoinMaster' ) # MoinMaster wiki shall not fetch updates from itself blacklist_mtime, blacklist_entries = getblacklist( request, pn, do_update) blacklist += blacklist_entries latest_mtime = max(latest_mtime, blacklist_mtime) if blacklist: invalid_cache = not getattr(request.cfg.cache, "antispam_blacklist", None) if invalid_cache or request.cfg.cache.antispam_blacklist[ 0] < latest_mtime: mmblcache = [] for blacklist_re in blacklist: try: mmblcache.append(re.compile(blacklist_re, re.I)) except re.error, err: logging.error( "Error in regex '%s': %s. Please check the pages %s." % (blacklist_re, str(err), ', '.join(BLACKLISTPAGES))) request.cfg.cache.antispam_blacklist = (latest_mtime, mmblcache) from MoinMoin.Page import Page oldtext = "" if rev > 0: # rev is the revision of the old page page = Page(request, editor.page_name, rev=rev) oldtext = page.get_raw_body() newset = frozenset(newtext.splitlines(1)) oldset = frozenset(oldtext.splitlines(1)) difference = newset - oldset addedtext = kw.get('comment', u'') + u''.join(difference) for blacklist_re in request.cfg.cache.antispam_blacklist[1]: match = blacklist_re.search(addedtext) if match: # Log error and raise SaveError, PageEditor should handle this. _ = editor.request.getText msg = _( 'Sorry, can not save page because "%(content)s" is not allowed in this wiki.' ) % { 'content': wikiutil.escape(match.group()) } logging.info(msg) raise editor.SaveError(msg) request.clock.stop('antispam')
def get_form_html(self, buttons_html): _ = self._ if self.subpages: redirect_label = _('Create redirect for renamed page(s)?') subpages = ' '.join( [wikiutil.escape(page) for page in self.subpages]) subpages_html = """ <tr> <dd> %(subpage_label)s<input type="checkbox" name="rename_subpages" value="1" %(subpages_checked)s> </dd> <dd> <class="label"><subpage> %(subpage)s</subpage> </dd> </tr> """ % { 'subpage': subpages, 'subpages_checked': ('', 'checked' )[self.request.args.get('subpages_checked', '0') == '1'], 'subpage_label': _('Rename all /subpages too?'), } else: redirect_label = _('Create redirect for renamed page?') subpages_html = "" if self.show_redirect: redirect_html = '<tr><dd>%(redirect_label)s<input type="checkbox" name="rename_redirect" value="1" %(redirect)s></dd></tr>' % { 'redirect': self.rename_redirect, 'redirect_label': redirect_label, } else: redirect_html = '' if self.show_redirect or self.subpages: options_html = """ <table> %(subpages_html)s %(redirect_html)s </table> """ % { "subpages_html": subpages_html, "redirect_html": redirect_html, } else: options_html = "" d = { 'querytext': _('Really rename this page?'), 'pagename': wikiutil.escape(self.pagename, True), 'newname_label': _("New name"), 'comment_label': _("Optional reason for the renaming"), 'buttons_html': buttons_html, 'options_html': options_html, } return ''' <strong>%(querytext)s</strong> <br> <br> %(options_html)s <table> <tr> <td class="label"><label>%(newname_label)s</label></td> <td class="content"> <input type="text" name="newpagename" value="%(pagename)s" size="80"> </td> </tr> <tr> <td class="label"><label>%(comment_label)s</label></td> <td class="content"> <input type="text" name="comment" size="80" maxlength="200"> </td> </tr> <tr> <td></td> <td class="buttons"> %(buttons_html)s </td> </tr> </table> ''' % d
def macro_EmbedObject(
        macro,
        target=wikiutil.required_arg(unicode),
        pagename=None,
        width=wikiutil.UnitArgument(None, float,
                                    ['px', 'em', 'pt', 'in', 'mm', '%'],
                                    defaultunit='px'),
        height=wikiutil.UnitArgument(None, float,
                                     ['px', 'em', 'pt', 'in', 'mm', '%'],
                                     defaultunit='px'),
        alt=u'',
        play=False,
        stop=True,
        loop=False,
        quality=(u'high', u'low', u'medium'),
        op=True,
        repeat=False,
        autostart=False,
        align=(u'middle', u'top', u'bottom'),
        hidden=False,
        menu=True,
        wmode=u'transparent',
        url_mimetype=None):
    """ This macro is used to embed an object into a wiki page """
    # Join unit arguments with their units
    # (width/height arrive as (value, unit) tuples from UnitArgument)
    if width:
        if width[1] == 'px':
            width = '%dpx' % int(width[0])
        else:
            width = '%g%s' % width
    if height:
        if height[1] == 'px':
            height = '%dpx' % int(height[0])
        else:
            height = '%g%s' % height

    request = macro.request
    _ = macro.request.getText
    fmt = macro.formatter

    # AttachFile calls always with pagename. Users can call the macro from a different page as the attachment is saved.
    if not pagename:
        pagename = fmt.page.page_name

    if not wikiutil.is_URL(target):
        # Target is an attachment name, not a URL.
        pagename, fname = AttachFile.absoluteName(target, pagename)

        if not AttachFile.exists(request, pagename, fname):
            # Attachment missing: render an upload link instead of embedding.
            linktext = _('Upload new attachment "%(filename)s"') % {
                'filename': fname
            }
            target = AttachFile.getAttachUrl(pagename, fname, request,
                                             do='upload_form')
            return (fmt.url(1, target) + fmt.text(linktext) + fmt.url(0))

        url = AttachFile.getAttachUrl(pagename, fname, request)
        mt = wikiutil.MimeType(filename=fname)
    else:
        # External URL: the caller must supply the mimetype explicitly.
        if not url_mimetype:
            # NOTE(review): extension_name and extension_type are not defined
            # in this function's visible scope -- presumably module-level
            # globals; verify they exist or this branch raises NameError.
            return fmt.text(
                _('%(extension_name)s %(extension_type)s: Required argument %(argument_name)s missing.'
                  ) % {
                      "extension_name": extension_name,
                      "extension_type": extension_type,
                      "argument_name": "url_mimetype",
                  })
        else:
            url = target
            mt = wikiutil.MimeType() # initialize dict
            try:
                mt.major, mt.minor = url_mimetype.split('/')
            except ValueError:
                return fmt.text(
                    _('%(extension_name)s %(extension_type)s: Invalid %(argument_name)s=%(argument_value)s!'
                      ) % {
                          "extension_name": extension_name,
                          "extension_type": extension_type,
                          "argument_name": "url_mimetype",
                          "argument_value": str(url_mimetype),
                      })

    mime_type = "%s/%s" % (
        mt.major,
        mt.minor,
    )
    # Refuse mimetypes the admin flagged as XSS-dangerous or not embeddable.
    dangerous = mime_type in request.cfg.mimetypes_xss_protect

    if not mime_type in request.cfg.mimetypes_embed or dangerous:
        return "%s: %s%s%s" % (fmt.text(
            _("Current configuration does not allow embedding of the file %(file)s because of its mimetype %(mimetype)s."
              ) % {
                  "mimetype": mime_type,
                  "file": target
              }), fmt.url(1, url), fmt.text(target), fmt.url(0))

    if not alt:
        alt = "%(text)s %(mime_type)s" % {
            'text': _("Embedded"),
            'mime_type': mime_type
        }

    embed_src = ''
    if mt.major == 'video':
        if not width and not height:
            # default viewport for videos when caller gave no size
            width = '400px'
            height = '400px'
        embed_src = '''
<object %(ob_data)s %(ob_type)s %(ob_width)s %(ob_height)s %(ob_align)s %(ob_standby)s %(ob_stop)s>
%(wmode)s%(movie)s%(play)s%(stop)s%(repeat)s%(autostart)s%(op)s%(menu)s
<p>%(alt)s</p>
</object>''' % {
            "ob_data": _check_object_value("data", url),
            "ob_type": _check_object_value("type", mime_type),
            "ob_width": _check_object_value("width", width),
            "ob_height": _check_object_value("height", height),
            "ob_align": _check_object_value("align", align),
            "ob_standby": _check_object_value("standby", alt),
            "ob_stop": _check_object_value("stop", stop),
            "wmode": _check_param_value("wmode", wmode, "data"),
            "movie": _check_param_value("movie", url, "data"),
            "play": _check_param_value("play", play, "data"),
            "stop": _check_param_value("stop", stop, "data"),
            "repeat": _check_param_value("repeat", repeat, "data"),
            "autostart": _check_param_value("autostart", autostart, "data"),
            "op": _check_param_value("op", op, "data"),
            "menu": _check_param_value("menu", menu, "data"),
            "alt": wikiutil.escape(alt),
        }
    elif mt.major in ['image', 'chemical', 'x-world']:
        embed_src = '''
<object %(ob_data)s %(ob_type)s %(ob_width)s %(ob_height)s %(ob_align)s>
%(name)s
<p>%(alt)s</p>
</object>''' % {
            # NOTE(review): "mime_type" key is not referenced by the template
            # above -- verify whether it can be dropped.
            "mime_type": mime_type,
            "ob_data": _check_object_value("data", url),
            "ob_width": _check_object_value("width", width),
            "ob_height": _check_object_value("height", height),
            "ob_type": _check_object_value("type", mime_type),
            "ob_align": _check_object_value("align", align),
            "name": _check_param_value("name", url, "data"),
            "alt": wikiutil.escape(alt),
        }
    elif mt.major == 'audio':
        if not width and not height:
            # default size for audio player controls
            width = '400px'
            height = '100px'
        embed_src = '''
<object %(ob_data)s %(ob_type)s %(ob_width)s %(ob_height)s %(ob_align)s>
%(audio)s%(repeat)s%(autostart)s%(op)s%(play)s%(stop)s%(hidden)s<p>%(alt)s</p>
</object>''' % {
            "ob_data": _check_object_value("data", url),
            "ob_width": _check_object_value("width", width or "60"),
            "ob_height": _check_object_value("height", height or "20"),
            "ob_type": _check_object_value("type", mime_type),
            "ob_align": _check_object_value("align", align),
            "audio": _check_param_value("audio", url, "data"),
            "repeat": _check_param_value("repeat", repeat, "data"),
            "autostart": _check_param_value("autostart", autostart, "data"),
            "op": _check_param_value("op", op, "data"),
            "play": _check_param_value("play", play, "data"),
            "stop": _check_param_value("stop", stop, "data"),
            "hidden": _check_param_value("hidden", hidden, "data"),
            "alt": wikiutil.escape(alt),
        }
    elif mt.major == 'application':
        # workaround for the acroread browser plugin not knowing the size to embed
        # we use a width of 100% for the case that there is no width given.
        # A height of 100% gives a fullscreen pdf file view without embedding it into the wikicontent.
        if mt.minor == 'pdf':
            width = width or '100%'
            height = height or '800px'
            embed_src = '''
<object %(ob_data)s %(ob_type)s %(ob_width)s %(ob_height)s %(ob_align)s>
<p>%(alt)s</p>
</object>''' % {
                "ob_data": _check_object_value("data", url),
                "ob_width": _check_object_value("width", width),
                "ob_height": _check_object_value("height", height),
                "ob_type": _check_object_value("type", mime_type),
                "ob_align": _check_object_value("align", align),
                "alt": wikiutil.escape(alt),
            }
        else:
            embed_src = '''
<object %(ob_data)s %(ob_type)s %(ob_width)s %(ob_height)s %(ob_align)s>
%(movie)s%(quality)s%(wmode)s%(autostart)s%(play)s%(loop)s%(menu)s<p>%(alt)s</p>
</object>''' % {
                "ob_data": _check_object_value("data", url),
                "ob_width": _check_object_value("width", width),
                "ob_height": _check_object_value("height", height),
                "ob_type": _check_object_value("type", mime_type),
                "ob_align": _check_object_value("align", align),
                "movie": _check_param_value("movie", url, "data"),
                "quality": _check_param_value("quality", quality, "data"),
                "wmode": _check_param_value("wmode", wmode, "data"),
                "autostart": _check_param_value("autostart", autostart,
                                                "data"),
                "play": _check_param_value("play", play, "data"),
                "loop": _check_param_value("loop", loop, "data"),
                "menu": _check_param_value("menu", menu, "data"),
                "alt": wikiutil.escape(alt),
            }

    return fmt.rawHTML(embed_src)
def format(self, formatter, **kw): _ = self.request.getText fmt = PygmentsFormatter(formatter, start_line=self.start_line) # adding line number anchors for process instruction lines for lineno in range(1, self.num_start + 1): fmt.result.append(formatter.line_anchordef(lineno)) fmt.result.append(formatter.div(1, css_class="highlight %s" % self.syntax)) self._code_id = hashlib.new('sha1', self.raw.encode(config.charset)).hexdigest() msg = None if self.filename is not None: try: lexer = pygments.lexers.get_lexer_for_filename(self.filename) except pygments.util.ClassNotFound: fmt.result.append(formatter.text(self.filename)) lexer = pygments.lexers.TextLexer() else: try: lexer = pygments.lexers.get_lexer_by_name(self.syntax) except pygments.util.ClassNotFound: f = self.request.formatter url = ''.join([ f.url(1, href=Page(self.request, _("HelpOnParsers")).url(self.request, escape=0)), _("HelpOnParsers"), f.url(0)]) msg = _("Syntax highlighting not supported for '%(syntax)s', see %(highlight_help_page)s.") % {"syntax": wikiutil.escape(self.syntax), "highlight_help_page": url } lexer = pygments.lexers.TextLexer() fmt.result.append(formatter.code_area(1, self._code_id, self.syntax, self.show_nums, self.num_start, self.num_step, msg)) pygments.highlight(self.raw, lexer, fmt) fmt.result.append(formatter.code_area(0, self._code_id)) fmt.result.append(formatter.div(0)) self.request.write("".join(fmt.result))
def execute(macro, text):
    """ Render a month calendar as an HTML table.

    Each day cell links to a wiki day-page named <page>/YYYY-MM-DD (or
    <page>/MM-DD in anniversary mode); existing, readable day-pages get a
    JS tooltip built from their level-1 headings.  Prev/next month and
    year navigation links carry a 'calparms' query parameter so the
    calendar can be re-rendered at another offset.

    @param macro: the macro instance (gives access to request/formatter)
    @param text: macro arguments string, or None for a bare macro call
    @rtype: string
    @return: rendered HTML for the calendar (via formatter.rawHTML)
    """
    request = macro.request
    formatter = macro.formatter
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(time.time())
    thispage = formatter.page.page_name
    # does the url have calendar params (= somebody has clicked on prev/next links in calendar) ?
    if 'calparms' in macro.request.args:
        has_calparms = 1 # yes!
        text2 = macro.request.args['calparms']
        cparmpagename, cparmyear, cparmmonth, cparmoffset, cparmoffset2, cparmheight6, cparmanniversary, cparmtemplate = \
            parseargs(request, text2, thispage, currentyear, currentmonth, 0, 0, False, False, u'')
        # Note: cparmheight6 and cparmanniversary are not used, they are just there
        # to have a consistent parameter string in calparms and macro args
    else:
        has_calparms = 0

    if text is None: # macro call without parameters
        text = u''

    # parse and check arguments
    parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, anniversary, parmtemplate = \
        parseargs(request, text, thispage, currentyear, currentmonth, 0, 0, False, False, u'')

    # does url have calendar params and is THIS the right calendar to modify (we can have multiple
    # calendars on the same page)?
    #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):
    # move all calendars when using the navigation:
    if has_calparms and cparmpagename == parmpagename:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset + cparmoffset2)
        parmoffset2 = cparmoffset2
        parmtemplate = cparmtemplate
    else:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)

    # crawler defense: refuse to render months far from the current year
    # for spiders, so they cannot walk the navigation links forever
    if request.isSpiderAgent and abs(currentyear - year) > 1:
        return '' # this is a bot and it didn't follow the rules (see below)

    if currentyear == year:
        attrs = {}
    else:
        attrs = {'rel': 'nofollow'} # otherwise even well-behaved bots will index forever

    # get the calendar
    monthcal = calendar.monthcalendar(year, month)

    # european / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    if calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    # build the prev/next month and year navigation URLs; the calparms
    # querystring mirrors the macro argument order expected above
    qpagenames = '*'.join([wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth, parmoffset)
    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request, querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request, querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', ** attrs) + '<' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', ** attrs) + '>' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', ** attrs) + '<<' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', ** attrs) + '>>' + formatter.url(0)

    if parmpagename != [thispage]:
        # calendar for another page (or page set): show its name(s) above
        # the calendar, coloring chunks of the first name in stepped hues
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)  # NOTE: Python 2 integer division
        if steps > maxsteps:
            steps = maxsteps
        chstep = int(l / steps)
        st = 0
        while st < l:
            ch = parmpagename[0][st:st + chstep]
            r, g, b = cliprgb(r, g, b)
            link = Page(request, parmpagename[0]).link_to(request, ch, rel='nofollow', style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + link
            r, g, b = (r, g + colorstep, b)
            st = st + chstep
        # remaining page names get a fixed light-green background
        r, g, b = (255 - colorstep, 255, 255 - colorstep)
        for page in parmpagename[1:]:
            link = Page(request, page).link_to(request, page, rel='nofollow', style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + '*' + link
        showpagename = ' %s<BR>\n' % pagelinks
    else:
        showpagename = ''

    # header row: page name(s) + navigation + month/year caption
    if calendar.firstweekday() == calendar.SUNDAY:
        resth1 = ' <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 ' %s %s <b> %s %s</b> %s\n %s\n' \
                 ' </th>\n' % (showpagename, prevyear, prevmonth, months[month-1], str(year), nextmonth, nextyear)
    if calendar.firstweekday() == calendar.MONDAY:
        resth1 = ' <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 ' %s %s <b> %s / %s</b> %s\n %s\n' \
                 ' </th>\n' % (showpagename, prevyear, prevmonth, str(year), month, nextmonth, nextyear)
    restr1 = ' <tr>\n%s </tr>\n' % resth1

    # weekday-name row, weekend columns styled differently
    r7 = range(7)
    restd2 = []
    for wkday in r7:
        wday = _(wkdays[wkday])
        if wkday in wkend:
            cssday = "cal-weekend"
        else:
            cssday = "cal-workday"
        restd2.append(' <td class="%s">%s</td>\n' % (cssday, wday))
    restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)

    # pad to 6 weeks if a fixed-height calendar was requested
    if parmheight6:
        while len(monthcal) < 6:
            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]

    maketip_js = []
    restrn = []
    for week in monthcal:
        restdn = []
        for wkday in r7:
            day = week[wkday]
            if not day:
                # day 0 = cell outside this month
                restdn.append(' <td class="cal-invalidday"> </td>\n')
            else:
                page = parmpagename[0]
                if anniversary:
                    link = "%s/%02d-%02d" % (page, month, day)
                else:
                    link = "%s/%4d-%02d-%02d" % (page, year, month, day)
                daypage = Page(request, link)
                if daypage.exists() and request.user.may.read(link):
                    csslink = "cal-usedday"
                    query = {}
                    r, g, b, u = (255, 0, 0, 1)
                    daycontent = daypage.get_raw_body()
                    # collect level-1 headings of the day page for the tooltip
                    header1_re = re.compile(r'^\s*=\s(.*)\s=$', re.MULTILINE) # re.UNICODE
                    titletext = []
                    for match in header1_re.finditer(daycontent):
                        if match:
                            title = match.group(1)
                            # escape for embedding in single-quoted JS strings
                            title = wikiutil.escape(title).replace("'", "\\'")
                            titletext.append(title)
                    tipname_unescaped = link.replace("'", "\\'")
                    link = wikiutil.escape(link).replace("'", "\\'")
                    tipname = link
                    tiptitle = link
                    tiptext = '<br>'.join(titletext)
                    maketip_js.append("maketip('%s','%s','%s');" % (tipname, tiptitle, tiptext))
                    attrs = {'onMouseOver': "tip('%s')" % tipname_unescaped,
                             'onMouseOut': "untip()"}
                else:
                    # day page missing (or unreadable): link creates it,
                    # optionally pre-filled from a template
                    csslink = "cal-emptyday"
                    if parmtemplate:
                        query = {'action': 'edit', 'template': parmtemplate}
                    else:
                        query = {}
                    r, g, b, u = (255, 255, 255, 0)
                    if wkday in wkend:
                        csslink = "cal-weekend"
                    attrs = {'rel': 'nofollow'}
                # additional page names: shade the cell by how many of their
                # day pages exist (u flags whether the primary page existed)
                for otherpage in parmpagename[1:]:
                    otherlink = "%s/%4d-%02d-%02d" % (otherpage, year, month, day)
                    otherdaypage = Page(request, otherlink)
                    if otherdaypage.exists():
                        csslink = "cal-usedday"
                        if u == 0:
                            r, g, b = (r - colorstep, g, b - colorstep)
                        else:
                            r, g, b = (r, g + colorstep, b)
                r, g, b = cliprgb(r, g, b)
                style = 'background-color:#%02x%02x%02x' % (r, g, b)
                fmtlink = formatter.url(1, daypage.url(request, query), csslink, ** attrs) + str(day) + formatter.url(0)
                if day == currentday and month == currentmonth and year == currentyear:
                    cssday = "cal-today"
                    fmtlink = "<b>%s</b>" % fmtlink # for browser with CSS probs
                else:
                    cssday = "cal-nottoday"
                restdn.append(' <td style="%s" class="%s">%s</td>\n' % (style, cssday, fmtlink))
        restrn.append(' <tr>\n%s </tr>\n' % "".join(restdn))

    restable = '<table border="2" cellspacing="2" cellpadding="2">\n<col width="14%%" span="7">%s%s%s</table>\n'
    restable = restable % (restr1, restr2, "".join(restrn))

    # inline JS that registers the tooltips collected above
    if maketip_js:
        tip_js = '''<script language="JavaScript" type="text/javascript">
<!--
%s
// -->
</script>
''' % '\n'.join(maketip_js)
    else:
        tip_js = ''

    result = """\
<script type="text/javascript" src="%s/common/js/infobox.js"></script>
<div id="%s" style="position:absolute; visibility:hidden; z-index:20; top:-999em; left:0px;"></div>
%s%s
""" % (request.cfg.url_prefix_static, formatter.make_id_unique('infodiv'), tip_js, restable)

    return formatter.rawHTML(result)
except GroupException, ge: tmp = "User invitation mail sent to address '%s', but could not add the user to group '%s': %s" if myuser.email != email: tmp += " Please note that the email address was converted to lowercase!" return True, wikiutil.escape( tmp % (email, mygrouppage, unicode(ge))) tmp = "User invitation mail sent to address '%s' and the user was added to group '%s'." if myuser.email != email: tmp += " Please note that the email address was converted to lowercase!" return True, wikiutil.escape(tmp % (email, mygrouppage)) except InviteException, ie: return False, wikiutil.escape(unicode(ie).encode(config.charset)) return True, wikiutil.escape("Invitation mail sent to address '%s'." % email) def get_form_html(self, buttons_html): template_html = '' for template in self.template: if type(template) == type(list()): value = name = template[0] if len(template) > 1: value += "," + template[1] if len(template) > 2: name = template[2] else: value = name = template value = wikiutil.escape(value, True)