def image_emit(self, node):
    target = node.content
    text = self.get_text(node)
    m = self.addr_re.match(target)
    if m:
        if m.group("page_name"):
            # inserted anchors
            url = wikiutil.url_unquote(target, want_unicode=True)
            if target.startswith("#"):
                return self.formatter.rawHTML(u'<a name="%s"></a>' % url[1:])
            # default to images
            return self.formatter.attachment_image(url, alt=text, html_class="image")
        elif m.group("extern_addr"):
            # external link
            address = m.group("extern_addr")
            proto = m.group("extern_proto")
            url = wikiutil.url_unquote(address, want_unicode=True)
            return self.formatter.image(src=url, alt=text, html_class="external_image")
        elif m.group("attach_scheme"):
            # link to an attachment
            scheme = m.group("attach_scheme")
            attachment = m.group("attach_addr")
            url = wikiutil.url_unquote(attachment, want_unicode=True)
            if scheme == "image":
                return self.formatter.attachment_image(url, alt=text, html_class="image")
            elif scheme == "drawing":
                return self.formatter.attachment_drawing(url, text)
            else:
                pass
        elif m.group("inter_wiki"):
            # interwiki link
            pass
    # return "".join(["{{", self.formatter.text(target), "}}"])
    url = wikiutil.url_unquote(node.content, want_unicode=True)
    return self.formatter.attachment_inlined(url, text)
def revert_pages(request, editor, timestamp):
    _ = request.getText
    editor = wikiutil.url_unquote(editor)
    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}
    revertpages = []
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break
        if not request.user.may.read(line.pagename):
            continue
        if line.pagename not in pages:
            pages[line.pagename] = 1
            if repr(line.getInterwikiEditorData(request)) == editor:
                revertpages.append(line.pagename)

    request.write("Pages to revert:<br>%s" % "<br>".join([wikiutil.escape(p) for p in revertpages]))
    for pagename in revertpages:
        request.write("Begin reverting %s ...<br>" % wikiutil.escape(pagename))
        msg = revert_page(request, pagename, editor)
        if msg:
            request.write("<p>%s: %s</p>" % (
                Page.Page(request, pagename).link_to(request), msg))
        request.write("Finished reverting %s.<br>" % wikiutil.escape(pagename))
def attachment(self, url_and_text, **kw):
    """ This gets called on attachment URLs. """
    _ = self._
    if len(url_and_text) == 1:
        url = url_and_text[0]
        text = None
    else:
        url, text = url_and_text

    inline = url[0] == 'i'
    drawing = url[0] == 'd'
    url = url.split(":", 1)[1]
    url = wikiutil.url_unquote(url, want_unicode=True)
    text = text or url

    from MoinMoin.action import AttachFile
    if drawing:
        return self.formatter.attachment_drawing(url, text)

    # check for image URL, and possibly return IMG tag
    # (images are always inlined, just like for other URLs)
    if not kw.get('pretty_url', 0) and wikiutil.isPicture(url):
        return self.formatter.attachment_image(url)

    # inline the attachment
    if inline:
        return self.formatter.attachment_inlined(url, text)

    return self.formatter.attachment_link(url, text)
def translate_path(self, uri):
    """ Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.
    """
    path = wikiutil.url_unquote(uri, want_unicode=False)
    path = path.replace('\\', '/')
    words = path.split('/')
    words = filter(None, words)

    path = self.server.htdocs
    bad_uri = 0
    for word in words:
        drive, word = os.path.splitdrive(word)
        if drive:
            bad_uri = 1
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            bad_uri = 1
            continue
        path = os.path.join(path, word)

    if bad_uri:
        self.log_error("Detected bad request URI '%s', translated to '%s'"
                       % (uri, path, ))
    return path
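# A minimal standalone sketch of the sanitization idea above (the htdocs
# root and the sample URI are hypothetical; translate_path itself also
# flags and logs bad URIs, which this sketch omits):
import os
import urllib

def safe_translate(uri, root):
    path = urllib.unquote(uri).replace('\\', '/')
    out = root
    for word in filter(None, path.split('/')):
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if drive or word in (os.curdir, os.pardir):
            continue # drop drive letters and '.'/'..' components
        out = os.path.join(out, word)
    return out

print safe_translate('/css/%2e%2e/screen.css', '/htdocs') # -> /htdocs/css/screen.css ('..' cannot escape the root)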
def link_emit(self, node):
    target = node.content
    m = self.addr_re.match(target)
    if m:
        if m.group("page_name"):
            # link to a page
            word = m.group("page_name")
            if word.startswith(wikiutil.PARENT_PREFIX):
                word = word[wikiutil.PARENT_PREFIX_LEN:]
            elif word.startswith(wikiutil.CHILD_PREFIX):
                word = "%s/%s" % (self.formatter.page.page_name,
                                  word[wikiutil.CHILD_PREFIX_LEN:])
            # handle anchors
            parts = rsplit(word, "#", 1)
            anchor = ""
            if len(parts) == 2:
                word, anchor = parts
            return "".join([
                self.formatter.pagelink(1, word, anchor=anchor),
                self.emit_children(node) or self.formatter.text(target),
                self.formatter.pagelink(0, word),
            ])
        elif m.group("extern_addr"):
            # external link
            address = m.group("extern_addr")
            proto = m.group("extern_proto")
            return "".join([
                self.formatter.url(1, address, css=proto),
                self.emit_children(node) or self.formatter.text(target),
                self.formatter.url(0),
            ])
        elif m.group("inter_wiki"):
            # interwiki link
            wiki = m.group("inter_wiki")
            page = m.group("inter_page")
            return "".join([
                self.formatter.interwikilink(1, wiki, page),
                self.emit_children(node) or self.formatter.text(page),
                self.formatter.interwikilink(0),
            ])
        elif m.group("attach_scheme"):
            # link to an attachment
            scheme = m.group("attach_scheme")
            attachment = m.group("attach_addr")
            url = wikiutil.url_unquote(attachment, want_unicode=True)
            text = self.get_text(node)
            return "".join([
                self.formatter.attachment_link(1, url),
                self.formatter.text(text),
                self.formatter.attachment_link(0),
            ])
    return "".join(["[[", self.formatter.text(target), "]]"])
def _instr(self, text):
    """ Convert string we get from xmlrpc into internal representation

    @param text: quoted text (str or unicode object)
    @rtype: unicode
    @return: text
    """
    return wikiutil.url_unquote(text) # config.charset must be utf-8
def decodeURI(quotedstring):
    try:
        unquotedstring = wikiutil.url_unquote(quotedstring)
    except AttributeError:
        # for compatibility with old versions
        unquotedstring = url_unquote(quotedstring)
    return unquotedstring
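# A short sketch of the fallback path in decodeURI, using a hypothetical
# stand-in for an old wikiutil module that lacks url_unquote (the attribute
# lookup then raises AttributeError and plain urllib is used instead):
import urllib

class _OldWikiutil(object):
    pass # hypothetical: no url_unquote attribute

try:
    unquoted = _OldWikiutil().url_unquote('Main%20Page')
except AttributeError: # old wikiutil: fall back to urllib
    unquoted = urllib.unquote('Main%20Page')
print unquoted # -> 'Main Page'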
def image_emit(self, node):
    target = node.content
    text = self.get_text(node)
    m = self.rules.addr_re.match(target)
    if m:
        if m.group('page_name'):
            # inserted anchors
            url = wikiutil.url_unquote(target)
            if target.startswith('#'):
                return self.formatter.anchordef(url[1:])
            # default to images
            return self.formatter.attachment_image(url, alt=text, html_class='image')
        elif m.group('extern_addr'):
            # external link
            address = m.group('extern_addr')
            proto = m.group('extern_proto')
            url = wikiutil.url_unquote(address)
            return self.formatter.image(src=url, alt=text, html_class='external_image')
        elif m.group('attach_scheme'):
            # link to an attachment
            scheme = m.group('attach_scheme')
            attachment = m.group('attach_addr')
            url = wikiutil.url_unquote(attachment)
            if scheme == 'image':
                return self.formatter.attachment_image(url, alt=text, html_class='image')
            elif scheme == 'drawing':
                url = wikiutil.drawing2fname(url)
                return self.formatter.attachment_drawing(url, text, alt=text)
            else:
                pass
        elif m.group('inter_wiki'):
            # interwiki link
            pass
    # return "".join(["{{", self.formatter.text(target), "}}"])
    url = wikiutil.url_unquote(node.content)
    return self.formatter.attachment_inlined(url, text)
def interwiki(self, target_and_text, **kw):
    scheme, rest = target_and_text.split(':', 1)
    wikiname, pagename, text = wikiutil160a.split_wiki(rest)
    #if (pagename.startswith(wikiutil.CHILD_PREFIX) or # fancy link to subpage [wiki:/SubPage text]
    #    Page(self.request, pagename).exists()): # fancy link to local page [wiki:LocalPage text]
    #    # XXX OtherWiki:FooPage markup -> checks for local FooPage -sense???
    #    pagename = wikiutil.url_unquote(pagename)
    #    pagename = self._replace_target(pagename)
    #    return '[[%s%s]]' % (pagename, text)

    if wikiname in ('Self', self.request.cfg.interwikiname, ''): # [wiki:Self:LocalPage text] or [:LocalPage:text]
        orig_pagename = pagename
        pagename = wikiutil.url_unquote(pagename)
        pagename = self._replace_target(pagename)
        camelcase = wikiutil.isStrictWikiname(pagename)
        if camelcase and (not text or text == orig_pagename):
            return pagename # optimize special case
        else:
            if text:
                text = '|' + text
            return '[[%s%s]]' % (pagename, text)

    wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, wikiname + ':')
    if wikitag_bad: # likely we got some /InterWiki as wikitail, we don't want that!
        pagename = wikiutil.url_unquote(pagename)
        pagename = self._replace_target(pagename)
        wikitail = pagename
    else: # good
        wikitail = wikiutil.url_unquote(pagename)

    # link to self?
    if wikiutil.isPicture(wikitail):
        return '{{%s:%s%s}}' % (wikitag, wikitail, text)
    else:
        if ' ' not in wikitail and not text:
            return '%s:%s' % (wikitag, wikitail)
        else:
            if text:
                text = '|' + text
            return '[[%s:%s%s]]' % (wikitag, wikitail, text)
def interwiki(self, url_and_text):
    if len(url_and_text) == 1:
        url = url_and_text[0]
        text = ''
    else:
        url, text = url_and_text
        text = '|' + text

    # keep track of whether this is a self-reference, so links
    # are always shown even if the page doesn't exist.
    scheme, url = url.split(':', 1)
    wikiname, pagename = wikiutil.split_wiki(url)
    if (url.startswith(wikiutil.CHILD_PREFIX) or # fancy link to subpage [wiki:/SubPage text]
        Page(self.request, url).exists()): # fancy link to local page [wiki:LocalPage text]
        pagename = wikiutil.url_unquote(url)
        pagename = self._replace_target(pagename)
        return '[[%s%s]]' % (pagename, text)

    if wikiname in ('Self', self.request.cfg.interwikiname, ''): # [wiki:Self:LocalPage text] or [:LocalPage:text]
        pagename = wikiutil.url_unquote(pagename)
        pagename = self._replace_target(pagename)
        return '[[%s%s]]' % (pagename, text)

    wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, url)
    if wikitag_bad: # likely we got some /InterWiki as wikitail, we don't want that!
        pagename = wikiutil.url_unquote(pagename)
        pagename = self._replace_target(pagename)
        wikitail = pagename
    else: # good
        wikitail = wikiutil.url_unquote(wikitail)

    # link to self?
    if wikiutil.isPicture(wikitail):
        return '{{%s:%s%s}}' % (wikitag, wikitail, text)
    else:
        if ' ' not in wikitail and not text:
            return '%s:%s' % (wikitag, wikitail)
        else:
            return '[[%s:%s%s]]' % (wikitag, wikitail, text)
def text(pagename, request, params=''):
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget

    _ = request.getText

    # check params
    filterpage = None
    if params.startswith('page='):
        params = params[len('page='):]
        params = wikiutil.url_unquote(params, want_unicode=False)
        filterpage = wikiutil.decodeUserInput(params)

    if request and request.form and 'page' in request.form:
        filterpage = request.form['page'][0]

    days, views, edits = get_data(pagename, request, filterpage)

    hits = TupleDataset()
    hits.columns = [Column('day', label=_("Date"), align='left'),
                    Column('views', label=_("Views/day"), align='right'),
                    Column('edits', label=_("Edits/day"), align='right'),
                   ]

    maxentries = 30
    if maxentries < len(days):
        step = float(len(days)) / maxentries
    else:
        step = 1

    sv = 0.0
    se = 0.0
    sd = 0.0
    cnt = 0

    for i in xrange(len(days) - 1, -1, -1):
        d, v, e = days[i], views[i], edits[i]

        # sum up views and edits to step days
        sd += 1
        cnt += 1
        sv += v
        se += e

        if cnt >= step:
            cnt -= step
            hits.addRow((d, "%.1f" % (sv / sd), "%.1f" % (se / sd)))
            sv = 0.0
            se = 0.0
            sd = 0.0

    table = DataBrowserWidget(request)
    table.setData(hits)
    return table.render(method="GET")
def interwikilink(self, on, interwiki='', pagename='', **kw):
    """ calls pagelink() for internal interwikilinks
        to make sure they get counted for self.pagelinks.
        IMPORTANT: on and off must be called with same parameters, see
                   also the text_html formatter.
    """
    wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, '%s:%s' % (interwiki, pagename))
    if wikitag == 'Self' or wikitag == self.request.cfg.interwikiname:
        if wikitail.find('#') > -1:
            wikitail, kw['anchor'] = wikitail.split('#', 1)
            wikitail = wikiutil.url_unquote(wikitail)
        return self.pagelink(on, wikitail, **kw)
    return ''
def text(pagename, request, params=''):
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget

    _ = request.getText

    # check params
    filterpage = None
    if params.startswith('page='):
        filterpage = wikiutil.url_unquote(params[len('page='):])

    if request and request.values and 'page' in request.values:
        filterpage = request.values['page']

    days, views, edits = get_data(pagename, request, filterpage)

    hits = TupleDataset()
    hits.columns = [Column('day', label=_("Date"), align='left'),
                    Column('views', label=_("Views/day"), align='right'),
                    Column('edits', label=_("Edits/day"), align='right'),
                   ]

    maxentries = 30
    if maxentries < len(days):
        step = float(len(days)) / maxentries
    else:
        step = 1

    sv = 0.0
    se = 0.0
    sd = 0.0
    cnt = 0

    for i in xrange(len(days) - 1, -1, -1):
        d, v, e = days[i], views[i], edits[i]

        # sum up views and edits to step days
        sd += 1
        cnt += 1
        sv += v
        se += e

        if cnt >= step:
            cnt -= step
            hits.addRow((d, "%.1f" % (sv / sd), "%.1f" % (se / sd)))
            sv = 0.0
            se = 0.0
            sd = 0.0

    table = DataBrowserWidget(request)
    table.setData(hits)
    return table.render(method="GET")
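# A standalone sketch, on hypothetical data, of the bucketing loop both
# text() variants above share: per-day counts are averaged over step-sized
# groups so at most maxentries rows come out, newest day first:
maxentries = 30
days = range(1, 91) # 90 hypothetical days
views = [float(d % 7) for d in days]
edits = [1.0] * len(days)

step = float(len(days)) / maxentries if len(days) > maxentries else 1
rows = []
sv = se = sd = 0.0
cnt = 0
for i in xrange(len(days) - 1, -1, -1):
    sd += 1
    cnt += 1
    sv += views[i]
    se += edits[i]
    if cnt >= step:
        cnt -= step
        rows.append((days[i], "%.1f" % (sv / sd), "%.1f" % (se / sd)))
        sv = se = sd = 0.0

assert len(rows) <= maxentries # 90 days collapse to 30 averaged rows here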
def link_emit(self, node):
    target = node.content
    m = self.rules.addr_re.match(target)
    if m:
        if m.group('page_name'):
            # link to a page
            word = m.group('page_name')
            if word.startswith(wikiutil.PARENT_PREFIX):
                word = word[wikiutil.PARENT_PREFIX_LEN:]
            elif word.startswith(wikiutil.CHILD_PREFIX):
                word = "%s/%s" % (self.formatter.page.page_name,
                                  word[wikiutil.CHILD_PREFIX_LEN:])
            word, anchor = wikiutil.split_anchor(word)
            return ''.join([
                self.formatter.pagelink(1, word, anchor=anchor),
                self.emit_children(node) or self.formatter.text(target),
                self.formatter.pagelink(0, word),
            ])
        elif m.group('extern_addr'):
            # external link
            address = m.group('extern_addr')
            proto = m.group('extern_proto')
            return ''.join([
                self.formatter.url(1, address, css=proto),
                self.emit_children(node) or self.formatter.text(target),
                self.formatter.url(0),
            ])
        elif m.group('inter_wiki'):
            # interwiki link
            wiki = m.group('inter_wiki')
            page = m.group('inter_page')
            page, anchor = wikiutil.split_anchor(page)
            return ''.join([
                self.formatter.interwikilink(1, wiki, page, anchor=anchor),
                self.emit_children(node) or self.formatter.text(page),
                self.formatter.interwikilink(0),
            ])
        elif m.group('attach_scheme'):
            # link to an attachment
            scheme = m.group('attach_scheme')
            attachment = m.group('attach_addr')
            url = wikiutil.url_unquote(attachment)
            text = self.get_text(node)
            return ''.join([
                self.formatter.attachment_link(1, url),
                self.formatter.text(text),
                self.formatter.attachment_link(0),
            ])
    return "".join(["[[", self.formatter.text(target), "]]"])
def format_comment(request, line):
    comment = line.comment
    action = line.action
    _ = request.getText
    if action.startswith("ATT"):
        filename = wikiutil.url_unquote(line.extra)
        if action == "ATTNEW":
            comment = _("Upload of attachment '%(filename)s'.") % {"filename": filename}
        elif action == "ATTDEL":
            comment = _("Attachment '%(filename)s' deleted.") % {"filename": filename}
        elif action == "ATTDRW":
            comment = _("Drawing '%(filename)s' saved.") % {"filename": filename}
    elif "/REVERT" in action:
        rev = int(line.extra)
        comment = (_("Revert to revision %(rev)d.") % {"rev": rev}) + " " + comment
    elif "/RENAME" in action:
        comment = (_("Renamed from '%(oldpagename)s'.") % {"oldpagename": line.extra}) + " " + comment
    return wikiutil.make_breakable(comment, _MAX_COMMENT_LENGTH)
def format_comment(request, line):
    comment = line.comment
    _ = request.getText
    if line.action.startswith('ATT'):
        filename = wikiutil.url_unquote(line.extra)
        if line.action == 'ATTNEW':
            comment = _("Upload of attachment '%(filename)s'.") % {
                'filename': filename}
        elif line.action == 'ATTDEL':
            comment = _("Attachment '%(filename)s' deleted.") % {
                'filename': filename}
        elif line.action == 'ATTDRW':
            comment = _("Drawing '%(filename)s' saved.") % {
                'filename': filename}
    elif line.action.find('/REVERT') != -1:
        rev = int(line.extra)
        comment = _("Revert to revision %(rev)d.") % {'rev': rev}
    return wikiutil.make_breakable(comment, _MAX_COMMENT_LENGTH)
def _interwiki_repl(self, word):
    """Handle InterWiki links."""
    wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
    if wikitag_bad:
        return word
    else:
        wikiname, pagename = word.split(':', 1)
        pagename = wikiutil.url_unquote(pagename) # maybe someone has used %20 for blanks in pagename
        camelcase = wikiutil.isStrictWikiname(pagename)
        if wikiname in ('Self', self.request.cfg.interwikiname):
            pagename = self._replace(('PAGE', pagename))
            if camelcase:
                return '%s' % pagename # optimize special case
            else:
                return '[[%s]]' % pagename # optimize special case
        else:
            if ' ' in pagename: # we could get a ' ' by urlunquoting
                return '[[%s:%s]]' % (wikiname, pagename)
            else:
                return '%s:%s' % (wikiname, pagename)
def attachment(self, url_and_text):
    """ This gets called on attachment URLs. """
    if len(url_and_text) == 1:
        url = url_and_text[0]
        text = ''
    else:
        url, text = url_and_text
        text = '|' + text

    scheme, fname = url.split(":", 1)
    #scheme, fname, text = wikiutil.split_wiki(target_and_text)

    pagename, fname = AttachFile.absoluteName(fname, self.pagename)
    from_this_page = pagename == self.pagename
    fname = self._replace(('FILE', pagename, fname))
    fname = wikiutil.url_unquote(fname)
    fname = self._replace(('FILE', pagename, fname))
    pagename = self._replace(('PAGE', pagename))
    if from_this_page:
        name = fname
    else:
        name = "%s/%s" % (pagename, fname)

    if scheme == 'drawing':
        return "{{drawing:%s%s}}" % (name, text)

    # check for image URL, and possibly return IMG tag
    # (images are always inlined, just like for other URLs)
    if wikiutil.isPicture(name):
        return "{{attachment:%s%s}}" % (name, text)

    # inline the attachment
    if scheme == 'inline':
        return '{{attachment:%s%s}}' % (name, text)
    else: # 'attachment'
        return '[[attachment:%s%s]]' % (name, text)
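# For orientation, a standalone sketch (not the converter's real API) of
# the 1.5 -> 1.6 markup mapping the method above implements, assuming an
# attachment on the current page:
def convert_attachment_markup(scheme, name, text=''):
    if text:
        text = '|' + text
    if scheme == 'drawing':
        return "{{drawing:%s%s}}" % (name, text)
    # pictures and inline: attachments become transclusions,
    # everything else a plain attachment link
    if name.lower().endswith(('.png', '.gif', '.jpg')) or scheme == 'inline':
        return "{{attachment:%s%s}}" % (name, text)
    return '[[attachment:%s%s]]' % (name, text)

print convert_attachment_markup('attachment', 'notes.txt') # -> [[attachment:notes.txt]]
print convert_attachment_markup('inline', 'snippet.py') # -> {{attachment:snippet.py}}
print convert_attachment_markup('drawing', 'plan', 'floor plan') # -> {{drawing:plan|floor plan}}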
def fixURI(self, env):
    """ Fix problems with script_name and path_info using PythonOption
    directive to rewrite URI.

    This is needed when using Apache 1 or other server which does not
    support adding custom headers per request. With mod_python we can
    use the PythonOption directive:

        <Location /url/to/mywiki/>
            PythonOption X-Moin-Location /url/to/mywiki/
        </Location>

    Note that *neither* script_name *nor* path_info can be trusted
    when Moin is invoked as a mod_python handler with apache1, so we
    must build both using request_uri and the provided PythonOption.
    """
    # Be compatible with release 1.3.5 "Location" option
    # TODO: Remove in later release, we should have one option only.
    old_location = "Location"
    options_table = self.mpyreq.get_options()
    if not hasattr(options_table, "get"):
        options = dict(options_table)
    else:
        options = options_table
    location = options.get(self.moin_location) or options.get(old_location)
    if location:
        env[self.moin_location] = location
        # Try to recreate script_name and path_info from request_uri.
        import urlparse
        scriptAndPath = urlparse.urlparse(self.request_uri)[2]
        self.script_name = location.rstrip("/")
        path = scriptAndPath.replace(self.script_name, "", 1)
        self.path_info = wikiutil.url_unquote(path, want_unicode=False)

    RequestBase.fixURI(self, env)
def history(page, pagename, request):
    # show history as default
    _ = request.getText
    default_count, limit_max_count = request.cfg.history_count[0:2]
    paging = request.cfg.history_paging

    try:
        max_count = int(request.values.get('max_count', default_count))
    except ValueError:
        max_count = default_count
    max_count = max(1, min(max_count, limit_max_count))

    # read in the complete log of this page
    log = editlog.EditLog(request, rootpagename=pagename)

    offset = 0
    paging_info_html = ""
    paging_nav_html = ""
    count_select_html = ""

    f = request.formatter

    if paging:
        log_size = log.lines()

        try:
            offset = int(request.values.get('offset', 0))
        except ValueError:
            offset = 0
        offset = max(min(offset, log_size - 1), 0)

        paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
            'start_offset': log_size - min(log_size, offset + max_count) + 1,
            'end_offset': log_size - offset,
            'total_count': log_size,
        } + f.paragraph(0)

        # generating offset navigating links
        if max_count < log_size or offset != 0:
            offset_links = []
            cur_offset = max_count
            near_count = 5 # request.cfg.pagination_size

            min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
            max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
            offset_added = False

            def add_offset_link(offset, caption=None):
                offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                                    page.link_to(request, on=1, querystr={
                                        'action': 'info',
                                        'offset': str(offset),
                                        'max_count': str(max_count),
                                    }, css_class="info-offset-nav-link", rel="nofollow") +
                                    f.text(caption or str(log_size - offset)) +
                                    page.link_to(request, on=0) +
                                    f.table_cell(0))

            # link to previous page - only if not at start
            if offset > 0:
                add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

            # link to beginning of event log - if min_offset is not minimal
            if min_offset > 0:
                add_offset_link(0)
                # adding gap only if min_offset not explicitly following beginning
                if min_offset > 1:
                    offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

            # generating near pages links
            for cur_offset in range(min_offset, max_offset + 1):
                # note that current offset may be not multiple of max_count,
                # so we check whether we should add current offset marker like this
                if not offset_added and offset <= cur_offset * max_count:
                    # current info history view offset
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                    offset_added = True

                # add link, if not at this offset
                if offset != cur_offset * max_count:
                    add_offset_link(cur_offset * max_count)

            # link to the last page of event log
            if max_offset < (log_size - 1) / max_count:
                if max_offset < (log_size - 1) / max_count - 1:
                    offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                add_offset_link(((log_size - 1) / max_count) * max_count)

            # special case - if offset is greater than max_offset * max_count
            if offset > max_offset * max_count:
                offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

            # link to next page
            if offset < (log_size - max_count):
                add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

            # generating html
            paging_nav_html += "".join([
                f.table(1, css_class="searchpages"),
                f.table_row(1),
                "".join(offset_links),
                f.table_row(0),
                f.table(0),
            ])

    # generating max_count switcher
    # we do it only in case history_count has additional values
    if len(request.cfg.history_count) > 2:
        max_count_possibilities = list(set(request.cfg.history_count))
        max_count_possibilities.sort()
        max_count_html = []
        cur_count_added = False

        for count in max_count_possibilities:
            # max count value can be not in list of predefined values
            if max_count <= count and not cur_count_added:
                max_count_html.append("".join([
                    f.span(1, css_class="info-count-item info-cur-count"),
                    f.text(str(max_count)),
                    f.span(0),
                ]))
                cur_count_added = True

            # checking for limit_max_count to prevent showing unavailable options
            if max_count != count and count <= limit_max_count:
                max_count_html.append("".join([
                    f.span(1, css_class="info-count-item"),
                    page.link_to(request, on=1, querystr={
                        'action': 'info',
                        'offset': str(offset),
                        'max_count': str(count),
                    }, css_class="info-count-link", rel="nofollow"),
                    f.text(str(count)),
                    page.link_to(request, on=0),
                    f.span(0),
                ]))

        count_select_html += "".join([
            f.span(1, css_class="info-count-selector"),
            f.text(" ("),
            f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
            f.text(")"),
            f.span(0),
        ])

    # open log for this page
    from MoinMoin.util.dataset import TupleDataset, Column

    history = TupleDataset()
    history.columns = [
        Column('rev', label='#', align='right'),
        Column('mtime', label=_('Date'), align='right'),
        Column('size', label=_('Size'), align='right'),
        Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
        Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
        Column('comment', label=_('Comment')),
        Column('action', label=_('Action')),
    ]

    # generate history list

    def render_action(text, query, **kw):
        kw.update(dict(rel='nofollow'))
        return page.link_to(request, text, querystr=query, **kw)

    def render_file_action(text, pagename, filename, request, do):
        url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
        if url:
            f = request.formatter
            link = f.url(1, url) + f.text(text) + f.url(0)
            return link

    may_write = request.user.may.write(pagename)
    may_delete = request.user.may.delete(pagename)

    count = 0
    pgactioncount = 0
    for line in log.reverse():
        count += 1

        if paging and count <= offset:
            continue

        rev = int(line.rev)
        actions = []
        if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
            size = page.size(rev=rev)
            actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
            if pgactioncount == 0:
                rchecked = ' checked="checked"'
                lchecked = ''
            elif pgactioncount == 1:
                lchecked = ' checked="checked"'
                rchecked = ''
            else:
                lchecked = rchecked = ''
            diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
            if rev > 1:
                diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev - 1, 'rev2': rev})
            comment = line.comment
            if not comment:
                if '/REVERT' in line.action:
                    comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                elif '/RENAME' in line.action:
                    comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
            pgactioncount += 1
        else: # ATT*
            rev = '-'
            diff = '-'

            filename = wikiutil.url_unquote(line.extra)
            comment = "%s: %s %s" % (line.action, filename, line.comment)
            if AttachFile.exists(request, pagename, filename):
                size = AttachFile.size(request, pagename, filename)
                actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                if may_delete:
                    actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                if may_write:
                    actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
            else:
                size = 0

        history.addRow((
            rev,
            request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
            str(size),
            diff,
            line.getEditor(request) or _("N/A"),
            wikiutil.escape(comment) or '&nbsp;',
            " ".join(a for a in actions if a),
        ))

        if (count >= max_count + offset) or (paging and count >= log_size):
            break

    # print version history
    from MoinMoin.widget.browser import DataBrowserWidget

    request.write(unicode(html.H2().append(_('Revision History'))))

    if not count: # there was no entry in logfile
        request.write(_('No log entries found.'))
        return

    history_table = DataBrowserWidget(request)
    history_table.setData(history)

    div = html.DIV(id="page-history")
    div.append(html.INPUT(type="hidden", name="action", value="diff"))
    div.append(history_table.render(method="GET"))

    form = html.FORM(method="GET", action="")
    if paging:
        form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
        form.append("".join([
            f.div(1, css_class="info-paging-nav info-paging-nav-top"),
            paging_nav_html,
            f.div(0),
        ]))
    form.append(div)
    if paging:
        form.append("".join([
            f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
            paging_nav_html,
            f.div(0),
        ]))
    request.write(unicode(form))
def history(page, pagename, request):
    # show history as default
    _ = request.getText

    # open log for this page
    from MoinMoin.logfile import editlog
    from MoinMoin.util.dataset import TupleDataset, Column

    history = TupleDataset()
    history.columns = [
        Column('rev', label='#', align='right'),
        Column('mtime', label=_('Date'), align='right'),
        Column('size', label=_('Size'), align='right'),
        Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
        Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
        Column('comment', label=_('Comment')),
        Column('action', label=_('Action')),
    ]

    # generate history list
    revisions = page.getRevList()
    versions = len(revisions)

    may_revert = request.user.may.revert(pagename)

    # read in the complete log of this page
    log = editlog.EditLog(request, rootpagename=pagename)
    count = 0
    for line in log.reverse():
        rev = int(line.rev)
        actions = ""
        if line.action in ['SAVE', 'SAVENEW', 'SAVE/REVERT', ]:
            if count == 0: # latest page
                actions = '%s %s' % (actions, page.link_to(request, text=_('view'), querystr=''))
                actions = '%s %s' % (actions, page.link_to(request, text=_('raw'), querystr='action=raw'))
                actions = '%s %s' % (actions, page.link_to(request, text=_('print'), querystr='action=print'))
            else:
                actions = '%s %s' % (actions, page.link_to(request, text=_('view'), querystr='action=recall&rev=%d' % rev))
                actions = '%s %s' % (actions, page.link_to(request, text=_('raw'), querystr='action=raw&rev=%d' % rev))
                actions = '%s %s' % (actions, page.link_to(request, text=_('print'), querystr='action=print&rev=%d' % rev))
                if may_revert:
                    actions = '%s %s' % (actions, page.link_to(request, text=_('revert'), querystr='action=revert&rev=%d' % (rev, )))
            if count == 0:
                rchecked = ' checked="checked"'
                lchecked = ''
            elif count == 1:
                lchecked = ' checked="checked"'
                rchecked = ''
            else:
                lchecked = rchecked = ''
            diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)

            comment = line.comment
            if not comment and line.action.find('/REVERT') != -1:
                comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
            size = page.size(rev=rev)
        else: # ATT*
            rev = '-'
            diff = '-'

            filename = wikiutil.url_unquote(line.extra)
            comment = "%s: %s %s" % (line.action, filename, line.comment)
            size = 0
            if line.action != 'ATTDEL':
                from MoinMoin.action import AttachFile
                page_dir = AttachFile.getAttachDir(request, pagename)
                filepath = os.path.join(page_dir, filename)
                try:
                    # FIXME, wrong path on non-std names
                    size = os.path.getsize(filepath)
                except:
                    pass
            if line.action == 'ATTNEW':
                actions = '%s %s' % (actions, page.link_to(request, text=_('view'),
                                     querystr='action=AttachFile&do=view&target=%s' % filename))
            elif line.action == 'ATTDRW':
                actions = '%s %s' % (actions, page.link_to(request, text=_('edit'),
                                     querystr='action=AttachFile&drawing=%s' % filename.replace(".draw", "")))

            actions = '%s %s' % (actions, page.link_to(request, text=_('get'),
                                 querystr='action=AttachFile&do=get&target=%s' % filename))
            actions = '%s %s' % (actions, page.link_to(request, text=_('del'),
                                 querystr='action=AttachFile&do=del&target=%s' % filename))
            # XXX use?: wikiutil.escape(filename)

        history.addRow((
            rev,
            request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
            str(size),
            diff,
            line.getEditor(request) or _("N/A"),
            wikiutil.escape(comment) or '&nbsp;',
            actions,
        ))
        count += 1
        if count >= 100:
            break

    # print version history
    from MoinMoin.widget.browser import DataBrowserWidget

    request.write('<h2>%s</h2>\n' % _('Revision History'))

    if not count: # there was no entry in logfile
        request.write(_('No log entries found.'))
        return

    # TODO: this form activates revert, which should use POST, but
    # other actions should use get. Maybe we should put the revert
    # into the page view itself, and not in this form.
    request.write('<form method="GET" action="">\n')
    request.write('<div id="page-history">\n')
    request.write('<input type="hidden" name="action" value="diff">\n')

    history_table = DataBrowserWidget(request)
    history_table.setData(history)
    history_table.render()

    request.write('</div>\n')
    request.write('</form>\n')
def execute(pagename, request):
    """ Send recent changes as an RSS document """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return
    if request.isSpiderAgent: # reduce bot cpu usage
        return ''

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get('items', max_items)), items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get('unique', unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get('diffs', diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get('lines', max_lines)), lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get('show_att', show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get('page', page_pattern)
    except ValueError:
        pass

    # if we are just interested in a specific page, using the local edit-log
    # of that page is much faster than the global one - esp. if the page was
    # NOT recently changed and the global edit-log is rather big.
    kw = dict(rootpagename=page_pattern) if is_single_page_match(page_pattern) else {}
    log = editlog.EditLog(request, **kw)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((not show_att and not line.action.startswith('SAVE')) or
            ((line.pagename in pages) and unique) or
            not match_page(line.pagename, page_pattern)):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None
        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)
        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique, max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._write(
            u'<!--\n'
            u'    Add an "items=nnn" URL parameter to get more than the \n'
            u'    default %(def_max_items)d items. You cannot get more than \n'
            u'    %(items_limit)d items though.\n'
            u'    \n'
            u'    Add "unique=1" to get a list of changes where page names are unique,\n'
            u'    i.e. where only the latest change of each page is reflected.\n'
            u'    \n'
            u'    Add "diffs=1" to add change diffs to the description of each items.\n'
            u'    \n'
            u'    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u'    \n'
            u'    Add "lines=nnn" to change maximum number of diff/body lines \n'
            u'    to show. Cannot be more than %(lines_limit)d.\n'
            u'    \n'
            u'    Add "show_att=1" to show items related to attachments.\n'
            u'    \n'
            u'    Add "page=pattern" to show feed only for specific pages.\n'
            u'    Pattern can be empty (it would match to all pages), \n'
            u'    can start with circumflex (it would be interpreted as \n'
            u'    regular expression in this case), end with slash (for \n'
            u'    getting feed for page tree) or point to specific page (if \n'
            u'    none of the above can be applied).\n'
            u'    \n'
            u'    Current settings: items=%(max_items)i, unique=%(unique)i, \n'
            u'    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n'
            u'    show_att=%(show_att)i\n'
            u'-->\n' % locals()
            )

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
            })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
                })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={(handler.xmlns['rdf'], 'resource'): link, })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
                })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={(handler.xmlns['rdf'], 'about'): rdflink, })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'): # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == 'ATTNEW':
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode('link', attach_url(request, cur_pagename, filename, do='view'))
                    comment = _(u"Upload of attachment '%(filename)s'.") % {'filename': filename}
                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode('link', full_url(request, page, querystr={'action': 'AttachFile'}))
                    comment = _(u"Attachment '%(filename)s' deleted.") % {'filename': filename}
                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode('link', attach_url(request, cur_pagename, filename, do='view'))
                    comment = _(u"Drawing '%(filename)s' saved.") % {'filename': filename}
            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {'rev': to_rev}) + "<br />" \
                              + _("Comment:") + " " + comment
                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {'oldpagename': item.extra}) + "<br />" \
                              + _("Comment:") + " " + comment
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename
                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode('link', full_url(request, page,
                                           querystr={'action': 'recall', 'rev': str(item_rev)}))
                    else:
                        handler.simpleNode('link', full_url(request, page,
                                           querystr={'action': 'diff',
                                                     'rev1': str(item_rev),
                                                     'rev2': str(item_rev - 1)}))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename, rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request, cur_pagename, item_rev - 1,
                                                  cur_pagename, item_rev, ignorews=1)
                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']
                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)
                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

            if not ddiffs:
                handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else: # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"
                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch
                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'), full_url(request, page, querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'), full_url(request, page, querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
def execute(pagename, request):
    """ Send recent changes as an RSS document """
    if not wikixml.ok:
        request.mimetype = "text/plain"
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get("items", max_items)), items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get("unique", unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get("diffs", diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get("ddiffs", ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get("lines", max_lines)), lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get("show_att", show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get("page", page_pattern)
    except ValueError:
        pass

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (
            (not show_att and not line.action.startswith("SAVE"))
            or ((line.pagename in pages) and unique)
            or not match_page(line.pagename, page_pattern)
        ):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None
        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)
        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique, max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = "application/rss+xml"
        request.expires = expires
        request.last_modified = lastmod
        request.headers["Etag"] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            unicode(
                "<!--\n"
                '    Add an "items=nnn" URL parameter to get more than the \n'
                "    default %(def_max_items)d items. You cannot get more than \n"
                "    %(items_limit)d items though.\n"
                "    \n"
                '    Add "unique=1" to get a list of changes where page names are unique,\n'
                "    i.e. where only the latest change of each page is reflected.\n"
                "    \n"
                '    Add "diffs=1" to add change diffs to the description of each items.\n'
                "    \n"
                '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
                "    \n"
                '    Add "lines=nnn" to change maximum number of diff/body lines \n'
                "    to show. Cannot be more than %(lines_limit)d.\n"
                "    \n"
                '    Add "show_att=1" to show items related to attachments.\n'
                "    \n"
                '    Add "page=pattern" to show feed only for specific pages.\n'
                "    Pattern can be empty (it would match to all pages), \n"
                "    can start with circumflex (it would be interpreted as \n"
                "    regular expression in this case), end with slash (for \n"
                "    getting feed for page tree) or point to specific page (if \n"
                "    none of the above can be applied).\n"
                "    \n"
                "    Current settings: items=%(max_items)i, unique=%(unique)i, \n"
                "    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n"
                "    show_att=%(show_att)i\n"
                "-->\n" % locals()
            ).encode(config.charset)
        )

        # emit channel description
        handler.startNode("channel", {(handler.xmlns["rdf"], "about"): request.url_root})
        handler.simpleNode("title", cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode("link", full_url(request, page))
        handler.simpleNode("description", u"RecentChanges at %s" % cfg.sitename)
        if logo:
            handler.simpleNode("image", None, {(handler.xmlns["rdf"], "resource"): logo})
        if cfg.interwikiname:
            handler.simpleNode(("wiki", "interwiki"), cfg.interwikiname)

        handler.startNode("items")
        handler.startNode(("rdf", "Seq"))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(("rdf", "li"), None, attr={(handler.xmlns["rdf"], "resource"): link})
        handler.endNode(("rdf", "Seq"))
        handler.endNode("items")
        handler.endNode("channel")

        # emit logo data
        if logo:
            handler.startNode("image", attr={(handler.xmlns["rdf"], "about"): logo})
            handler.simpleNode("title", cfg.sitename)
            handler.simpleNode("link", baseurl)
            handler.simpleNode("url", logo)
            handler.endNode("image")

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode("item", attr={(handler.xmlns["rdf"], "about"): rdflink})

            # general attributes
            handler.simpleNode("title", item.pagename)
            handler.simpleNode(("dc", "date"), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith("ATT"):  # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == "ATTNEW":
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode("link", attach_url(request, cur_pagename, filename, do="view"))
                    comment = _(u"Upload of attachment '%(filename)s'.") % {"filename": filename}
                elif action == "ATTDEL":
                    if ddiffs:
                        handler.simpleNode("link", full_url(request, page, querystr={"action": "AttachFile"}))
                    comment = _(u"Attachment '%(filename)s' deleted.") % {"filename": filename}
                elif action == "ATTDRW":
                    if ddiffs:
                        handler.simpleNode("link", attach_url(request, cur_pagename, filename, do="view"))
                    comment = _(u"Drawing '%(filename)s' saved.") % {"filename": filename}
            elif action.startswith("SAVE"):
                if action == "SAVE/REVERT":
                    to_rev = int(item.extra)
                    comment = (
                        (_(u"Revert to revision %(rev)d.") % {"rev": to_rev})
                        + "<br />"
                        + _("Comment:")
                        + " "
                        + comment
                    )
                elif action == "SAVE/RENAME":
                    show_diff = 0
                    comment = (
                        (_(u"Renamed from '%(oldpagename)s'.") % {"oldpagename": item.extra})
                        + "<br />"
                        + _("Comment:")
                        + " "
                        + comment
                    )
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename
                elif action == "SAVENEW":
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode(
                            "link", full_url(request, page, querystr={"action": "recall", "rev": str(item_rev)})
                        )
                    else:
                        handler.simpleNode(
                            "link",
                            full_url(
                                request,
                                page,
                                querystr={"action": "diff", "rev1": str(item_rev), "rev2": str(item_rev - 1)},
                            ),
                        )

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename, rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(
                            request, cur_pagename, item_rev - 1, cur_pagename, item_rev, ignorews=1
                        )
                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ["...\n"]
                    lines = "\n".join(lines)
                    lines = wikiutil.escape(lines)
                    comment = u"%s\n<pre>\n%s\n</pre>\n" % (comment, lines)

            if not ddiffs:
                handler.simpleNode("link", full_url(request, page))

            if comment:
                handler.simpleNode("description", comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns["wiki"], "host")] = item.hostname
                if item.editor[0] == "interwiki":
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else:  # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"
                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch
                handler.startNode(("dc", "contributor"))
                handler.startNode(("rdf", "Description"), attr=edattr)
                handler.simpleNode(("rdf", "value"), edname)
                handler.endNode(("rdf", "Description"))
                handler.endNode(("dc", "contributor"))

            # wiki extensions
            handler.simpleNode(("wiki", "version"), "%i" % (item.ed_time_usecs))
            handler.simpleNode(("wiki", "status"), ("deleted", "updated")[page.exists()])
            handler.simpleNode(("wiki", "diff"), full_url(request, page, querystr={"action": "diff"}))
            handler.simpleNode(("wiki", "history"), full_url(request, page, querystr={"action": "info"}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode("item")

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
def execute(pagename, request): """ Send recent changes as an RSS document """ if not wikixml.ok: request.mimetype = 'text/plain' request.write( "rss_rc action is not supported because of missing pyxml module.") return if request.isSpiderAgent: # reduce bot cpu usage return '' cfg = request.cfg _ = request.getText # get params def_max_items = max_items = cfg.rss_items_default items_limit = cfg.rss_items_limit unique = cfg.rss_unique diffs = cfg.rss_diffs ddiffs = cfg.rss_ddiffs max_lines = cfg.rss_lines_default lines_limit = cfg.rss_lines_limit show_att = cfg.rss_show_attachment_entries page_pattern = cfg.rss_page_filter_pattern try: max_items = min(int(request.values.get('items', max_items)), items_limit) except ValueError: pass try: unique = int(request.values.get('unique', unique)) except ValueError: pass try: diffs = int(request.values.get('diffs', diffs)) except ValueError: pass ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003 try: ddiffs = int(request.values.get('ddiffs', ddiffs)) except ValueError: pass try: max_lines = min(int(request.values.get('lines', max_lines)), lines_limit) except ValueError: pass try: show_att = int(request.values.get('show_att', show_att)) except ValueError: pass try: page_pattern = request.values.get('page', page_pattern) except ValueError: pass # if we are just interested in a specific page, using the local edit-log # of that page is much faster than the global one - esp. if the page was # NOT recently changed and the global edit-log is rather big. kw = dict(rootpagename=page_pattern) if is_single_page_match( page_pattern) else {} log = editlog.EditLog(request, **kw) logdata = [] counter = 0 pages = {} lastmod = 0 for line in log.reverse(): if not request.user.may.read(line.pagename): continue if ((not show_att and not line.action.startswith('SAVE')) or ((line.pagename in pages) and unique) or not match_page(line.pagename, page_pattern)): continue line.editor = line.getInterwikiEditorData(request) line.time = timefuncs.tmtuple( wikiutil.version2timestamp(line.ed_time_usecs)) # UTC logdata.append(line) pages[line.pagename] = None if not lastmod: lastmod = wikiutil.version2timestamp(line.ed_time_usecs) counter += 1 if counter >= max_items: break del log timestamp = timefuncs.formathttpdate(lastmod) etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique, max_lines, show_att) # for 304, we look at if-modified-since and if-none-match headers, # one of them must match and the other is either not there or must match. 
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler.write(
            u'<!--\n'
            u'    Add an "items=nnn" URL parameter to get more than the\n'
            u'    default %(def_max_items)d items. You cannot get more than\n'
            u'    %(items_limit)d items though.\n'
            u'\n'
            u'    Add "unique=1" to get a list of changes where page names are unique,\n'
            u'    i.e. where only the latest change of each page is reflected.\n'
            u'\n'
            u'    Add "diffs=1" to add change diffs to the description of each item.\n'
            u'\n'
            u'    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u'\n'
            u'    Add "lines=nnn" to change the maximum number of diff/body lines\n'
            u'    to show. Cannot be more than %(lines_limit)d.\n'
            u'\n'
            u'    Add "show_att=1" to show items related to attachments.\n'
            u'\n'
            u'    Add "page=pattern" to show the feed only for specific pages.\n'
            u'    The pattern can be empty (it matches all pages), start with a\n'
            u'    circumflex (it is then interpreted as a regular expression),\n'
            u'    end with a slash (to get the feed for a whole page tree), or\n'
            u'    point to a specific page (if none of the above applies).\n'
            u'\n'
            u'    Current settings: items=%(max_items)i, unique=%(unique)i,\n'
            u'    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i,\n'
            u'    show_att=%(show_att)i\n'
            u'-->\n' % locals())

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={
                (handler.xmlns['rdf'], 'resource'): link,
            })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
            })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={
                (handler.xmlns['rdf'], 'about'): rdflink,
            })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'):  # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == 'ATTNEW':
                    # Once the attachment is deleted, this link becomes invalid,
                    # but we preserve it to prevent new RSS entries from
                    # appearing in RSS readers.
                    if ddiffs:
                        handler.simpleNode('link',
                            attach_url(request, cur_pagename, filename, do='view'))
                    comment = _(u"Upload of attachment '%(filename)s'.") % {
                        'filename': filename}
                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode('link',
                            full_url(request, page, querystr={'action': 'AttachFile'}))
                    comment = _(u"Attachment '%(filename)s' deleted.") % {
                        'filename': filename}
                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode('link',
                            attach_url(request, cur_pagename, filename, do='view'))
                    comment = _(u"Drawing '%(filename)s' saved.") % {
                        'filename': filename}
            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {
                        'rev': to_rev}) + "<br />" \
                        + _("Comment:") + " " + comment
                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': item.extra}) + "<br />" \
                        + _("Comment:") + " " + comment
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename
                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # the first revision has no older revision to diff against
                    if item_rev == 1:
                        handler.simpleNode('link', full_url(request, page,
                            querystr={'action': 'recall', 'rev': str(item_rev)}))
                    else:
                        handler.simpleNode('link', full_url(request, page,
                            querystr={'action': 'diff',
                                      'rev1': str(item_rev),
                                      'rev2': str(item_rev - 1)}))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename, rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request, cur_pagename, item_rev - 1,
                                                  cur_pagename, item_rev, ignorews=1)
                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']
                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)
                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

                if not ddiffs:
                    handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else:  # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"
                # this edattr stuff, esp. None as the first tuple element,
                # breaks things (tracebacks);
                # if you know how to do this right, please send us a patch
                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'), "%i" % item.ed_time_usecs)
            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request, page, querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request, page, querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), )  # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), )  # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
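# execute() above relies on two helpers, match_page() and
# is_single_page_match(), that are not shown in this excerpt. The sketch
# below is a plausible reconstruction, assuming only the pattern semantics
# documented in the feed's embedded comment (empty pattern matches all
# pages, a leading "^" means a regular expression, a trailing "/" selects
# a page tree, anything else is an exact page name); it is not necessarily
# the verbatim upstream implementation.
import re

def match_page(pagename, page_pattern):
    """ Check whether pagename matches the page= filter pattern. """
    if not page_pattern:
        return True  # empty pattern matches every page
    elif page_pattern.startswith('^'):
        # circumflex prefix: treat the whole pattern as a regex
        return re.match(page_pattern, pagename) is not None
    elif page_pattern.endswith('/'):
        # trailing slash: the root page itself or any subpage below it
        return pagename == page_pattern[:-1] or pagename.startswith(page_pattern)
    else:
        return pagename == page_pattern  # exact page name

def is_single_page_match(page_pattern):
    """ True if the pattern can only ever match one specific page, so the
        faster per-page edit-log (rootpagename=...) shortcut is safe. """
    return bool(page_pattern) and \
           not page_pattern.startswith('^') and \
           not page_pattern.endswith('/')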
def history(page, pagename, request):
    # show history as default
    _ = request.getText
    default_count, limit_max_count = request.cfg.history_count
    try:
        max_count = int(request.form.get('max_count', [default_count])[0])
    except ValueError:
        max_count = default_count
    max_count = min(max_count, limit_max_count)

    # open log for this page
    from MoinMoin.util.dataset import TupleDataset, Column

    history = TupleDataset()
    history.columns = [
        Column('rev', label='#', align='right'),
        Column('mtime', label=_('Date'), align='right'),
        Column('size', label=_('Size'), align='right'),
        Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
        Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
        Column('comment', label=_('Comment')),
        Column('action', label=_('Action')),
    ]

    # generate history list

    def render_action(text, query, **kw):
        kw.update(dict(rel='nofollow'))
        return page.link_to(request, text, querystr=query, **kw)

    # read in the complete log of this page
    log = editlog.EditLog(request, rootpagename=pagename)
    count = 0
    pgactioncount = 0
    for line in log.reverse():
        rev = int(line.rev)
        actions = []
        if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
            size = page.size(rev=rev)
            actions.append(render_action(_('view'),
                {'action': 'recall', 'rev': '%d' % rev}))
            if pgactioncount == 0:
                rchecked = ' checked="checked"'
                lchecked = ''
            elif pgactioncount == 1:
                lchecked = ' checked="checked"'
                rchecked = ''
            else:
                lchecked = rchecked = ''
            diff = '<input type="radio" name="rev1" value="%d"%s>' \
                   '<input type="radio" name="rev2" value="%d"%s>' % (
                       rev, lchecked, rev, rchecked)
            if rev > 1:
                diff += render_action(' ' + _('to previous'),
                    {'action': 'diff', 'rev1': rev - 1, 'rev2': rev})
            comment = line.comment
            if not comment:
                if '/REVERT' in line.action:
                    comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                elif '/RENAME' in line.action:
                    comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
            pgactioncount += 1
        else:  # ATT*
            rev = '-'
            diff = '-'
            filename = wikiutil.url_unquote(line.extra)
            comment = "%s: %s %s" % (line.action, filename, line.comment)
            size = 0
            if line.action != 'ATTDEL':
                from MoinMoin.action import AttachFile
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                if line.action == 'ATTNEW':
                    actions.append(render_action(_('view'),
                        {'action': 'AttachFile', 'do': 'view', 'target': '%s' % filename}))
                elif line.action == 'ATTDRW':
                    actions.append(render_action(_('edit'),
                        {'action': 'AttachFile', 'drawing': '%s' % filename.replace(".draw", "")}))
                actions.append(render_action(_('get'),
                    {'action': 'AttachFile', 'do': 'get', 'target': '%s' % filename}))
                if request.user.may.delete(pagename):
                    actions.append(render_action(_('del'),
                        {'action': 'AttachFile', 'do': 'del', 'target': '%s' % filename}))

        history.addRow((
            rev,
            request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
            str(size),
            diff,
            line.getEditor(request) or _("N/A"),
            wikiutil.escape(comment) or '&nbsp;',
            " ".join(actions),
        ))
        count += 1
        if count >= max_count:
            break

    # print version history
    from MoinMoin.widget.browser import DataBrowserWidget

    request.write(unicode(html.H2().append(_('Revision History'))))

    if not count:  # there was no entry in logfile
        request.write(_('No log entries found.'))
        return

    history_table = DataBrowserWidget(request)
    history_table.setData(history)

    div = html.DIV(id="page-history")
    div.append(html.INPUT(type="hidden", name="action", value="diff"))
    div.append(history_table.render(method="GET"))
html.FORM(method="GET", action="") form.append(div) request.write(unicode(form))
def username(self, d):
    request = self.request
    _ = request.getText

    urls = []

    # Add username/homepage link for registered users. We don't care
    # if it exists, the user can create it.
    if request.user.valid and request.user.name:
        interwiki = wikiutil.getInterwikiHomePage(request)
        linkpage = '#'
        if interwiki[0] == 'Self':
            wikitail = wikiutil.url_unquote(interwiki[1])
            linkpage = request.script_root + '/' + wikitail
        name = wikiutil.escape(request.user.name)
        urls.append('<li class="nav-header"><a href="%s">%s</a></li>' % (linkpage, name))

        plugins = wikiutil.getPlugins('userprefs', request.cfg)
        for sub in plugins:
            if sub in request.cfg.userprefs_disabled:
                continue
            cls = wikiutil.importPlugin(request.cfg, 'userprefs', sub, 'Settings')
            obj = cls(request)
            if not obj.allowed():
                continue
            url = request.page.url(request, {'action': 'userprefs', 'sub': sub})
            urls.append('<li><a href="%s">%s</a></li>' % (url, obj.title))

    if request.user.valid:
        if request.user.auth_method in request.cfg.auth_can_logout:
            query = {'action': 'logout', 'logout': 'logout'}
            url = request.page.url(request, query)
            urls.append('<li><a href="%s">%s</a></li>' % (url, _('Logout')))
    elif request.cfg.auth_have_login:
        query = {'action': 'login'}
        # special direct-login link if the auth methods want no input
        if request.cfg.auth_login_inputs == ['special_no_input']:
            query['login'] = '******'
        url = request.page.url(request, query)
        urls.append('<li><a href="%s">%s</a></li>' % (url, _('Login')))

    formadd = getattr(request.cfg, 'user_form_add', ())
    # Please note that you need to add a function of the form
    # lambda self, req: ... to the config, as the function is
    # defined within a class.
    formcond = getattr(request.cfg, 'user_form_addcondition', lambda req: True)
    if formcond(request):
        for url, text in formadd:
            urls.append('<li><a href="%s">%s</a></li>' % (url, _(text)))

    out = ""
    if urls:
        out = u"""
<ul class="nav navbar-nav navbar-right">
    <li>
        <a class="dropdown-toggle" data-toggle="dropdown" title="%s">
            <i class="glyphicon glyphicon-user"></i>
        </a>
        <ul class="dropdown-menu navbar-right">
            %s
        </ul>
    </li>
</ul>""" % (_('User Preferences'), ("\n" + " " * 16).join(urls))
    return out
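# The loop over wikiutil.getPlugins('userprefs', ...) in username() expects
# each plugin module to expose a Settings class with an allowed() method and
# a title attribute. A minimal, hypothetical plugin that would appear in this
# dropdown could look like the sketch below; the class body is an assumption
# based only on how username() uses the object (real userprefs plugins derive
# from MoinMoin's userprefs base class, which is not shown here).

class Settings:
    def __init__(self, request):
        self.request = request
        self.title = request.getText('My Example Preferences')  # menu label

    def allowed(self):
        # only offer this entry to logged-in users
        return self.request.user.valid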