Example #1
def convert_editlog(page, output = None, overwrite = False):
	pagedir = page.getPagePath()
	pagename = wikiname(pagedir)
	if not output:
		output = pagename
	edit_log = editlog.EditLog(request, page.getPagePath('edit-log'))

	changes = {}
	for log in edit_log:
		# not supported. perhaps add anyway?
		if log.action in ('ATTNEW', 'ATTDEL', 'ATTDRW'):
			continue

		# 1201095949  192.168.2.23    E   start   [email protected]
		author = log.hostname
		if log.userid:
			userdata = user.User(request, log.userid)
			if userdata.name:
				author = userdata.name

		try:
			action = {
				'SAVE' : 'E',
				'SAVENEW' : 'C',
				'SAVE/REVERT' : 'R',
			}[log.action]
		except KeyError:
			action = log.action

		mtime = str(log.ed_time_usecs / USEC)
		changes[mtime] = u"\t".join([mtime, log.addr, action, dw.cleanID(log.pagename), author, log.comment])

	# see if we have missing entries, try to recover
	page = Page(request, pagename)
	if len(page.getRevList()) != len(changes):
		print "RECOVERING edit-log, missing %d entries" % (len(page.getRevList()) - len(changes))
		for rev in page.getRevList():
			page = Page(request, pagename, rev = rev)
			mtime = page.mtime_usecs() / USEC

			if not mtime:
				pagefile, realrev, exists = page.get_rev(rev = rev)
				if os.path.exists(pagefile):
					mtime = int(os.path.getmtime(pagefile))
					print "Recovered %s: %s" % (rev, mtime)

			mtime = str(mtime)
			if not changes.has_key(mtime):
				changes[mtime] = u"\t".join([mtime, '127.0.0.1', '?', dw.cleanID(pagename), 'root', 'recovered entry'])
				print "ADDING %s" % mtime

	changes = sorted(changes.values())
	out_file = os.path.join(output_dir, 'meta', dw.metaFN(output, '.changes'))
	writefile(out_file, changes, overwrite = overwrite)
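A note on the output format: the loop above flattens each MoinMoin edit-log entry into one tab-separated DokuWiki ".changes" record, keyed by second-resolution mtime. A minimal sketch of one such record (all values hypothetical):

# Minimal sketch of a single .changes record as built above (values hypothetical):
# mtime in seconds, client address, action code, cleaned page ID, author, comment.
fields = [u"1201095949", u"192.168.2.23", u"E", u"start", u"SomeUser", u"fixed typo"]
record = u"\t".join(fields)
print record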
Example #2
    def _do_queued_updates(self, request, amount=5):
        """ Assumes that the write lock is acquired """
        self.touch()
        writer = xapidx.Index(self.dir, True)
        writer.configure(self.prefixMap, self.indexValueMap)

        # do all page updates
        pages = self.update_queue.pages()[:amount]
        for name in pages:
            p = Page(request, name)
            if request.cfg.xapian_index_history:
                for rev in p.getRevList():
                    self._index_page(writer, Page(request, name, rev=rev), mode='update')
            else:
                self._index_page(writer, p, mode='update')
            self.update_queue.remove([name])

        # do page/attachment removals
        items = self.remove_queue.pages()[:amount]
        for item in items:
            _item = item.split('//')
            p = Page(request, _item[0])
            self._remove_item(writer, p, _item[1])
            self.remove_queue.remove([item])

        writer.close()
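The docstring stresses that the write lock must already be held when this method runs; a hypothetical caller sketch (the lock and index attribute names are assumptions, not taken from this snippet):

# Hypothetical caller sketch: hold the index write lock around the batch.
if index.lock.acquire(1.0):
    try:
        index._do_queued_updates(request, amount=5)  # drain up to 5 queued pages
    finally:
        index.lock.release()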
Example #3
    def _getHits(self, pages):
        """ Get the hit tuples in pages through _get_match """
        logging.debug("_getHits searching in %d pages ..." % len(pages))
        hits = []
        revisionCache = {}
        fs_rootpage = self.fs_rootpage
        for hit in pages:

            uid = hit.get('uid')
            wikiname = hit['wikiname']
            pagename = hit['pagename']
            attachment = hit['attachment']
            revision = int(hit.get('revision', 0))

            logging.debug("_getHits processing %r %r %d %r" %
                          (wikiname, pagename, revision, attachment))

            if wikiname in (self.request.cfg.interwikiname,
                            'Self'):  # THIS wiki
                page = Page(self.request, pagename, rev=revision)

                if not self.historysearch and revision:
                    revlist = page.getRevList()
                    # revlist can be empty if page was nuked/renamed since it was included in xapian index
                    if not revlist or revlist[0] != revision:
                        # nothing there at all or not the current revision
                        logging.debug(
                            "no history search, skipping non-current revision..."
                        )
                        continue

                if attachment:
                    # revision is currently always 0 here
                    if pagename == fs_rootpage:  # not really an attachment
                        page = Page(self.request,
                                    "%s/%s" % (fs_rootpage, attachment))
                        hits.append((wikiname, page, None, None, revision))
                    else:
                        matches = self._get_match(page=None, uid=uid)
                        hits.append(
                            (wikiname, page, attachment, matches, revision))
                else:
                    matches = self._get_match(page=page, uid=uid)
                    logging.debug("self._get_match %r" % matches)
                    if matches:
                        if not self.historysearch and pagename in revisionCache and revisionCache[pagename][0] < revision:
                            hits.remove(revisionCache[pagename][1])
                            del revisionCache[pagename]
                        hits.append(
                            (wikiname, page, attachment, matches, revision))
                        revisionCache[pagename] = (revision, hits[-1])

            else:  # other wiki
                hits.append((wikiname, pagename, attachment, None, revision))
        logging.debug("_getHits returning %r." % hits)
        return hits
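The revisionCache bookkeeping above is easy to misread: with history search disabled it keeps only the newest revision of each page in hits, evicting an older hit when a newer one arrives. The same pattern in isolation:

# Standalone sketch of the revisionCache pattern used above.
hits, cache = [], {}
for name, rev in [("FrontPage", 2), ("FrontPage", 5), ("OtherPage", 1)]:
    if name in cache and cache[name][0] < rev:
        hits.remove(cache[name][1])  # evict the older hit for this page
        del cache[name]
    hits.append((name, rev))
    cache[name] = (rev, hits[-1])
print hits  # [('FrontPage', 5), ('OtherPage', 1)]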
Example #4
    def _index_page(self, request, connection, pagename, mode="update"):
        """ Index a page.

        Index all revisions (if wanted by configuration) and all attachments.

        @param request: request suitable for indexing
        @param connection: the Indexer connection object
        @param pagename: a page name
        @param mode: 'add' = just add, no checks
                     'update' = check if already in index and update if needed (mtime)
        """
        page = Page(request, pagename)
        revlist = page.getRevList()  # recent revs first, does not include deleted revs
        logging.debug("indexing page %r, %d revs found" % (pagename, len(revlist)))

        if not revlist:
            # we have an empty revision list, that means the page is not there any more,
            # likely it (== all of its revisions, all of its attachments) got either renamed or nuked
            wikiname = request.cfg.interwikiname or u"Self"

            sc = self.get_search_connection()
            docs_to_delete = sc.get_all_documents_with_fields(wikiname=wikiname, pagename=pagename)
            # any page rev, any attachment
            sc.close()

            for doc in docs_to_delete:
                connection.delete(doc.id)
            logging.debug("page %s (all revs, all attachments) removed from xapian index" % pagename)

        else:
            if request.cfg.xapian_index_history:
                index_revs, remove_revs = revlist, []
            else:
                if page.exists():  # is current rev not deleted?
                    index_revs, remove_revs = revlist[:1], revlist[1:]
                else:
                    index_revs, remove_revs = [], revlist

            for revno in index_revs:
                updated = self._index_page_rev(request, connection, pagename, revno, mode=mode)
                logging.debug("updated page %r rev %d (updated==%r)" % (pagename, revno, updated))
                if not updated:
                    # we reached the revisions that are already present in the index
                    break

            for revno in remove_revs:
                # XXX remove_revs can be rather long for pages with many revs and
                # XXX most page revs usually will be already deleted. optimize?
                self._remove_page_rev(request, connection, pagename, revno)
                logging.debug("removed page %r rev %d" % (pagename, revno))

            from MoinMoin.action import AttachFile

            for attachmentname in AttachFile._get_files(request, pagename):
                self._index_attachment(request, connection, pagename, attachmentname, mode)
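The index_revs/remove_revs split above is the heart of the non-history configuration: only the current revision stays in the index. Reduced to its essentials (illustrative values):

# Reduced sketch of the revision split; getRevList() returns recent revs first.
revlist = [7, 6, 5]
xapian_index_history = False
page_exists = True  # is the current rev not deleted?
if xapian_index_history:
    index_revs, remove_revs = revlist, []
elif page_exists:
    index_revs, remove_revs = revlist[:1], revlist[1:]
else:
    index_revs, remove_revs = [], revlist
print index_revs, remove_revs  # [7] [6, 5]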
Example #5
def test_page_change_message(request):
    page = Page(request, "FrontPage")

    print "Provided with a dumb change type argument, this should raise an exception!"
    py.test.raises(notification.UnknownChangeType,
                   notification.page_change_message,
                   "StupidType",
                   request,
                   page,
                   "en",
                   revisions=page.getRevList())
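For readers unfamiliar with the old py.test API used here: py.test.raises(ExcClass, callable, *args, **kwargs) invokes the callable and only passes if it raises ExcClass. A self-contained sketch:

# Self-contained sketch of the py.test.raises call form used above.
import py

def boom(change_type):
    if change_type == "StupidType":
        raise ValueError("unknown change type")

py.test.raises(ValueError, boom, "StupidType")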
Example #6
    def _getHits(self, pages, matchSearchFunction):
        """ Get the hit tuples in pages through matchSearchFunction """
        logging.debug("_getHits searching in %d pages ..." % len(pages))
        hits = []
        revisionCache = {}
        fs_rootpage = self.fs_rootpage
        for hit in pages:
            if 'values' in hit:
                valuedict = hit['values']
                uid = hit['uid']
            else:
                valuedict = hit
                uid = None

            wikiname = valuedict['wikiname']
            pagename = valuedict['pagename']
            attachment = valuedict['attachment']
            logging.debug("_getHits processing %r %r %r" % (wikiname, pagename, attachment))

            if 'revision' in valuedict and valuedict['revision']:
                revision = int(valuedict['revision'])
            else:
                revision = 0

            if wikiname in (self.request.cfg.interwikiname, 'Self'): # THIS wiki
                page = Page(self.request, pagename, rev=revision)
                if not self.historysearch and revision:
                    revlist = page.getRevList()
                    # revlist can be empty if page was nuked/renamed since it was included in xapian index
                    if not revlist or revlist[0] != revision:
                        # nothing there at all or not the current revision
                        continue
                if attachment:
                    if pagename == fs_rootpage: # not really an attachment
                        page = Page(self.request, "%s/%s" % (fs_rootpage, attachment))
                        hits.append((wikiname, page, None, None))
                    else:
                        matches = matchSearchFunction(page=None, uid=uid)
                        hits.append((wikiname, page, attachment, matches))
                else:
                    matches = matchSearchFunction(page=page, uid=uid)
                    logging.debug("matchSearchFunction %r returned %r" % (matchSearchFunction, matches))
                    if matches:
                        if not self.historysearch and \
                                pagename in revisionCache and \
                                revisionCache[pagename][0] < revision:
                            hits.remove(revisionCache[pagename][1])
                            del revisionCache[pagename]
                        hits.append((wikiname, page, attachment, matches))
                        revisionCache[pagename] = (revision, hits[-1])
            else: # other wiki
                hits.append((wikiname, pagename, attachment, None, revision))
        return hits
Example #7
    def _getHits(self, pages):
        """ Get the hit tuples in pages through _get_match """
        logging.debug("_getHits searching in %d pages ..." % len(pages))
        hits = []
        revisionCache = {}
        fs_rootpage = self.fs_rootpage
        for hit in pages:

            uid = hit.get('uid')
            wikiname = hit['wikiname']
            pagename = hit['pagename']
            attachment = hit['attachment']
            revision = int(hit.get('revision', 0))

            logging.debug("_getHits processing %r %r %d %r" % (wikiname, pagename, revision, attachment))

            if wikiname in (self.request.cfg.interwikiname, 'Self'): # THIS wiki
                page = Page(self.request, pagename, rev=revision)

                if not self.historysearch and revision:
                    revlist = page.getRevList()
                    # revlist can be empty if page was nuked/renamed since it was included in xapian index
                    if not revlist or revlist[0] != revision:
                        # nothing there at all or not the current revision
                        logging.debug("no history search, skipping non-current revision...")
                        continue

                if attachment:
                    # revision is currently always 0 here
                    if pagename == fs_rootpage: # not really an attachment
                        page = Page(self.request, "%s/%s" % (fs_rootpage, attachment))
                        hits.append((wikiname, page, None, None, revision))
                    else:
                        matches = self._get_match(page=None, uid=uid)
                        hits.append((wikiname, page, attachment, matches, revision))
                else:
                    matches = self._get_match(page=page, uid=uid)
                    logging.debug("self._get_match %r" % matches)
                    if matches:
                        if not self.historysearch and pagename in revisionCache and revisionCache[pagename][0] < revision:
                            hits.remove(revisionCache[pagename][1])
                            del revisionCache[pagename]
                        hits.append((wikiname, page, attachment, matches, revision))
                        revisionCache[pagename] = (revision, hits[-1])

            else: # other wiki
                hits.append((wikiname, pagename, attachment, None, revision))
        logging.debug("_getHits returning %r." % hits)
        return hits
Example #8
    def _index_pages(self, request, files=None, mode='update'):
        """ Index all pages (and all given files)

        This should be called from indexPages or indexPagesInNewThread only!

        This may take some time, depending on the size of the wiki and speed
        of the machine.

        When called in a new thread, lock is acquired before the call,
        and this method must release it when it finishes or fails.

        @param request: the current request
        @keyword files: an optional list of files to index
        @keyword mode: how to index the files, either 'add', 'update' or
                       'rebuild'
        """

        # rebuilding the DB: delete it and add everything
        if mode == 'rebuild':
            for f in os.listdir(self.dir):
                os.unlink(os.path.join(self.dir, f))
            mode = 'add'

        try:
            self.touch()
            writer = xapidx.Index(self.dir, True)
            writer.configure(self.prefixMap, self.indexValueMap)
            pages = request.rootpage.getPageList(user='', exists=1)
            logging.debug("indexing all (%d) pages..." % len(pages))
            for pagename in pages:
                p = Page(request, pagename)
                request.page = p
                if request.cfg.xapian_index_history:
                    for rev in p.getRevList():
                        self._index_page(writer,
                                Page(request, pagename, rev=rev),
                                mode)
                else:
                    self._index_page(writer, p, mode)
            if files:
                logging.debug("indexing all files...")
                for fname in files:
                    fname = fname.strip()
                    self._index_file(request, writer, fname, mode)
            writer.close()
        finally:
            writer.__del__()
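Per the docstring, _index_pages must only be reached via indexPages or indexPagesInNewThread, which take care of the lock; a hypothetical driver sketch (the index object construction is assumed, not shown in this snippet):

# Hypothetical driver sketch: locking is handled by the public entry points.
index.indexPages(files=None, mode='rebuild')  # wipe the DB dir, re-add all pages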
Example #9
    def xmlrpc_getDiff(self, pagename, from_rev, to_rev, n_name=None):
        """
        Gets the binary difference between two page revisions.

        @param pagename: unicode string qualifying the page name

        @param from_rev: integer specifying the source revision. May be None to
        refer to a virtual empty revision, which leads to a diff
        containing the whole page.

        @param to_rev: integer specifying the target revision. May be None to
        refer to the current revision. If the current revision is the same
        as from_rev, there will be a special error condition "ALREADY_CURRENT".

        @param n_name: do a tag check verifying that n_name was the normalised
        name of the last tag

        If both from_rev and to_rev are None, this function acts similarly to getPage, i.e. it will diff("", currentRev).

        @return: Returns a dict:
        * status (not a field, implicit, returned as Fault if not SUCCESS):
        * "SUCCESS" - if the diff could be retrieved successfully
        * "NOT_EXIST" - item does not exist
        * "FROMREV_INVALID" - the source revision is invalid
        * "TOREV_INVALID" - the target revision is invalid
        * "INTERNAL_ERROR" - there was an internal error
        * "INVALID_TAG" - the last tag does not match the supplied normalised name
        * "ALREADY_CURRENT" - this is not merely an error condition. It rather means
         that there is no new revision to diff against, which is a good thing during
         synchronisation.
        * current: the revision number of the current revision (not the one which was diff'ed against)
        * diff: Binary object that transports a zlib-compressed binary diff (see bdiff.py, taken from Mercurial)
        * conflict: if there is a conflict on the page currently
        """
        from MoinMoin.util.bdiff import textdiff, compress
        from MoinMoin.wikisync import TagStore

        pagename = self._instr(pagename)
        if n_name is not None:
            n_name = self._instr(n_name)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        def allowed_rev_type(data):
            if data is None:
                return True
            return isinstance(data, int) and data > 0

        if not allowed_rev_type(from_rev):
            return xmlrpclib.Fault("FROMREV_INVALID",
                                   "Incorrect type for from_rev.")

        if not allowed_rev_type(to_rev):
            return xmlrpclib.Fault("TOREV_INVALID",
                                   "Incorrect type for to_rev.")

        currentpage = Page(self.request, pagename)
        if not currentpage.exists():
            return xmlrpclib.Fault("NOT_EXIST", "Page does not exist.")

        revisions = currentpage.getRevList()

        if from_rev is not None and from_rev not in revisions:
            return xmlrpclib.Fault("FROMREV_INVALID", "Unknown from_rev.")
        if to_rev is not None and to_rev not in revisions:
            return xmlrpclib.Fault("TOREV_INVALID", "Unknown to_rev.")

        # use lambda to defer execution in the next lines
        if from_rev is None:
            oldcontents = lambda: ""
        else:
            oldpage = Page(self.request, pagename, rev=from_rev)
            oldcontents = lambda: oldpage.get_raw_body_str()

        if to_rev is None:
            newpage = currentpage
            newcontents = lambda: currentpage.get_raw_body_str()
        else:
            newpage = Page(self.request, pagename, rev=to_rev)
            newcontents = lambda: newpage.get_raw_body_str()

        if oldcontents() and oldpage.get_real_rev() == newpage.get_real_rev():
            return xmlrpclib.Fault("ALREADY_CURRENT", "There are no changes.")

        if n_name is not None:
            tags = TagStore(newpage)
            last_tag = tags.get_last_tag()
            if last_tag is not None and last_tag.normalised_name != n_name:
                return xmlrpclib.Fault(
                    "INVALID_TAG",
                    "The used tag is incorrect because the normalised name does not match."
                )

        newcontents = newcontents()
        conflict = wikiutil.containsConflictMarker(newcontents)
        diffblob = xmlrpclib.Binary(
            compress(textdiff(oldcontents(), newcontents)))

        return {
            "conflict": conflict,
            "diff": diffblob,
            "diffversion": 1,
            "current": currentpage.get_real_rev()
        }
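On the wire this method is exposed as getDiff through MoinMoin's xmlrpc2 action; a client-side sketch (endpoint URL hypothetical) that diffs a whole page against the virtual empty revision and unpacks the zlib-compressed payload:

# Client-side sketch (wiki URL hypothetical); allow_none lets us pass None revs.
import xmlrpclib
import zlib

wiki = xmlrpclib.ServerProxy("http://example.com/wiki?action=xmlrpc2",
                             allow_none=True)
result = wiki.getDiff(u"FrontPage", None, None)  # from_rev=None: whole page
patch = zlib.decompress(result["diff"].data)     # binary diff (bdiff format)
print result["current"], result["conflict"]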
Example #10
def convert_editlog(page, output=None, overwrite=False):
    pagedir = page.getPagePath()
    pagename = wikiname(pagedir)
    if not output:
        output = pagename
    edit_log = editlog.EditLog(request, page.getPagePath('edit-log'))

    changes = {}
    for log in edit_log:
        # not supported. perhaps add anyway?
        if log.action in ('ATTNEW', 'ATTDEL', 'ATTDRW'):
            continue

        # 1201095949  192.168.2.23    E   start   [email protected]
        author = log.hostname
        if log.userid:
            userdata = user.User(request, log.userid)
            if userdata.name:
                author = userdata.name

        try:
            action = {
                'SAVE': 'E',
                'SAVENEW': 'C',
                'SAVE/REVERT': 'R',
            }[log.action]
        except KeyError:
            action = log.action

        mtime = str(log.ed_time_usecs / USEC)
        changes[mtime] = u"\t".join([
            mtime, log.addr, action,
            dw.cleanID(log.pagename), author, log.comment
        ])

    # see if we have missing entries, try to recover
    page = Page(request, pagename)
    if len(page.getRevList()) != len(changes):
        print "RECOVERING edit-log, missing %d entries" % (
            len(page.getRevList()) - len(changes))
        for rev in page.getRevList():
            page = Page(request, pagename, rev=rev)
            mtime = page.mtime_usecs() / USEC

            if not mtime:
                pagefile, realrev, exists = page.get_rev(rev=rev)
                if os.path.exists(pagefile):
                    mtime = int(os.path.getmtime(pagefile))
                    print "Recovered %s: %s" % (rev, mtime)

            mtime = str(mtime)
            if not changes.has_key(mtime):
                changes[mtime] = u"\t".join([
                    mtime, '127.0.0.1', '?',
                    dw.cleanID(pagename), 'root', 'recovered entry'
                ])
                print "ADDING %s" % mtime

    changes = sorted(changes.values())
    out_file = os.path.join(output_dir, 'meta', dw.metaFN(output, '.changes'))
    writefile(out_file, changes, overwrite=overwrite)
Example #11
def do_diff(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page(request)
        return

    try:
        date = request.form['date'][0]
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = request.form['rev1'][0]
        try:
            rev1 = int(rev1)
        except StandardError:
            rev1 = 0
    except KeyError:
        rev1 = -1

    try:
        rev2 = request.form['rev2'][0]
        try:
            rev2 = int(rev2)
        except StandardError:
            rev2 = 0
    except KeyError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        try:
            rev1 = request.form['rev'][0]
            try:
                rev1 = int(rev1)
            except StandardError:
                rev1 = -1
        except KeyError:
            rev1 = -1
 
    # spacing flag?
    try:
        ignorews = int(request.form['ignorews'][0])
    except (KeyError, ValueError, TypeError):
        ignorews = 0

    _ = request.getText
    
    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    revisions = currentpage.getRevList()
    if len(revisions) < 2:
        currentpage.send_page(request, msg=_("No older revisions available!"))
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    # Start output
    # This action generate content in the user language
    request.setContentLanguage(request.lang)

    request.http_headers()
    wikiutil.send_title(request, _('Diff for "%s"') % (pagename,), pagename=pagename)
  
    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    oldrev1, oldcount1 = None, 0
    oldrev2, oldcount2 = None, 0
    # get the filename of the version to compare to
    edit_count = 0
    for rev in revisions:
        edit_count += 1
        if rev <= rev1:
            oldrev1, oldcount1 = rev, edit_count
        if rev2 and rev >= rev2:
            oldrev2, oldcount2 = rev, edit_count
        if (oldrev1 and oldrev2) or (oldrev1 and not rev2):
            break
    
    if rev1 == -1:
        oldpage = Page(request, pagename, rev=revisions[1])
        oldcount1 = oldcount1 - 1
    elif rev1 == 0:
        oldpage = currentpage
        # oldcount1 is still on init value 0
    else:
        if oldrev1:
            oldpage = Page(request, pagename, rev=oldrev1)
        else:
            oldpage = Page(request, "$EmptyPage$") # hack
            oldpage.set_raw_body("")    # avoid loading from disk
            oldrev1 = 0 # XXX
              
    if rev2 == 0:
        newpage = currentpage
        # oldcount2 is still on init value 0
    else:
        if oldrev2:
            newpage = Page(request, pagename, rev=oldrev2)
        else:
            newpage = Page(request, "$EmptyPage$") # hack
            newpage.set_raw_body("")    # avoid loading from disk
            oldrev2 = 0 # XXX
    
    edit_count = abs(oldcount1 - oldcount2)

    # this should use the formatter, but there is none?
    request.write('<div id="content">\n') # start content div
    request.write('<p class="diff-header">')
    request.write(_('Differences between revisions %d and %d') % (oldpage.get_real_rev(), newpage.get_real_rev()))
    if edit_count > 1:
        request.write(' ' + _('(spanning %d versions)') % (edit_count,))
    request.write('</p>')
  
    if request.user.show_fancy_diff:
        from MoinMoin.util.diff import diff
        request.write(diff(request, oldpage.get_raw_body(), newpage.get_raw_body()))
        newpage.send_page(request, count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        lines = wikiutil.linediff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = _("No differences found!")
            if edit_count > 1:
                msg = msg + '<p>' + _('The page was saved %(count)d times, though!') % {
                    'count': edit_count}
            request.write(msg)
        else:
            if ignorews:
                request.write(_('(ignoring whitespace)') + '<br>')
            else:
                qstr = 'action=diff&ignorews=1'
                if rev1: qstr = '%s&rev1=%s' % (qstr, rev1)
                if rev2: qstr = '%s&rev2=%s' % (qstr, rev2)
                request.write(Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr) + '<p>')

            request.write('<pre>')
            for line in lines:
                if line[0] == "@":
                    request.write('<hr>')
                request.write(wikiutil.escape(line)+'\n')
            request.write('</pre>')

    request.write('</div>\n') # end content div
    wikiutil.send_footer(request, pagename, showpage=1)
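The parameter juggling at the top of do_diff supports several invocation forms; roughly (query strings illustrative):

# Query forms understood by do_diff (illustrative):
#   ?action=diff                 current revision vs. its predecessor
#   ?action=diff&rev=7           revision 7 vs. the current revision
#   ?action=diff&rev1=5&rev2=7   revision 5 vs. revision 7
#   ?action=diff&ignorews=1      same comparison, ignoring whitespace amounts
#   ?action=diff&date=...        as called from RecentChanges: pick rev1 by time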
Example #12
    def testGetRevList(self):
        page = Page(self.request, u"FrontPage")
        assert 1 in page.getRevList()
Example #13
    def xmlrpc_getDiff(self, pagename, from_rev, to_rev, n_name=None):
        """
        Gets the binary difference between two page revisions.

        @param pagename: unicode string qualifying the page name

        @param from_rev: integer specifying the source revision. May be None to
        refer to a virtual empty revision, which leads to a diff
        containing the whole page.

        @param to_rev: integer specifying the target revision. May be None to
        refer to the current revision. If the current revision is the same
        as from_rev, there will be a special error condition "ALREADY_CURRENT".

        @param n_name: do a tag check verifying that n_name was the normalised
        name of the last tag

        If both from_rev and to_rev are None, this function acts similarly to getPage, i.e. it will diff("", currentRev).

        @return: Returns a dict:
        * status (not a field, implicit, returned as Fault if not SUCCESS):
        * "SUCCESS" - if the diff could be retrieved successfully
        * "NOT_EXIST" - item does not exist
        * "FROMREV_INVALID" - the source revision is invalid
        * "TOREV_INVALID" - the target revision is invalid
        * "INTERNAL_ERROR" - there was an internal error
        * "INVALID_TAG" - the last tag does not match the supplied normalised name
        * "ALREADY_CURRENT" - this is not merely an error condition. It rather means
         that there is no new revision to diff against, which is a good thing during
         synchronisation.
        * current: the revision number of the current revision (not the one which was diff'ed against)
        * diff: Binary object that transports a zlib-compressed binary diff (see bdiff.py, taken from Mercurial)
        * conflict: if there is a conflict on the page currently
        """
        from MoinMoin.util.bdiff import textdiff, compress
        from MoinMoin.wikisync import TagStore

        pagename = self._instr(pagename)
        if n_name is not None:
            n_name = self._instr(n_name)

        # User may read page?
        if not self.request.user.may.read(pagename):
            return self.notAllowedFault()

        def allowed_rev_type(data):
            if data is None:
                return True
            return isinstance(data, int) and data > 0

        if not allowed_rev_type(from_rev):
            return xmlrpclib.Fault("FROMREV_INVALID", "Incorrect type for from_rev.")

        if not allowed_rev_type(to_rev):
            return xmlrpclib.Fault("TOREV_INVALID", "Incorrect type for to_rev.")

        currentpage = Page(self.request, pagename)
        if not currentpage.exists():
            return xmlrpclib.Fault("NOT_EXIST", "Page does not exist.")

        revisions = currentpage.getRevList()

        if from_rev is not None and from_rev not in revisions:
            return xmlrpclib.Fault("FROMREV_INVALID", "Unknown from_rev.")
        if to_rev is not None and to_rev not in revisions:
            return xmlrpclib.Fault("TOREV_INVALID", "Unknown to_rev.")

        # use lambda to defer execution in the next lines
        if from_rev is None:
            oldcontents = lambda: ""
        else:
            oldpage = Page(self.request, pagename, rev=from_rev)
            oldcontents = lambda: oldpage.get_raw_body_str()

        if to_rev is None:
            newpage = currentpage
            newcontents = lambda: currentpage.get_raw_body_str()
        else:
            newpage = Page(self.request, pagename, rev=to_rev)
            newcontents = lambda: newpage.get_raw_body_str()

        if oldcontents() and oldpage.get_real_rev() == newpage.get_real_rev():
            return xmlrpclib.Fault("ALREADY_CURRENT", "There are no changes.")

        if n_name is not None:
            tags = TagStore(newpage)
            last_tag = tags.get_last_tag()
            if last_tag is not None and last_tag.normalised_name != n_name:
                return xmlrpclib.Fault("INVALID_TAG", "The used tag is incorrect because the normalised name does not match.")

        newcontents = newcontents()
        conflict = wikiutil.containsConflictMarker(newcontents)
        diffblob = xmlrpclib.Binary(compress(textdiff(oldcontents(), newcontents)))

        return {"conflict": conflict, "diff": diffblob, "diffversion": 1, "current": currentpage.get_real_rev()}
Example #14
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in a RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get('filter')
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not (urlfilter.match(line.pagename)):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        FEED_HEADER_COMMENT = '''
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.
    
    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each item.
    
    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        ''' % (items_limit, max_items, unique, diffs, ddiffs)

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u'', ATOM_NAMESPACE), NS(u'wiki', RSSWIKI_NAMESPACE),
                 E_CURSOR((ATOM_NAMESPACE, u'feed'), )))
        f.send(
            E((ATOM_NAMESPACE, u'id'),
              full_url(request, page).encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'title'),
              cfg.sitename.encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'link'),
              {u'href': request.url_root.encode(config.charset)}))
        f.send(
            E((ATOM_NAMESPACE, u'summary'),
              ('RecentChanges at %s' % cfg.sitename).encode(config.charset)))
        #Icon
        #E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        #if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            #link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={'action': 'diff'})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            #if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            #if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={'action': 'info'})

            f.send(
                E(
                    (ATOM_NAMESPACE, u'entry'),
                    E((ATOM_NAMESPACE, u'id'), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'title'),
                      item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'updated'),
                      timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u'link'),
                      {u'href': link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u'summary'),
                      desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'author'),
                      E((ATOM_NAMESPACE, u'name'),
                        edname.encode(config.charset))),
                    #E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u'wiki:version'),
                      ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:status'),
                      (u'deleted', u'updated')[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u'wiki:diff'),
                      link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:history'),
                      history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                ))

        # emit logo data
        #if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
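A client-side sketch (wiki URL hypothetical) requesting this feed with the non-default parameters documented in FEED_HEADER_COMMENT above:

# Client sketch (URL hypothetical): fetch the feed with explicit parameters.
import urllib

params = urllib.urlencode({"action": "rss_rc", "items": 50, "unique": 1, "diffs": 1})
feed_xml = urllib.urlopen("http://example.com/wiki/RecentChanges?" + params).read()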
Example #15
    def testGetRevList(self):
        page = Page(self.request, u"FrontPage")
        assert 1 in page.getRevList()
Example #16
    def _index_page(self, request, connection, pagename, mode='update'):
        """ Index a page.

        Index all revisions (if wanted by configuration) and all attachments.

        @param request: request suitable for indexing
        @param connection: the Indexer connection object
        @param pagename: a page name
        @param mode: 'add' = just add, no checks
                     'update' = check if already in index and update if needed (mtime)
        """
        page = Page(request, pagename)
        revlist = page.getRevList(
        )  # recent revs first, does not include deleted revs
        logging.debug("indexing page %r, %d revs found" %
                      (pagename, len(revlist)))

        if not revlist:
            # we have an empty revision list, that means the page is not there any more,
            # likely it (== all of its revisions, all of its attachments) got either renamed or nuked
            wikiname = request.cfg.interwikiname or u'Self'

            sc = self.get_search_connection()
            docs_to_delete = sc.get_all_documents_with_fields(
                wikiname=wikiname, pagename=pagename)
            # any page rev, any attachment
            sc.close()

            for doc in docs_to_delete:
                connection.delete(doc.id)
            logging.debug(
                'page %s (all revs, all attachments) removed from xapian index'
                % pagename)

        else:
            if request.cfg.xapian_index_history:
                index_revs, remove_revs = revlist, []
            else:
                if page.exists():  # is current rev not deleted?
                    index_revs, remove_revs = revlist[:1], revlist[1:]
                else:
                    index_revs, remove_revs = [], revlist

            for revno in index_revs:
                updated = self._index_page_rev(request,
                                               connection,
                                               pagename,
                                               revno,
                                               mode=mode)
                logging.debug("updated page %r rev %d (updated==%r)" %
                              (pagename, revno, updated))
                if not updated:
                    # we reached the revisions that are already present in the index
                    break

            for revno in remove_revs:
                # XXX remove_revs can be rather long for pages with many revs and
                # XXX most page revs usually will be already deleted. optimize?
                self._remove_page_rev(request, connection, pagename, revno)
                logging.debug("removed page %r rev %d" % (pagename, revno))

            from MoinMoin.action import AttachFile
            for attachmentname in AttachFile._get_files(request, pagename):
                self._index_attachment(request, connection, pagename,
                                       attachmentname, mode)
Example #17
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        #XXX send error message
        pass

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.form['items'][0])
        max_items = min(max_items, items_limit) # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in a RSS file by default
        max_items = 15
    try:
        unique = int(request.form.get('unique', [0])[0])
    except ValueError:
        unique = 0
    try:
        diffs = int(request.form.get('diffs', [0])[0])
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.form.get('ddiffs', [0])[0])
    except ValueError:
        ddiffs = 0

    # prepare output
    out = StringIO.StringIO()
    handler = RssGenerator(out)

    # get data
    interwiki = request.getBaseURL()
    if interwiki[-1] != "/": interwiki = interwiki + "/"

    logo = re.search(r'src="([^"]*)"', cfg.logo_string)
    if logo: logo = request.getQualifiedURL(logo.group(1))
    
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((line.action[:4] != 'SAVE') or
            ((line.pagename in pages) and unique)): continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getEditorData(request)[1]
        line.time = util.datetime.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None
        counter += 1
        if counter >= max_items:
            break
    del log

    # start SAX stream
    handler.startDocument()
    handler._out.write(
        '<!--\n'
        '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
        '    You cannot get more than %d items though.\n'
        '    \n'
        '    Add "unique=1" to get a list of changes where page names are unique,\n'
        '    i.e. where only the latest change of each page is reflected.\n'
        '    \n'
        '    Add "diffs=1" to add change diffs to the description of each item.\n'
        '    \n'
        '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
        '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
        '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
        )

    # emit channel description
    handler.startNode('channel', {
        (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
        })
    handler.simpleNode('title', cfg.sitename)
    handler.simpleNode('link', interwiki + wikiutil.quoteWikinameURL(pagename))
    handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
    if logo:
        handler.simpleNode('image', None, {
            (handler.xmlns['rdf'], 'resource'): logo,
            })
    if cfg.interwikiname:
        handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

    handler.startNode('items')
    handler.startNode(('rdf', 'Seq'))
    for item in logdata:
        link = "%s%s#%04d%02d%02d%02d%02d%02d" % ((interwiki,
                wikiutil.quoteWikinameURL(item.pagename),) + item.time[:6])
        handler.simpleNode(('rdf', 'li'), None, attr={
            (handler.xmlns['rdf'], 'resource'): link,
        })
    handler.endNode(('rdf', 'Seq'))
    handler.endNode('items')
    handler.endNode('channel')

    # emit logo data
    if logo:
        handler.startNode('image', attr={
            (handler.xmlns['rdf'], 'about'): logo,
            })
        handler.simpleNode('title', cfg.sitename)
        handler.simpleNode('link', interwiki)
        handler.simpleNode('url', logo)
        handler.endNode('image')

    # emit items
    for item in logdata:
        page = Page(request, item.pagename)
        link = interwiki + wikiutil.quoteWikinameURL(item.pagename)
        rdflink = "%s#%04d%02d%02d%02d%02d%02d" % ((link,) + item.time[:6])
        handler.startNode('item', attr={
            (handler.xmlns['rdf'], 'about'): rdflink,
        })

        # general attributes
        handler.simpleNode('title', item.pagename)
        if ddiffs:
            handler.simpleNode('link', link+"?action=diff")
        else:
            handler.simpleNode('link', link)
            
        handler.simpleNode(('dc', 'date'), util.W3CDate(item.time))

        # description
        desc_text = item.comment
        if diffs:
            # TODO: rewrite / extend wikiutil.pagediff
            # searching for the matching pages doesn't really belong here
            revisions = page.getRevList()

            rl = len(revisions)
            for idx in range(rl):
                rev = revisions[idx]
                if rev <= item.rev:
                    if idx+1 < rl:
                        lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
                        if len(lines) > 20: lines = lines[:20] + ['...\n']
                        desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, '\n'.join(lines))
                    break
        if desc_text:
            handler.simpleNode('description', desc_text)

        # contributor
        edattr = {}
        if cfg.show_hosts:
            edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
        if isinstance(item.editor, Page):
            edname = item.editor.page_name
            ##edattr[(None, 'link')] = interwiki + wikiutil.quoteWikiname(edname)
        else:
            edname = item.editor
            ##edattr[(None, 'link')] = link + "?action=info"
        
        # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
        # if you know how to do this right, please send us a patch
        
        handler.startNode(('dc', 'contributor'))
        handler.startNode(('rdf', 'Description'), attr=edattr)
        handler.simpleNode(('rdf', 'value'), edname)
        handler.endNode(('rdf', 'Description'))
        handler.endNode(('dc', 'contributor'))

        # wiki extensions
        handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
        handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
        handler.simpleNode(('wiki', 'diff'), link + "?action=diff")
        handler.simpleNode(('wiki', 'history'), link + "?action=info")
        # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor ) 
        # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA ) 

        handler.endNode('item')

    # end SAX stream
    handler.endDocument()

    # send the generated XML document
    request.http_headers(["Content-Type: text/xml; charset=%s" % config.charset] + request.nocache)
    request.write(out.getvalue())
    request.finish()
    request.no_closing_html_code = 1
Example #18
def test_page_change_message(request):
    page = Page(request, "FrontPage")

    print "Provided with a dumb change type argument, this should raise an exception!"
    py.test.raises(notification.UnknownChangeType, notification.page_change_message,
                   "StupidType", request, page, "en", revisions=page.getRevList())
Example #19
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values["items"])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in a RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get("unique", 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get("diffs", 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get("ddiffs", 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get("filter")
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not (urlfilter.match(line.pagename)):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if not line.action.startswith("SAVE") or ((line.pagename in pages) and unique):
            continue
        # if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = "application/rss+xml"
        request.expires = expires
        request.last_modified = lastmod
        request.headers["Etag"] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        FEED_HEADER_COMMENT = """
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.
    
    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each item.
    
    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        """ % (
            items_limit,
            max_items,
            unique,
            diffs,
            ddiffs,
        )

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u"", ATOM_NAMESPACE), NS(u"wiki", RSSWIKI_NAMESPACE), E_CURSOR((ATOM_NAMESPACE, u"feed")))
        )
        f.send(E((ATOM_NAMESPACE, u"id"), full_url(request, page).encode(config.charset)))
        f.send(E((ATOM_NAMESPACE, u"title"), cfg.sitename.encode(config.charset)))
        f.send(E((ATOM_NAMESPACE, u"link"), {u"href": request.url_root.encode(config.charset)}))
        f.send(E((ATOM_NAMESPACE, u"summary"), ("RecentChanges at %s" % cfg.sitename).encode(config.charset)))
        # Icon
        # E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        # if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            # link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={"action": "diff"})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
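                # getRevList() is youngest-first: locate the revision this log
                # entry refers to and diff it against its predecessor,
                # revisions[idx + 1], truncating the diff to 20 lines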
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(
                                request, item.pagename, revisions[idx + 1], item.pagename, 0, ignorews=1
                            )
                            if len(lines) > 20:
                                lines = lines[:20] + ["...\n"]
                            lines = "\n".join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = "%s\n<pre>\n%s\n</pre>\n" % (desc_text, lines)
                        break
            # if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            # if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == "interwiki":
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={"action": "info"})

            f.send(
                E(
                    (ATOM_NAMESPACE, u"entry"),
                    E((ATOM_NAMESPACE, u"id"), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"title"), item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"updated"), timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u"link"), {u"href": link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u"summary"), desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"author"), E((ATOM_NAMESPACE, u"name"), edname.encode(config.charset))),
                    # E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u"wiki:version"), ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:status"), (u"deleted", u"updated")[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u"wiki:diff"), link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:history"), history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                )
            )

        # emit logo data
        # if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
Example #20
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        httpheaders = ["Content-Type: text/plain; charset=%s" % config.charset]
        request.emit_http_headers(httpheaders)
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.form['items'][0])
        max_items = min(max_items, items_limit) # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.form.get('unique', [0])[0])
    except ValueError:
        unique = 0
    try:
        diffs = int(request.form.get('diffs', [1])[0])
    except ValueError:
        diffs = 1
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.form.get('ddiffs', [1])[0])
    except ValueError:
        ddiffs = 1

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
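    # walk the edit log newest-first, collecting up to max_items SAVE* entries
    # the user may read; with unique=1 only a page's latest change is kept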
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE') or
                ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = timefuncs.formathttpdate(time.time() + cfg.rss_cache)

        httpheaders = ["Content-Type: text/xml; charset=%s" % config.charset,
                       "Expires: %s" % expires,
                       "Last-Modified: %s" % timestamp,
                       "Etag: %s" % etag, ]

        # send the generated XML document
        request.emit_http_headers(httpheaders)

        baseurl = request.getBaseURL()
        if not baseurl.endswith('/'):
            baseurl += '/'

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            '<!--\n'
            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
            '    You cannot get more than %d items though.\n'
            '    \n'
            '    Add "unique=1" to get a list of changes where page names are unique,\n'
            '    i.e. where only the latest change of each page is reflected.\n'
            '    \n'
            '    Add "diffs=0" to remove change diffs to the description of each items.\n'
            '    \n'
            '    Add "ddiffs=0" to link directly to the wikipage\n'
            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
            )

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
            })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
                })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={(handler.xmlns['rdf'], 'resource'): link, })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
                })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # emit items
        for item in logdata:
            page = Page(request, item.pagename)
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={(handler.xmlns['rdf'], 'about'): rdflink, })

            # general attributes
            handler.simpleNode('title', item.pagename)
            if ddiffs:
                handler.simpleNode('link', full_url(request, page, querystr={'action': 'diff'}))
            else:
                handler.simpleNode('link', full_url(request, page))

            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            # description
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else: # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
            # if you know how to do this right, please send us a patch
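            # link the contributor to a local user page, stripping any
            # "wiki:" interwiki prefix from the editor name first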
            user = edname.split(':')[-1]
            user_link = full_url(request, Page(request, user))

            desc_text = 'Change by <a href="%s">%s</a> -- "%s"' % (user_link, user, item.comment)
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['... (continued)\n']

                            # colorize the added/removed lines of the diff
                            fixed_lines = []
                            for line in lines:
                                line = wikiutil.escape(line)
                                if line.startswith('+'):
                                    line = "<font color='green'>%s</font>" % line
                                elif line.startswith('-'):
                                    line = "<font color='red'>%s</font>" % line
                                fixed_lines.append(line)
                            lines = fixed_lines
                            lines = '\n'.join(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
                        break
            if desc_text:
                handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            if cfg.show_hosts:
                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname

            handler.startNode(('dc', 'contributor'))
            handler.startNode(('rdf', 'Description'), attr=edattr)
            handler.simpleNode(('rdf', 'value'), edname)
            handler.endNode(('rdf', 'Description'))
            handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'), full_url(request, page, querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'), full_url(request, page, querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
Example #21
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            '<!--\n'
            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
            '    You cannot get more than %d items though.\n'
            '    \n'
            '    Add "unique=1" to get a list of changes where page names are unique,\n'
            '    i.e. where only the latest change of each page is reflected.\n'
            '    \n'
            '    Add "diffs=1" to add change diffs to the description of each items.\n'
            '    \n'
            '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs))

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'),
                               None,
                               attr={
                                   (handler.xmlns['rdf'], 'resource'): link,
                               })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): logo,
                              })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # emit items
        for item in logdata:
            page = Page(request, item.pagename)
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): rdflink,
                              })

            # general attributes
            handler.simpleNode('title', item.pagename)
            if ddiffs:
                handler.simpleNode(
                    'link', full_url(request,
                                     page,
                                     querystr={'action': 'diff'}))
            else:
                handler.simpleNode('link', full_url(request, page))

            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            if desc_text:
                handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            if cfg.show_hosts:
                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
            # if you know how to do this right, please send us a patch

            handler.startNode(('dc', 'contributor'))
            handler.startNode(('rdf', 'Description'), attr=edattr)
            handler.simpleNode(('rdf', 'value'), edname)
            handler.endNode(('rdf', 'Description'))
            handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
Example #22
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.form['date'][0]
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.form.get('rev1', [-1])[0])
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.form.get('rev2', [0])[0])
    except StandardError:
        rev2 = 0
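    # revision encoding used below: rev1 == -1 selects the previous revision,
    # a value of 0 selects the current revision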

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.form.get('ignorews', [0])[0])

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
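        # called from RecentChanges with a timestamp: find the last revision
        # saved at or before `date`; placeholder entries logged with
        # rev 99999999 carry no page revision and are skipped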
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.emit_http_headers()
    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1
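        # rev1 is now the older and rev2 the newer of the two explicit revisions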

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    edit_count = abs(newrev - oldrev)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()
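    # resolve the 0 / -1 placeholders into the actual stored revision numbers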

    revlist = currentpage.getRevList()

    # code below assumes that the page exists and has at least
    # one revision in the revlist, just bail out if not. Users
    # shouldn't really run into this anyway.
    if not revlist:
        request.write(f.div(0)) # end content div
        request.theme.send_footer(pagename)
        request.theme.send_closing_html()
        return

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    # Revision list starts from 2...
    if oldrev == min(revlist):
        disable_prev = u' disabled="disabled"'
    else:
        disable_prev = u''

    if newrev == max(revlist):
        disable_next = u' disabled="disabled"'
    else:
        disable_next = u''

    page_url = wikiutil.escape(currentpage.url(request), True)

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
 """ % (page_url, rev2, _("Revert to this revision"), disable_next)

    navigation_html = """
<span class="diff-header">%s</span>
<table class="diff">
<tr>
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:left">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
 %s
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:right">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
</tr>
</table>
""" % (title,
       page_url, oldrev - 1, oldrev, _("Previous change"), disable_prev,
       revert_html,
       page_url, newrev, newrev + 1, _("Next change"), disable_next, )

    request.write(f.rawHTML(navigation_html))

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body())))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()