Example #1
    def _load_group(self):
        request = self.request
        group_name = self.name

        page = Page(request, group_name)
        if page.exists():
            arena = 'pagegroups'
            key = wikiutil.quoteWikinameFS(group_name)
            cache = caching.CacheEntry(request,
                                       arena,
                                       key,
                                       scope='wiki',
                                       use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is up to date
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not up to date: recreate it
                members, member_groups = super(WikiGroup, self)._load_group()
                cache.update((members, member_groups))
                return members, member_groups
        else:
            raise GroupDoesNotExistError(group_name)
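
A note that applies to all of these examples: a MoinMoin "version" stamp is the edit time in microseconds since the Unix epoch, and wikiutil.version2timestamp converts it back to ordinary epoch seconds. A minimal sketch of the conversion pair, assuming exactly that microsecond convention (compare the *_usecs attribute names and the int(timestamp * 1000000) scaling in the Despam example further down):

def version2timestamp(version_usecs):
    # usec "version" stamp -> seconds since the epoch (float)
    return version_usecs / 1000000.0

def timestamp2version(seconds):
    # seconds since the epoch -> usec "version" stamp (int)
    return int(seconds * 1000000)
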
Example #2
    def _load_group(self):
        request = self.request
        group_name = self.name

        page = Page(request, group_name)
        if page.exists():
            arena = 'pagegroups'
            key = wikiutil.quoteWikinameFS(group_name)
            cache = caching.CacheEntry(request, arena, key, scope='wiki', use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is up to date
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not up to date: recreate it
                members, member_groups = super(WikiGroup, self)._load_group()
                cache.update((members, member_groups))
                return members, member_groups
        else:
            raise GroupDoesNotExistError(group_name)
Example #3
    def _load_dict(self):
        request = self.request
        dict_name = self.name

        page = Page(request, dict_name)
        if page.exists():
            arena = "pagedicts"
            key = wikiutil.quoteWikinameFS(dict_name)
            cache = caching.CacheEntry(request, arena, key, scope="wiki", use_pickle=True)
            try:
                cache_mtime = cache.mtime()
                page_mtime = wikiutil.version2timestamp(page.mtime_usecs())
                # TODO: fix up-to-date check mtime granularity problems.
                #
                # cache_mtime is float while page_mtime is integer
                # The comparison needs to be done on the lowest type of both
                if int(cache_mtime) > int(page_mtime):
                    # cache is up to date
                    return cache.content()
                else:
                    raise caching.CacheError
            except caching.CacheError:
                # either cache does not exist, is erroneous or not up to date: recreate it
                d = super(WikiDict, self)._load_dict()
                cache.update(d)
                return d
        else:
            raise DictDoesNotExistError(dict_name)
Example #4
 def lastEditInfo(self, request=None):
     time = wikiutil.version2timestamp(self.last_modified())
     if request:
         time = request.user.getFormattedDateTime(time) # Use user time format
     else:
         time = datetime.datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S') # fall back to a fixed format; time is already in seconds
     return {'editor': self.user, 'time': time}
Example #5
    def _addRevisionHistory(self, targetNode):
        """
        This will generate a revhistory element which it will populate with
        revision nodes. Each revision has the revnumber, date and
        authorinitials elements, and if a comment was supplied, a
        revremark element.

        The date element's format depends on the user's settings, so it will
        be in the same format as the revision history as viewed in the
        page info on the wiki.

        The authorinitials element will be the UserName or, if it was an
        anonymous edit, the hostname/ip-address.

        The revision history of included documents is NOT included at the
        moment due to technical difficulties.
        """
        _ = self.request.getText
        log = editlog.EditLog(self.request, rootpagename=self.title)
        user_cache = {}

        history = tree.element(None, u"revhistory")

        # read in the complete log of this page
        for line in log.reverse():
            if line.action not in ("SAVE", "SAVENEW", "SAVE/REVERT", "SAVE/RENAME"):
                # Let's ignore adding of attachments
                continue
            revision = tree.element(None, u"revision")

            # Revision number (without preceding zeros)
            self._addTextElem(revision, u"revnumber", line.rev.lstrip("0"))

            # Date of revision
            date_text = self.request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs))
            self._addTextElem(revision, u"date", date_text)

            # Author of revision
            if line.userid not in user_cache:
                user_cache[line.userid] = user.User(self.request, line.userid, auth_method="text_docbook:740")
            author = user_cache[line.userid]
            if author and author.name:
                self._addTextElem(revision, u"authorinitials", author.name)
            else:
                self._addTextElem(revision, u"authorinitials", line.hostname)

            # Comment from author of revision
            comment = line.comment
            if not comment:
                if "/REVERT" in line.action:
                    comment = _("Revert to revision %(rev)d.") % {"rev": int(line.extra)}
                elif "/RENAME" in line.action:
                    comment = _("Renamed from '%(oldpagename)s'.") % {"oldpagename": line.extra}
            if comment:
                self._addTextElem(revision, u"revremark", comment)

            history.xml_append(revision)

        if history.xml_first_child:
            # only add revision history if there is history to add
            targetNode.xml_append(history)
Example #6
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        cnt = 0
        script = [packLine(['MoinMoinPackage', '1']), ]

        for pagename in pagelist:
            pagename = pagename.strip()
            page = Page(request, pagename)
            if page.exists():
                cnt += 1
                script.append(packLine([function, str(cnt), pagename]))
                timestamp = wikiutil.version2timestamp(page.mtime_usecs())
                zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
                zi.compress_type = COMPRESSION_LEVEL
                zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            else:
                #import sys
                #print >>sys.stderr, "Could not find the page %s." % pagename.encode("utf-8")
                pass

        script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
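
One caveat about the ZipInfo construction above: the zip format cannot store dates before 1980, so a zero or very old page mtime makes zipfile raise an exception. The collectpackage example below guards against that; the guard in isolation (a sketch):

def clamp_zip_timestamp(timestamp):
    # zip cannot store timestamps before 1980; clamp old page mtimes
    nineteeneighty = (10 * 365 + 3) * 24 * 3600  # 1970 + 10 years + 3 leap days
    return max(nineteeneighty, timestamp)
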
Example #7
    def xmlrpc_getPageInfoVersion(self, pagename, rev):
        """
        Return page information for specific revision

        @param pagename: the name of the page (utf-8)
        @param rev: revision to get info about (int)
        @rtype: dict
        @return: page information
            * name (string): the canonical page name, UTF-8.
            * lastModified (date): Last modification date, UTC.
            * author (string): author name, UTF-8.
            * version (int): current version

        """
        pn = self._instr(pagename)

        # User may read this page?
        if not self.request.user.may.read(pn):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pn, rev=rev)
        else:
            page = Page(self.request, pn)
            rev = page.current_rev()

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Get page info
        edit_info = page.edit_info()
        if not edit_info:
            return self.noLogEntryFault()

        mtime = wikiutil.version2timestamp(long(
            edit_info['timestamp']))  # must be long for py 2.2.x
        gmtuple = tuple(time.gmtime(mtime))

        version = rev  # our new rev numbers: 1,2,3,4,....

        #######################################################################
        # BACKWARDS COMPATIBILITY CODE - remove when 1.2.x is regarded stone age
        # as we run a feed for BadContent on MoinMaster, we want to stay
        # compatible here for a while with 1.2.x moins asking us for BadContent
        # 1.3 uses the lastModified field for checking for updates, so it
        # should be no problem putting the old UNIX timestamp style of version
        # number in the version field
        if self.request.cfg.sitename == 'MoinMaster' and pagename == 'BadContent':
            version = int(mtime)
        #######################################################################

        return {
            'name': self._outstr(page.page_name),
            'lastModified': xmlrpclib.DateTime(gmtuple),
            'author': self._outstr(edit_info['editor']),
            'version': version,
        }
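
The lastModified value above is built by converting the usec stamp to epoch seconds, expanding that into a UTC time tuple, and wrapping the tuple in xmlrpclib.DateTime. The chain as a self-contained sketch (Python 2; the module is xmlrpc.client on Python 3):

import time
import xmlrpclib

def usecs_to_xmlrpc_datetime(ed_time_usecs):
    # usec "version" stamp -> xmlrpclib.DateTime in UTC
    mtime = ed_time_usecs / 1000000.0
    return xmlrpclib.DateTime(tuple(time.gmtime(mtime)))
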
Example #8
    def collectpackage(self, pagelist, fileobject, pkgname="", include_attachments=False):
        """ Expects a list of pages as an argument, and fileobject to be an open
        file object, which a zipfile will get written to.

        @param pagelist: pages to package
        @param fileobject: open file object to write to
        @param pkgname: optional file name, to prevent self packaging
        @param include_attachments: True if you want attachments collected
        @rtype: string or None
        @return: error message, if one happened
        """
        _ = self.request.getText
        COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED

        pages = []
        for pagename in pagelist:
            pagename = wikiutil.normalize_pagename(pagename, self.request.cfg)
            if pagename:
                page = Page(self.request, pagename)
                if page.exists() and self.request.user.may.read(pagename):
                    pages.append(page)
        if not pages:
            return (_('No pages like "%s"!') % wikiutil.escape(pagelist))

        # Set zipfile output
        zf = zipfile.ZipFile(fileobject, "w", COMPRESSION_LEVEL)

        cnt = 0
        userid = user.getUserIdentification(self.request)
        script = [packLine(['MoinMoinPackage', '1']), ]

        for page in pages:
            cnt += 1
            files = _get_files(self.request, page.page_name)
            script.append(packLine(["AddRevision", str(cnt), page.page_name, userid, "Created by the PackagePages action."]))

            timestamp = wikiutil.version2timestamp(page.mtime_usecs())

            # avoid getting strange exceptions from zipfile in case of pre-1980 timestamps
            nineteeneighty = (10 * 365 + 3) * 24 * 3600 # 1970 + 10y + 3d
            timestamp = max(nineteeneighty, timestamp) # zip can not store timestamps before 1980

            zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))
            if include_attachments:
                for attname in files:
                    if attname != pkgname:
                        cnt += 1
                        zipname = "%d_attachment" % cnt
                        script.append(packLine(["AddAttachment", zipname, attname, page.page_name, userid, "Created by the PackagePages action."]))
                        filename = AttachFile.getFilename(self.request, page.page_name, attname)
                        zf.write(filename, zipname)
        script += [packLine(['Print', 'Thank you for using PackagePages!'])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #9
    def xmlrpc_getPageInfoVersion(self, pagename, rev):
        """
        Return page information for specific revision

        @param pagename: the name of the page (utf-8)
        @param rev: revision to get info about (int)
        @rtype: dict
        @return: page information
            * name (string): the canonical page name, UTF-8.
            * lastModified (date): Last modification date, UTC.
            * author (string): author name, UTF-8.
            * version (int): current version

        """
        pn = self._instr(pagename)

        # User may read this page?
        if not self.request.user.may.read(pn):
            return self.notAllowedFault()

        if rev is not None:
            page = Page(self.request, pn, rev=rev)
        else:
            page = Page(self.request, pn)
            rev = page.current_rev()

        # Non existing page?
        if not page.exists():
            return self.noSuchPageFault()

        # Get page info
        edit_info = page.edit_info()
        if not edit_info:
            return self.noLogEntryFault()

        mtime = wikiutil.version2timestamp(long(edit_info['timestamp'])) # must be long for py 2.2.x
        gmtuple = tuple(time.gmtime(mtime))

        version = rev # our new rev numbers: 1,2,3,4,....

        #######################################################################
        # BACKWARDS COMPATIBILITY CODE - remove when 1.2.x is regarded stone age
        # as we run a feed for BadContent on MoinMaster, we want to stay
        # compatible here for a while with 1.2.x moins asking us for BadContent
        # 1.3 uses the lastModified field for checking for updates, so it
        # should be no problem putting the old UNIX timestamp style of version
        # number in the version field
        if self.request.cfg.sitename == 'MoinMaster' and pagename == 'BadContent':
            version = int(mtime)
        #######################################################################

        return {
            'name': self._outstr(page.page_name),
            'lastModified': xmlrpclib.DateTime(gmtuple),
            'author': self._outstr(edit_info['editor']),
            'version': version,
            }
Example #10
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        # page LanguageSetup needs no packing!
        existing_pages = [
            pagename for pagename in pagelist
            if Page(request, pagename).exists() and pagename != 'LanguageSetup'
        ]
        if not existing_pages:
            return

        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        script = [
            packLine(['MoinMoinPackage', '1']),
        ]

        cnt = 0
        for pagename in existing_pages:
            pagename = pagename.strip()
            page = Page(request, pagename)
            files = _get_files(request, pagename)
            for attname in files:
                cnt += 1
                zipname = "%d" % cnt
                script.append(
                    packLine([
                        "ReplaceUnderlayAttachment", zipname, attname, pagename
                    ]))
                attpath = AttachFile.getFilename(request, pagename, attname)
                zf.write(attpath, zipname)

            cnt += 1
            zipname = "%d" % cnt
            script.append(packLine([function, zipname, pagename]))
            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
            zi = zipfile.ZipInfo(
                filename=zipname,
                date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))

        script += [
            packLine([
                'Print',
                'Installed MoinMaster page bundle %s.' %
                os.path.basename(filename)
            ])
        ]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #11
    def xmlrpc_getRecentChanges(self, date):
        """
        Get RecentChanges since date

        @param date: date since when rc will be listed
        @rtype: list
        @return: a list of changed pages since date, which should be in
            UTC. The result is a list, where each element is a struct:
            * name (string) :
                Name of the page. The name is in UTF-8.
            * lastModified (date) :
                Date of last modification, in UTC.
            * author (string) :
                Name of the author (if available). UTF-8.
            * version (int) :
                Current version.
        """

        return_items = []

        edit_log = editlog.EditLog(self.request)
        for log in edit_log.reverse():
            # get last-modified UTC (DateTime) from log
            gmtuple = tuple(
                time.gmtime(wikiutil.version2timestamp(log.ed_time_usecs)))
            lastModified_date = xmlrpclib.DateTime(gmtuple)

            # skip if older than "date"
            if lastModified_date < date:
                break

            # skip if knowledge not permitted
            if not self.request.user.may.read(log.pagename):
                continue

            # get page name (str) from log
            pagename_str = self._outstr(log.pagename)

            # get user name (str) from log
            author_str = log.hostname
            if log.userid:
                userdata = user.User(self.request, log.userid)
                if userdata.name:
                    author_str = userdata.name
            author_str = self._outstr(author_str)

            return_item = {
                'name': pagename_str,
                'lastModified': lastModified_date,
                'author': author_str,
                'version': int(log.rev)
            }
            return_items.append(return_item)

        return return_items
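
The early break relies on ordering two xmlrpclib.DateTime values with <. That works because DateTime compares by its ISO-8601 string value, and for fixed-width "YYYYMMDDThh:mm:ss" strings lexical order coincides with chronological order. A quick check:

import time
import xmlrpclib  # xmlrpc.client on Python 3

older = xmlrpclib.DateTime(tuple(time.gmtime(0)))
newer = xmlrpclib.DateTime(tuple(time.gmtime(3600)))
assert older < newer
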
Example #12
    def flush_pages(pages):
        # new day reached: print out stuff

        #pages.sort(lambda p, q: cmp(q.time_tuple, p.time_tuple))

        date = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0].ed_time_usecs))
        output.append(u'<h5>%s</h5><ul>' % date)

        for p in pages:
            output.append(format_page_edits(macro, p))

        output.append(u'</ul>')
Example #13
def logchain2(request, log1, log2):
    n1_next = None
    n2_next = None
    count = 0
    while True:
        count += 1
        try:
            n1 = n1_next or next(log1)
        except StopIteration:
            n1 = None
        try:
            n2 = n2_next or next(log2)
        except StopIteration:
            n2 = None

        n1_next = n1
        n2_next = n2
        # print count, n1_next, n1.pagename, n2_next, n2.pagename

        if n1 is None and n2 is None:
            break
        elif n1 is not None and n2 is not None:
            t1 = request.user.getTime(wikiutil.version2timestamp(n1.ed_time_usecs))[:5]
            t2 = request.user.getTime(wikiutil.version2timestamp(n2.ed_time_usecs))[:5]
            if t1 > t2:
                n1_next = None
                yield n1
            elif t1 == t2:  # same item, discard one
                n1_next = None
                n2_next = None
                yield n1
            else:
                n2_next = None
                yield n2
        elif n1 is None:
            n2_next = None
            yield n2
        else:
            n1_next = None
            yield n1
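
Note that logchain2 compares entries through request.user.getTime(...)[:5], i.e. at minute resolution, which is what lets the t1 == t2 branch treat same-minute entries from both logs as one item and discard the duplicate. A usage sketch (hypothetical iterators; real ones would come from something like editlog.EditLog(request).reverse()):

# for line in logchain2(request, iter(local_log), iter(global_log)):
#     ...  # entries arrive newest-first, merged across both logs
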
Example #14
    def xmlrpc_getRecentChanges(self, date):
        """
        Get RecentChanges since date

        @param date: date since when rc will be listed
        @rtype: list
        @return: a list of changed pages since date, which should be in
            UTC. The result is a list, where each element is a struct:
            * name (string) :
                Name of the page. The name is in UTF-8.
            * lastModified (date) :
                Date of last modification, in UTC.
            * author (string) :
                Name of the author (if available). UTF-8.
            * version (int) :
                Current version.
        """

        return_items = []

        edit_log = editlog.EditLog(self.request)
        for log in edit_log.reverse():
            # get last-modified UTC (DateTime) from log
            gmtuple = tuple(time.gmtime(wikiutil.version2timestamp(log.ed_time_usecs)))
            lastModified_date = xmlrpclib.DateTime(gmtuple)

            # skip if older than "date"
            if lastModified_date < date:
                break

            # skip if knowledge not permitted
            if not self.request.user.may.read(log.pagename):
                continue

            # get page name (str) from log
            pagename_str = self._outstr(log.pagename)

            # get user name (str) from log
            author_str = log.hostname
            if log.userid:
                userdata = user.User(self.request, log.userid)
                if userdata.name:
                    author_str = userdata.name
            author_str = self._outstr(author_str)

            return_item = {'name': pagename_str,
                           'lastModified': lastModified_date,
                           'author': author_str,
                           'version': int(log.rev) }
            return_items.append(return_item)

        return return_items
Example #15
def logchain(request, log1):

    # inv_map = {}
    # for k, v in request.cfg.routes.iteritems():
    #     inv_map[v] = inv_map.get(v, [])
    #     inv_map[v].append(k)

    logs = [(log1, 'wiki'),] # default wiki recent-logs
    for name, storage in request.storage.iteritems():
        data = storage.history(request)
        # name_filter_re = inv_map[name]
        logs.append((data, name))

    # one buffered entry per log source; None means "needs a fetch"
    next_data = [None] * len(logs)

    while True:
        for idx, packed in enumerate(logs):
            # print packed
            s, storage_name = packed
            try:
                if next_data[idx] is None:
                    # buffer empty: fetch the next entry for this storage
                    next_data[idx] = next(s)
                    # skip entries that do not belong to this storage
                    while True:
                        mt = get_middleware_type(request, next_data[idx].pagename)
                        # mt = Page(request, next_data[idx].pagename).middleware_type()
                        if mt == storage_name:
                            break
                        next_data[idx] = next(s)
            except StopIteration:
                next_data[idx] = None

        if not max(next_data): # all is None
            break

        # pick the latest log among storages
        times = []
        for s in next_data:
            if s is None:
                times.append(0)
            else:
                times.append(request.user.getTime(wikiutil.version2timestamp(s.ed_time_usecs))[:5])

        mtime = max(times)
        idx = times.index(mtime)
        ydata = next_data[idx]
        next_data[idx] = None # invalidate
        yield ydata
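
The times list mixes the integer sentinel 0 with (year, month, day, hour, minute) tuples, and max still works because CPython 2 orders mixed types by type name, so any tuple compares greater than any int. A quick check (Python 2 only; Python 3 raises TypeError for this comparison):

assert max([0, (2008, 1, 10, 12, 30)]) == (2008, 1, 10, 12, 30)
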
Example #16
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:  # no edit-log available yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = timefuncs.formathttpdate(time.time() + 24 * 3600)

        httpheaders = [
            "Content-Type: text/plain; charset=UTF-8",
            "Expires: %s" % expires,
            "Last-Modified: %s" % timestamp,
            "Etag: %s" % etag,
        ]

        # send the generated XML document
        request.emit_http_headers(httpheaders)

        baseurl = request.getBaseURL()
        if not baseurl.endswith("/"):
            baseurl += "/"

        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode("utf-8"))
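
The nested ifs above implement the usual conditional-GET rule: answer 304 only when at least one of the two validators was sent and matches, and the other, if sent, matches as well. The same rule as one hedged helper (a sketch, not part of the module):

def is_not_modified(if_modified_since, if_none_match, timestamp, etag):
    ims_ok = (if_modified_since == timestamp) if if_modified_since else None
    inm_ok = (if_none_match == etag) if if_none_match else None
    if ims_ok is None and inm_ok is None:
        return False  # no validators sent: full response needed
    # every validator that was sent must match
    return ims_ok is not False and inm_ok is not False
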
Example #17
def getPageListFromLog(request):
    this_day = request.user.getTime(time.time())[0:3]
    log = editlog.EditLog(request)
    pages = {}
    pagelist = []
    ignore_pages = {}
    day_count = 0

    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue

        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        day = line.time_tuple[0:3]

        if (this_day != day or not _MAX_DAYS) and len(pages) > 0:
            # new day or bookmark reached: print out stuff
            this_day = day
            for page in pages:
                ignore_pages[page] = None

            for page in pages.values():
                pagelist.append(page[0].pagename)

            pages = {}
            day_count += 1
            if _MAX_DAYS and (day_count >= _MAX_DAYS):
                break

        elif this_day != day:
            # new day but no changes
            this_day = day

        if line.pagename in ignore_pages:
            continue

        # end listing by default if user has a bookmark and we reached it
        if not _MAX_DAYS:
            break

        if line.pagename in pages:
            pages[line.pagename].append(line)
        else:
            pages[line.pagename] = [line]
    else:
        if len(pages) > 0:
            for page in pages.values():
                pagelist.append(page[0].pagename)
    return pagelist
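
The trailing else above belongs to the for loop, not to an if: it runs only when the log is exhausted without hitting break, which is how the last (possibly partial) day of edits still makes it into pagelist. A minimal reminder of the for/else semantics:

ran_else = False
for n in [1, 2, 3]:
    if n == 99:
        break
else:
    ran_else = True  # loop finished without break
assert ran_else
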
Example #18
 def create_package(self, script, page=None):
     # creates the package example zip file
     userid = user.getUserIdentification(self.request)
     COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
     zip_file = tempfile.mkstemp(suffix='.zip')[1]
     zf = zipfile.ZipFile(zip_file, "w", COMPRESSION_LEVEL)
     if page:
         timestamp = wikiutil.version2timestamp(page.mtime_usecs())
         zi = zipfile.ZipInfo(filename="1", date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
         zi.compress_type = COMPRESSION_LEVEL
         zf.writestr(zi, page.get_raw_body().encode("utf-8"))
     zf.writestr("1_attachment", "sample attachment")
     zf.writestr(MOIN_PACKAGE_FILE, script.encode("utf-8"))
     zf.close()
     return zip_file
Example #19
    def packagePages(self, pagelist, filename, function):
        """ Puts pages from pagelist into filename and calls function on them on installation. """
        request = self.request
        try:
            os.remove(filename)
        except OSError:
            pass
        # page LanguageSetup needs no packing!
        existing_pages = [
            pagename for pagename in pagelist if Page(request, pagename).exists() and pagename != "LanguageSetup"
        ]
        if not existing_pages:
            return

        zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

        script = [packLine(["MoinMoinPackage", "1"])]

        fallback_timestamp = int(time.time())

        cnt = 0
        for pagename in existing_pages:
            pagename = pagename.strip()
            page = Page(request, pagename)
            files = _get_files(request, pagename)
            for attname in files:
                cnt += 1
                zipname = "%d" % cnt
                script.append(packLine(["ReplaceUnderlayAttachment", zipname, attname, pagename]))
                attpath = AttachFile.getFilename(request, pagename, attname)
                zf.write(attpath, zipname)

            cnt += 1
            zipname = "%d" % cnt
            script.append(packLine([function, zipname, pagename]))
            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
            if not timestamp:
                # page.mtime_usecs() returns 0 for underlay pages
                timestamp = fallback_timestamp
            dt = datetime.fromtimestamp(timestamp)
            zi = zipfile.ZipInfo(filename=zipname, date_time=dt.timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))

        script += [packLine(["Print", "Installed MoinMaster page bundle %s." % os.path.basename(filename)])]

        zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
        zf.close()
Example #20
    def _readLockFile(self):
        """Load lock info if not yet loaded."""
        _ = self._
        self.owner = None
        self.owner_html = wikiutil.escape(_("<unknown>"))
        self.timestamp = 0

        if self.locktype:
            try:
                entry = editlog.EditLog(self.request, filename=self._filename()).next()
            except StopIteration:
                entry = None
                                                    
            if entry:
                self.owner = entry.userid or entry.addr
                self.owner_html = entry.getEditor(self.request)
                self.timestamp = wikiutil.version2timestamp(entry.ed_time_usecs)
Example #21
 def create_package(self, script, page=None):
     # creates the package example zip file
     userid = user.getUserIdentification(self.request)
     COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
     zip_file = tempfile.mkstemp(suffix='.zip')[1]
     zf = zipfile.ZipFile(zip_file, "w", COMPRESSION_LEVEL)
     if page:
         timestamp = wikiutil.version2timestamp(page.mtime_usecs())
         zi = zipfile.ZipInfo(
             filename="1",
             date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
         zi.compress_type = COMPRESSION_LEVEL
         zf.writestr(zi, page.get_raw_body().encode("utf-8"))
     zf.writestr("1_attachment", "sample attachment")
     zf.writestr(MOIN_PACKAGE_FILE, script.encode("utf-8"))
     zf.close()
     return zip_file
Example #22
def show_pages(request, pagename, editor, timestamp):
    _ = request.getText

    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}
    #  mimic macro object for use of RecentChanges subfunctions
    macro = tmp()
    macro.request = request
    macro.formatter = request.html_formatter

    request.write("<table>")
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break

        if not request.user.may.read(line.pagename):
            continue

        if line.pagename not in pages:
            pages[line.pagename] = 1
            if repr(line.getInterwikiEditorData(request)) == editor:
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                request.write(RecentChanges.format_page_edits(macro, [line], timestamp))

    request.write(
        """
</table>
<p>
<form method="post" action="%(url)s">
<input type="hidden" name="action" value="Despam">
<input type="hidden" name="ticket" value="%(ticket)s">
<input type="hidden" name="editor" value="%(editor)s">
<input type="submit" name="ok" value="%(label)s">
</form>
</p>
"""
        % dict(
            url=request.href(pagename),
            ticket=wikiutil.createTicket(request),
            editor=wikiutil.url_quote(editor),
            label=_("Revert all!"),
        )
    )
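
show_pages receives its cutoff in seconds while edit-log entries carry microseconds, hence the int(timestamp * 1000000) scaling (the inverse of version2timestamp) before the comparison against line.ed_time_usecs. A sketch with a hypothetical cutoff:

cutoff_seconds = 1200000000.0  # hypothetical bookmark time
cutoff_usecs = int(cutoff_seconds * 1000000)
assert cutoff_usecs == 1200000000 * 1000000
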
Example #23
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:  # no edit-log available yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24 * 3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated XML document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(
                p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Example #24
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:  # no edit-log available yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24*3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated XML document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Example #25
def show_pages(request, pagename, editor, timestamp):
    _ = request.getText

    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}
    #  mimic macro object for use of RecentChanges subfunctions
    macro = tmp()
    macro.request = request
    macro.formatter = request.html_formatter

    request.write("<table>")
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break

        if not request.user.may.read(line.pagename):
            continue

        if line.pagename not in pages:
            pages[line.pagename] = 1
            if repr(line.getInterwikiEditorData(request)) == editor:
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                request.write(RecentChanges.format_page_edits(macro, [line], timestamp))

    request.write('''
</table>
<p>
<form method="post" action="%(url)s">
<input type="hidden" name="action" value="Despam">
<input type="hidden" name="ticket" value="%(ticket)s">
<input type="hidden" name="editor" value="%(editor)s">
<input type="submit" name="ok" value="%(label)s">
</form>
</p>
''' % dict(
        url=request.href(pagename),
        ticket=wikiutil.createTicket(request),
        editor=wikiutil.url_quote(editor),
        label=_("Revert all!"),
    ))
Example #26
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return
    if request.isSpiderAgent:  # reduce bot cpu usage
        return ''

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get('items', max_items)),
                        items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get('unique', unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get('diffs', diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get('lines', max_lines)),
                        lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get('show_att', show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get('page', page_pattern)
    except ValueError:
        pass

    # if we are just interested in a specific page, using the local edit-log
    # of that page is much faster than the global one - esp. if the page was
    # NOT recently changed and the global edit-log is rather big.
    if is_single_page_match(page_pattern):
        kw = dict(rootpagename=page_pattern)
    else:
        kw = {}
    log = editlog.EditLog(request, **kw)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((not show_att and not line.action.startswith('SAVE'))
                or ((line.pagename in pages) and unique)
                or not match_page(line.pagename, page_pattern)):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique,
                                     max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler.write(
            u'<!--\n'
            u'    Add an "items=nnn" URL parameter to get more than the \n'
            u'    default %(def_max_items)d items. You cannot get more than \n'
            u'    %(items_limit)d items though.\n'
            u'    \n'
            u'    Add "unique=1" to get a list of changes where page names are unique,\n'
            u'    i.e. where only the latest change of each page is reflected.\n'
            u'    \n'
            u'    Add "diffs=1" to add change diffs to the description of each items.\n'
            u'    \n'
            u'    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u'    \n'
            u'    Add "lines=nnn" to change maximum number of diff/body lines \n'
            u'    to show. Cannot be more than %(lines_limit)d.\n'
            u'    \n'
            u'    Add "show_att=1" to show items related to attachments.\n'
            u'    \n'
            u'    Add "page=pattern" to show feed only for specific pages.\n'
            u'    Pattern can be empty (it would match to all pages), \n'
            u'    can start with circumflex (it would be interpreted as \n'
            u'    regular expression in this case), end with slash (for \n'
            u'    getting feed for page tree) or point to specific page (if \n'
            u'    none of the above can be applied).\n'
            u'    \n'
            u'    Current settings: items=%(max_items)i, unique=%(unique)i, \n'
            u'    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n'
            u'    show_att=%(show_att)i\n'
            u'-->\n' % locals())

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description',
                           u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'),
                               None,
                               attr={
                                   (handler.xmlns['rdf'], 'resource'): link,
                               })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): logo,
                              })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): rdflink,
                              })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'):  # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == 'ATTNEW':
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request,
                                       cur_pagename,
                                       filename,
                                       do='view'))

                    comment = _(u"Upload of attachment '%(filename)s'.") % {
                        'filename': filename
                    }

                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={'action': 'AttachFile'}))

                    comment = _(u"Attachment '%(filename)s' deleted.") % {
                        'filename': filename
                    }

                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request,
                                       cur_pagename,
                                       filename,
                                       do='view'))

                    comment = _(u"Drawing '%(filename)s' saved.") % {
                        'filename': filename
                    }

            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {
                        'rev': to_rev}) + "<br />" \
                        + _("Comment:") + " " + comment

                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': item.extra}) + "<br />" \
                        + _("Comment:") + " " + comment
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename

                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={
                                         'action': 'recall',
                                         'rev': str(item_rev)
                                     }))
                    else:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={
                                         'action': 'diff',
                                         'rev1': str(item_rev),
                                         'rev2': str(item_rev - 1)
                                     }))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename,
                                     rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request,
                                                  cur_pagename,
                                                  item_rev - 1,
                                                  cur_pagename,
                                                  item_rev,
                                                  ignorews=1)

                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']

                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)

                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

                if not ddiffs:
                    handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else:  # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"

                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch

                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
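
Unlike the sisterpages action, this feed's ETag folds in every request parameter that changes the generated body, so changing any of them invalidates a cached 304. The idea in isolation (a sketch with a hypothetical helper name):

def make_feed_etag(lastmod, *params):
    # one dash-separated component per value that affects the output
    return "-".join("%d" % int(p) for p in (lastmod,) + params)

# make_feed_etag(1200000000, 15, 1, 0, 1, 20, 0) -> '1200000000-15-1-0-1-20-0'
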
Example #27
def get_data(pagename, request, filterpage=None):
    
    # Get results from cache
    if filterpage:
        arena = Page(request, pagename)
    else:
        arena = 'charts'
    
    cache_days, cache_views, cache_edits = [], [], []
    cache_date = 0
    cache = caching.CacheEntry(request, arena, 'hitcounts')
    if cache.exists():
        try:
            cache_date, cache_days, cache_views, cache_edits = eval(cache.content())
        except:
            cache.remove() # cache gone bad

    # Get new results from the log
    log = eventlog.EventLog(request)
    try:
        new_date = log.date()
    except logfile.LogMissing:
        new_date = None
        
    # prepare data
    days = []
    views = []
    edits = []
    ratchet_day = None
    ratchet_time = None
    if new_date is not None:
        log.set_filter(['VIEWPAGE', 'SAVEPAGE'])
        for event in log.reverse():
            #print ">>>", wikiutil.escape(repr(event)), "<br>"
    
            if event[0] <= cache_date:
                break
            # XXX Bug: event[2].get('pagename') -> u'Aktuelle%C4nderungen' 8(
            eventpage = event[2].get('pagename','')
            if filterpage and eventpage != filterpage:
                continue
            time_tuple = request.user.getTime(wikiutil.version2timestamp(event[0]))
            day = tuple(time_tuple[0:3])
            if day != ratchet_day:
                # new day
                while ratchet_time:
                    ratchet_time -= 86400
                    rday = tuple(request.user.getTime(ratchet_time)[0:3])
                    if rday <= day: break
                    days.append(request.user.getFormattedDate(ratchet_time))
                    views.append(0)
                    edits.append(0)
                days.append(request.user.getFormattedDate(wikiutil.version2timestamp(event[0])))
                views.append(0)
                edits.append(0)
                ratchet_day = day
                ratchet_time = wikiutil.version2timestamp(event[0])
            if event[1] == 'VIEWPAGE':
                views[-1] = views[-1] + 1
            elif event[1] == 'SAVEPAGE':
                edits[-1] = edits[-1] + 1
    
        days.reverse()
        views.reverse()
        edits.reverse()

    # merge the day on the end of the cache
    if cache_days and days and days[0] == cache_days[-1]:
        cache_edits[-1] += edits[0]
        cache_views[-1] += views[0]
        days, views, edits = days[1:], views[1:], edits[1:]

    # Update and save the cache
    cache_days.extend(days)
    cache_views.extend(views)
    cache_edits.extend(edits)
    if new_date is not None:
        cache.update("(%r, %r, %r, %r)" % 
                     (new_date, cache_days, cache_views, cache_edits))

    return cache_days, cache_views, cache_edits
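
The hitcounts cache entry is just the repr of a (date, days, views, edits) tuple, written with string formatting and read back with eval in the cache.exists() branch above. The round-trip in isolation (hypothetical values):

new_date = 1200000000  # hypothetical stamp from log.date()
days, views, edits = ['2008-01-10'], [3], [1]
payload = "(%r, %r, %r, %r)" % (new_date, days, views, edits)
assert eval(payload) == (new_date, days, views, edits)
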
Example #28
def macro_RecentChanges(macro, abandoned=False):
    # handle abandoned keyword
    if abandoned:
        return print_abandoned(macro)

    request = macro.request
    _ = request.getText
    output = []
    user = request.user
    page = macro.formatter.page
    pagename = page.page_name

    d = {}
    d["page"] = page
    d["q_page_name"] = wikiutil.quoteWikinameURL(pagename)

    log = editlog.EditLog(request)

    tnow = time.time()
    msg = ""

    # get bookmark from valid user
    bookmark_usecs = request.user.getBookmark() or 0

    # add bookmark link if valid user
    d["rc_curr_bookmark"] = None
    d["rc_update_bookmark"] = None
    if request.user.valid:
        d["rc_curr_bookmark"] = _("(no bookmark set)")
        if bookmark_usecs:
            currentBookmark = wikiutil.version2timestamp(bookmark_usecs)
            currentBookmark = user.getFormattedDateTime(currentBookmark)
            currentBookmark = _("(currently set to %s)") % currentBookmark
            deleteBookmark = page.link_to(
                request, _("Delete bookmark"), querystr={"action": "bookmark", "time": "del"}, rel="nofollow"
            )
            d["rc_curr_bookmark"] = currentBookmark + " " + deleteBookmark

        version = wikiutil.timestamp2version(tnow)
        d["rc_update_bookmark"] = page.link_to(
            request, _("Set bookmark"), querystr={"action": "bookmark", "time": "%d" % version}, rel="nofollow"
        )

    # set max size in days
    max_days = min(int(request.values.get("max_days", 0)), _DAYS_SELECTION[-1])
    # default to _MAX_DAYS for users without a bookmark
    if not max_days and not bookmark_usecs:
        max_days = _MAX_DAYS
    d["rc_max_days"] = max_days

    # give known user the option to extend the normal display
    if request.user.valid:
        d["rc_days"] = _DAYS_SELECTION
    else:
        d["rc_days"] = []

    output.append(request.theme.recentchanges_header(d))

    pages = {}
    ignore_pages = {}

    today = request.user.getTime(tnow)[0:3]
    this_day = today
    day_count = 0

    for line in log.reverse():
        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        day = line.time_tuple[0:3]
        hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)

        if this_day != day or (not hilite and not max_days):
            # new day or bookmark reached: print out stuff
            this_day = day
            for p in pages:
                ignore_pages[p] = None
            pages = filter_pages(request, pages.values())
            pages.sort(cmp_lines)
            pages.reverse()

            if len(pages) > 0:
                if request.user.valid:
                    bmtime = pages[0][0].ed_time_usecs
                    d["bookmark_link_html"] = page.link_to(
                        request,
                        _("Set bookmark"),
                        querystr={"action": "bookmark", "time": "%d" % bmtime},
                        rel="nofollow",
                    )
                else:
                    d["bookmark_link_html"] = None
                d["date"] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
                output.append(request.theme.recentchanges_daybreak(d))

                for p in pages:
                    output.append(format_page_edits(macro, p, bookmark_usecs))

                day_count += 1
                if max_days and (day_count >= max_days):
                    break

            pages = {}

        elif this_day != day:
            # new day but no changes
            # (note: unreachable in this variant, since the branch above
            # already catches this_day != day)
            this_day = day

        if line.pagename in ignore_pages:
            continue

        # end listing by default if user has a bookmark and we reached it
        if not max_days and not hilite:
            msg = _("[Bookmark reached]")
            break

        if line.pagename in pages:
            pages[line.pagename].append(line)
        else:
            pages[line.pagename] = [line]
    else:
        # end of loop reached: print out stuff
        # XXX duplicated code from above
        # but above does not trigger if we have the first day in wiki history
        for p in pages:
            ignore_pages[p] = None
        pages = filter_pages(request, pages.values())
        pages.sort(cmp_lines)
        pages.reverse()

        if len(pages) > 0:
            if request.user.valid:
                bmtime = pages[0][0].ed_time_usecs
                d["bookmark_link_html"] = page.link_to(
                    request, _("Set bookmark"), querystr={"action": "bookmark", "time": "%d" % bmtime}, rel="nofollow"
                )
            else:
                d["bookmark_link_html"] = None
            d["date"] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
            output.append(request.theme.recentchanges_daybreak(d))

            for p in pages:
                output.append(format_page_edits(macro, p, bookmark_usecs))

    d["rc_msg"] = msg
    output.append(request.theme.recentchanges_footer(d))

    return "".join(output)
Beispiel #29
0
def getblacklist(request, pagename, do_update):
    """ Get blacklist, possibly downloading new copy

    @param request: current request (request instance)
    @param pagename: bad content page name (unicode)
    @param do_update: if true, check the master server for a newer copy (bool)
    @rtype: list
    @return: list of blacklisted regular expressions
    """
    from MoinMoin.PageEditor import PageEditor
    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
    if do_update:
        tooold = time.time() - 1800  # 30 minutes
        failure = caching.CacheEntry(request,
                                     "antispam",
                                     "failure",
                                     scope='wiki')
        fail_time = failure.mtime()  # only update if no failure in the last 30 minutes
        if (mymtime < tooold) and (fail_time < tooold):
            logging.info(
                "%d *BadContent too old, have to check for an update..." %
                tooold)
            import xmlrpclib
            import socket

            timeout = 15  # time out for reaching the master server via xmlrpc
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)

            master_url = request.cfg.antispam_master_url
            master = xmlrpclib.ServerProxy(master_url)
            try:
                # Get BadContent info
                master.putClientInfo('ANTISPAM-CHECK', request.url)
                response = master.getPageInfo(pagename)

                # It seems that response is always a dict
                if isinstance(response, dict) and 'faultCode' in response:
                    raise WikirpcError("failed to get BadContent information",
                                       response)

                # Compare date against local BadContent copy
                masterdate = response['lastModified']

                if isinstance(masterdate, datetime.datetime):
                    # for python 2.5
                    mydate = datetime.datetime(
                        *tuple(time.gmtime(mymtime))[0:6])
                else:
                    # for python <= 2.4.x
                    mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))

                logging.debug("master: %s mine: %s" % (masterdate, mydate))
                if mydate < masterdate:
                    # Get new copy and save
                    logging.info("Fetching page from %s..." % master_url)
                    master.putClientInfo('ANTISPAM-FETCH', request.url)
                    response = master.getPage(pagename)
                    if isinstance(response, dict) and 'faultCode' in response:
                        raise WikirpcError("failed to get BadContent data",
                                           response)
                    p._write_file(response)
                    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
                else:
                    failure.update(
                        "")  # we didn't get a modified version, this avoids
                    # permanent polling for every save when there
                    # is no updated master page

            except (socket.error, xmlrpclib.ProtocolError), err:
                logging.error(
                    'Timeout / socket / protocol error when accessing %s: %s' %
                    (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")

            except (xmlrpclib.Fault, ), err:
                logging.error('Fault on %s: %s' % (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")

            except Error, err:
                # In case of Error, we log the error and use the local BadContent copy.
                logging.error(str(err))
            # restore the default socket timeout saved above
            socket.setdefaulttimeout(old_timeout)

    # the listing breaks off here; upstream parses the local BadContent copy
    # into a list of regexps -- raw lines returned as a hedged stand-in:
    return p.get_raw_body().splitlines()
Beispiel #30
0
def get_data(pagename, request, filterpage=None):
    cache_days, cache_views, cache_edits = [], [], []
    cache_date = 0

    # Get results from cache
    if filterpage:
        arena = Page(request, pagename)
        cache = caching.CacheEntry(request, arena, 'hitcounts', scope='item', use_pickle=True)
    else:
        arena = 'charts'
        cache = caching.CacheEntry(request, arena, 'hitcounts', scope='wiki', use_pickle=True)

    if cache.exists():
        try:
            cache_date, cache_days, cache_views, cache_edits = cache.content()
        except:
            cache.remove() # cache gone bad

    # Get new results from the log
    log = eventlog.EventLog(request)
    try:
        new_date = log.date()
    except logfile.LogMissing:
        new_date = None

    # prepare data
    days = []
    views = []
    edits = []
    ratchet_day = None
    ratchet_time = None
    if new_date is not None:
        log.set_filter(['VIEWPAGE', 'SAVEPAGE'])
        latest = None
        for event in log.reverse():
            # don't use event_log.date()
            if latest is None:
                latest = event[0]
            event_usecs = event[0]
            if event_usecs <= cache_date:
                break
            eventpage = event[2].get('pagename', '')
            if filterpage and eventpage != filterpage:
                continue
            event_secs = wikiutil.version2timestamp(event_usecs)
            time_tuple = time.gmtime(event_secs) # must be UTC
            day = tuple(time_tuple[0:3])
            if day != ratchet_day:
                # new day
                while ratchet_time:
                    ratchet_time -= 86400 # seconds per day
                    rday = tuple(time.gmtime(ratchet_time)[0:3]) # must be UTC
                    if rday <= day:
                        break
                    days.append(DATE_FMT % rday)
                    views.append(0)
                    edits.append(0)
                days.append(DATE_FMT % day)
                views.append(0)
                edits.append(0)
                ratchet_day = day
                ratchet_time = event_secs
            if event[1] == 'VIEWPAGE':
                views[-1] += 1
            elif event[1] == 'SAVEPAGE':
                edits[-1] += 1

        days.reverse()
        views.reverse()
        edits.reverse()

    # merge the day on the end of the cache
    if cache_days and days and days[0] == cache_days[-1]:
        cache_edits[-1] += edits[0]
        cache_views[-1] += views[0]
        days, views, edits = days[1:], views[1:], edits[1:]

    # Update and save the cache
    cache_days.extend(days)
    cache_views.extend(views)
    cache_edits.extend(edits)
    if new_date is not None:
        cache.update((latest, cache_days, cache_views, cache_edits))

    return cache_days, cache_views, cache_edits
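The subtle part of this get_data variant is the day ratchet: walking the event log newest-first, it pads an explicit zero row for every calendar day between two events, so the resulting chart has no gaps. A self-contained sketch of the same idea (DAY_FMT below is a stand-in for the module's DATE_FMT constant):

import time

DAY_FMT = '%04d-%02d-%02d'  # stand-in for the module's DATE_FMT

def fill_days(event_secs_newest_first):
    """Return (days, counts) oldest-first, padding zero rows for quiet days."""
    days, counts = [], []
    ratchet_day = None   # (year, month, day) currently being counted
    ratchet_time = None  # timestamp anchoring the ratchet
    for secs in event_secs_newest_first:
        day = tuple(time.gmtime(secs)[0:3])  # must be UTC, as above
        if day != ratchet_day:
            # step back one day at a time, emitting zero rows, until we
            # reach the day of the current (older) event
            while ratchet_time:
                ratchet_time -= 86400  # seconds per day
                rday = tuple(time.gmtime(ratchet_time)[0:3])
                if rday <= day:
                    break
                days.append(DAY_FMT % rday)
                counts.append(0)
            days.append(DAY_FMT % day)
            counts.append(0)
            ratchet_day = day
            ratchet_time = secs
        counts[-1] += 1
    days.reverse()
    counts.reverse()
    return days, counts

# three hits today, two silent days, one hit three days ago:
now = time.time()
print fill_days([now, now, now, now - 3 * 86400])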
Beispiel #32
0
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use "No log entries found." msg because we already have i18n
        # for that. Better would be "At least one log entry was not found."
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {'button': other_diff_button_html}

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (title,
       page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
       revert_html,
       page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(request, on=1, querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                    }, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
                }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
    'rev_header': _('Revision %(rev)d as of %(date)s'),
    'rev_size_caption': _('Size'),
    'rev_author_caption': _('Editor'),
    'rev_ts_caption': _('Date'),
    'rev_comment_caption': _('Comment'),
}

    rev_info_old_html = rev_info_html % {
        'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4', 'diff-first-link diff-old-rev', _('Diff with oldest revision in left pane'), _("No older revision available for diff")),
        'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190', 'diff-prev-link diff-old-rev', _('Diff with older revision in left pane'), _("No older revision available for diff")),
        'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192', 'diff-next-link diff-old-rev', _('Diff with newer revision in left pane'), _("Can't change to revision newer than in right pane")),
        'rev_last_link': '',
        'rev': oldrev,
        'rev_size': oldpage.size(),
        'rev_author': oldlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link': '',
        'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190', 'diff-prev-link diff-new-rev', _('Diff with older revision in right pane'), _("Can't change to revision older than revision in left pane")),
        'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192', 'diff-next-link diff-new-rev', _('Diff with newer revision in right pane'), _("No newer revision available for diff")),
        'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5', 'diff-last-link diff-old-rev', _('Diff with newest revision in right pane'), _("No newer revision available for diff")),
        'rev': newrev,
        'rev_size': newpage.size(),
        'rev_author': newlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info")))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
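The revision plumbing at the top of execute() encodes two sentinels: rev == 0 means "current revision" and rev1 == -1 means "one before current", with a swap ensuring the older revision ends up in the left pane. A compact sketch of just that normalization (normalize_revs is a hypothetical helper, not part of the action):

# Sentinel semantics taken from the code above: rev == 0 -> current
# revision, rev1 == -1 -> one before current.
def normalize_revs(rev1, rev2, currentrev):
    # mirror of the swap in execute(): put the older revision first
    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1
    oldrev = {-1: currentrev - 1, 0: currentrev}.get(rev1, rev1)
    newrev = rev2 or currentrev
    return oldrev, newrev

assert normalize_revs(-1, 0, 10) == (9, 10)   # default: previous vs current
assert normalize_revs(7, 3, 10) == (3, 7)     # swapped into order
assert normalize_revs(0, 5, 10) == (5, 10)    # current paired with rev 5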
Beispiel #33
0
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date)  # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date:  # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use "No log entries found." msg because we already have i18n
        # for that. Better would be "At least one log entry was not found."
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ),
                             pagename=pagename,
                             allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"),
        enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {
        'button': other_diff_button_html
    }

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (
        title,
        page_url,
        "left",
        prev_oldrev,
        oldrev,
        _("Previous change"),
        enabled(oldrev > 1),
        revert_html,
        page_url,
        "right",
        newrev,
        next_newrev,
        _("Next change"),
        enabled(newrev < currentrev),
    )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes,
                     enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(
                request,
                on=1,
                querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                },
                css_class="diff-nav-link %s" % css_classes,
                title=enabled_title) + request.formatter.text(
                    caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
            }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
        'rev_header': _('Revision %(rev)d as of %(date)s'),
        'rev_size_caption': _('Size'),
        'rev_author_caption': _('Editor'),
        'rev_ts_caption': _('Date'),
        'rev_comment_caption': _('Comment'),
    }

    rev_info_old_html = rev_info_html % {
        'rev_first_link':
        rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4',
                     'diff-first-link diff-old-rev',
                     _('Diff with oldest revision in left pane'),
                     _("No older revision available for diff")),
        'rev_prev_link':
        rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190',
                     'diff-prev-link diff-old-rev',
                     _('Diff with older revision in left pane'),
                     _("No older revision available for diff")),
        'rev_next_link':
        rev_nav_link(
            (oldrev < currentrev) and (next_oldrev < newrev), next_oldrev,
            newrev, u'\u2192', 'diff-next-link diff-old-rev',
            _('Diff with newer revision in left pane'),
            _("Can't change to revision newer than in right pane")),
        'rev_last_link':
        '',
        'rev':
        oldrev,
        'rev_size':
        oldpage.size(),
        'rev_author':
        oldlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link':
        '',
        'rev_prev_link':
        rev_nav_link(
            (newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev,
            u'\u2190', 'diff-prev-link diff-new-rev',
            _('Diff with older revision in right pane'),
            _("Can't change to revision older than revision in left pane")),
        'rev_next_link':
        rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192',
                     'diff-next-link diff-new-rev',
                     _('Diff with newer revision in right pane'),
                     _("No newer revision available for diff")),
        'rev_last_link':
        rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5',
                     'diff-last-link diff-old-rev',
                     _('Diff with newest revision in right pane'),
                     _("No newer revision available for diff")),
        'rev':
        newrev,
        'rev_size':
        newpage.size(),
        'rev_author':
        newlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(
            f.rawHTML(
                diff_html.diff(request,
                               oldpage.get_raw_body(),
                               newpage.get_raw_body(),
                               old_top=rev_info_old_html,
                               new_top=rev_info_new_html,
                               old_top_class="diff-info",
                               new_top_class="diff-info")))
        newpage.send_page(count_hit=0,
                          content_only=1,
                          content_id="content-below-diff")
    else:
        request.write(
            f.rawHTML(
                '<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>'
                % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(
                    _('The page was saved %(count)d times, though!') %
                    {'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')),
                              f.linebreak())
            else:
                qstr = {
                    'action': 'diff',
                    'ignorews': '1',
                }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(
                    f.paragraph(1),
                    Page(request, pagename).link_to(
                        request,
                        text=_('Ignore changes in the amount of whitespace'),
                        querystr=qstr,
                        rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0))  # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
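Both diff variants lean on the pre-Python-2.5 `cond and a or b` idiom (see enabled() and the prev_/next_ rev computations). It only behaves like a true ternary because the middle operand is always truthy; a two-line demonstration of the hazard:

# safe: enabled()'s truthy branch u' disabled="disabled"' is never falsy
print True and u'x' or u'y'   # -> u'x', as a real ternary would give
# hazard: a falsy middle operand silently falls through
print True and u'' or u'y'    # -> u'y', not the intended u''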
Beispiel #34
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get('filter')
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not (urlfilter.match(line.pagename)):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        FEED_HEADER_COMMENT = '''
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.
    
    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each items.
    
    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        ''' % (items_limit, max_items, unique, diffs, ddiffs)

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u'', ATOM_NAMESPACE), NS(u'wiki', RSSWIKI_NAMESPACE),
                 E_CURSOR((ATOM_NAMESPACE, u'feed'), )))
        f.send(
            E((ATOM_NAMESPACE, u'id'),
              full_url(request, page).encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'title'),
              cfg.sitename.encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'link'),
              {u'href': request.url_root.encode(config.charset)}))
        f.send(
            E((ATOM_NAMESPACE, u'summary'),
              ('RecentChanges at %s' % cfg.sitename).encode(config.charset)))
        #Icon
        #E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        #if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            #link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={'action': 'diff'})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            #if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            #if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={'action': 'info'})

            f.send(
                E(
                    (ATOM_NAMESPACE, u'entry'),
                    E((ATOM_NAMESPACE, u'id'), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'title'),
                      item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'updated'),
                      timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u'link'),
                      {u'href': link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u'summary'),
                      desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'author'),
                      E((ATOM_NAMESPACE, u'name'),
                        edname.encode(config.charset))),
                    #E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u'wiki:version'),
                      ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:status'),
                      (u'deleted', u'updated')[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u'wiki:diff'),
                      link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:history'),
                      history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                ))

        # emit logo data
        #if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
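The 304 block near the top of both RSS actions implements HTTP conditional GET: each validator the client sent (If-Modified-Since, If-None-Match) must match what we would serve, and at least one must be present. The nested ifs collapse into one predicate; a sketch under that reading (is_not_modified is a hypothetical helper):

def is_not_modified(if_modified_since, if_none_match, timestamp, etag):
    # a validator the client sent must match; an absent one is ignored
    ims_ok = if_modified_since == timestamp
    inm_ok = if_none_match == etag
    if if_modified_since and if_none_match:
        return ims_ok and inm_ok
    return ims_ok or inm_ok

assert is_not_modified("t", None, "t", "e")         # only IMS sent, fresh
assert not is_not_modified("t", "stale", "t", "e")  # ETag sent but stale
assert not is_not_modified(None, None, "t", "e")    # no validators -> full body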
Beispiel #35
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            '<!--\n'
            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
            '    You cannot get more than %d items though.\n'
            '    \n'
            '    Add "unique=1" to get a list of changes where page names are unique,\n'
            '    i.e. where only the latest change of each page is reflected.\n'
            '    \n'
            '    Add "diffs=1" to add change diffs to the description of each item.\n'
            '    \n'
            '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs))

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'),
                               None,
                               attr={
                                   (handler.xmlns['rdf'], 'resource'): link,
                               })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): logo,
                              })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # emit items
        for item in logdata:
            page = Page(request, item.pagename)
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): rdflink,
                              })

            # general attributes
            handler.simpleNode('title', item.pagename)
            if ddiffs:
                handler.simpleNode(
                    'link', full_url(request,
                                     page,
                                     querystr={'action': 'diff'}))
            else:
                handler.simpleNode('link', full_url(request, page))

            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            if desc_text:
                handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            if cfg.show_hosts:
                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
            # if you know how to do this right, please send us a patch

            handler.startNode(('dc', 'contributor'))
            handler.startNode(('rdf', 'Description'), attr=edattr)
            handler.simpleNode(('rdf', 'value'), edname)
            handler.endNode(('rdf', 'Description'))
            handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
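Both feed actions derive their ETag by joining every request parameter that changes the response body, so flipping any option invalidates client caches rather than risking a mismatched 304. A quick property check:

def make_etag(lastmod, max_items, diffs, ddiffs, unique):
    # same format string as above: all body-affecting inputs, joined
    return "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

a = make_etag(1000, 15, 0, 0, 0)
b = make_etag(1000, 15, 1, 0, 0)  # same content age, diffs toggled
assert a != b  # any parameter change must change the ETag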
Beispiel #36
0
    def history(page, pagename, request):
        # show history as default
        _ = request.getText
        default_count, limit_max_count = request.cfg.history_count[0:2]
        paging = request.cfg.history_paging

        try:
            max_count = int(request.values.get('max_count', default_count))
        except ValueError:
            max_count = default_count
        max_count = max(1, min(max_count, limit_max_count))

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)

        offset = 0
        paging_info_html = ""
        paging_nav_html = ""
        count_select_html = ""

        f = request.formatter

        if paging:
            log_size = log.lines()

            try:
                offset = int(request.values.get('offset', 0))
            except ValueError:
                offset = 0
            offset = max(min(offset, log_size - 1), 0)

            paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
                'start_offset': log_size - min(log_size, offset + max_count) + 1,
                'end_offset': log_size - offset,
                'total_count': log_size,
            } + f.paragraph(0)

            # generating offset navigating links
            if max_count < log_size or offset != 0:
                offset_links = []
                cur_offset = max_count
                near_count = 5 # request.cfg.pagination_size

                min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
                max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
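                # illustrative example (assumed values): with log_size=100,
                # max_count=20, offset=40 and near_count=5 this gives
                # min_offset=max(0, 59/20 - 5)=0 and max_offset=min(99/20, 2+5)=4,
                # i.e. links for offsets 0, 20, 40, 60 and 80 are considered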
                offset_added = False

                def add_offset_link(offset, caption=None):
                    offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(max_count),
                            }, css_class="info-offset-nav-link", rel="nofollow") + f.text(caption or str(log_size - offset)) + page.link_to(request, on=0) +
                        f.table_cell(0)
                    )

                # link to previous page - only if not at start
                if offset > 0:
                    add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

                # link to beginning of event log - if min_offset is not minimal
                if min_offset > 0:
                    add_offset_link(0)
                    # add a gap marker only if min_offset does not immediately follow the beginning
                    if min_offset > 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                # generating near pages links
                for cur_offset in range(min_offset, max_offset + 1):
                    # note that the current offset may not be a multiple of max_count,
                    # so we check here whether the current-offset marker should be added
                    if not offset_added and offset <= cur_offset * max_count:
                        # current info history view offset
                        offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                        offset_added = True

                    # add link, if not at this offset
                    if offset != cur_offset * max_count:
                        add_offset_link(cur_offset * max_count)

                # link to the last page of event log
                if max_offset < (log_size - 1) / max_count:
                    if max_offset < (log_size - 1) / max_count - 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))
                    add_offset_link(((log_size - 1) / max_count) * max_count)

                # special case - if offset is greater than max_offset * max_count
                if offset > max_offset * max_count:
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

                # link to next page
                if offset < (log_size - max_count):
                    add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

                # generating html
                paging_nav_html += "".join([
                    f.table(1, css_class="searchpages"),
                    f.table_row(1),
                    "".join(offset_links),
                    f.table_row(0),
                    f.table(0),
                ])

        # generating max_count switcher
        # we do this only if history_count has additional values
        if len(request.cfg.history_count) > 2:
            max_count_possibilities = list(set(request.cfg.history_count))
            max_count_possibilities.sort()
            max_count_html = []
            cur_count_added = False


            for count in max_count_possibilities:
                # the max_count value may not be in the list of predefined values
                if max_count <= count and not cur_count_added:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item info-cur-count"),
                        f.text(str(max_count)),
                        f.span(0),
                    ]))
                    cur_count_added = True

                # checking for limit_max_count to prevent showing unavailable options
                if max_count != count and count <= limit_max_count:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item"),
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(count),
                            }, css_class="info-count-link", rel="nofollow"),
                        f.text(str(count)),
                        page.link_to(request, on=0),
                        f.span(0),
                    ]))

            count_select_html += "".join([
                f.span(1, css_class="info-count-selector"),
                    f.text(" ("),
                    f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
                    f.text(")"),
                f.span(0),
            ])

        # open log for this page
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size', label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list

        def render_action(text, query, **kw):
            kw.update(dict(rel='nofollow'))
            return page.link_to(request, text, querystr=query, **kw)

        def render_file_action(text, pagename, filename, request, do):
            url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
            if url:
                f = request.formatter
                link = f.url(1, url) + f.text(text) + f.url(0)
                return link

        may_write = request.user.may.write(pagename)
        may_delete = request.user.may.delete(pagename)

        count = 0
        pgactioncount = 0
        for line in log.reverse():
            count += 1

            if paging and count <= offset:
                continue

            rev = int(line.rev)
            actions = []
            if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
                size = page.size(rev=rev)
                actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
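                # preselect the diff radio buttons: the newest revision checks
                # rev2, the second newest checks rev1, so the default diff
                # compares the two most recent revisions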
                if pgactioncount == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif pgactioncount == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
                if rev > 1:
                    diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev-1, 'rev2': rev})
                comment = line.comment
                if not comment:
                    if '/REVERT' in line.action:
                        comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                    elif '/RENAME' in line.action:
                        comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
                pgactioncount += 1
            else: # ATT*
                rev = '-'
                diff = '-'

                filename = wikiutil.url_unquote(line.extra)
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                    actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                    actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                    if may_delete:
                        actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                    if may_write:
                        actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
                else:
                    size = 0

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                "&nbsp;".join(a for a in actions if a),
            ))
            if (count >= max_count + offset) or (paging and count >= log_size):
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget

        request.write(unicode(html.H2().append(_('Revision History'))))

        if not count: # there was no entry in logfile
            request.write(_('No log entries found.'))
            return

        history_table = DataBrowserWidget(request)
        history_table.setData(history)

        div = html.DIV(id="page-history")
        div.append(html.INPUT(type="hidden", name="action", value="diff"))
        div.append(history_table.render(method="GET"))

        form = html.FORM(method="GET", action="")
        if paging:
            form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-top"),
                paging_nav_html,
                f.div(0),
            ]))
        form.append(div)
        if paging:
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
                paging_nav_html,
                f.div(0)
            ]))
        request.write(unicode(form))
Beispiel #37
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        #XXX send error message
        pass

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.form['items'][0])
        max_items = min(max_items, items_limit) # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.form.get('unique', [0])[0])
    except ValueError:
        unique = 0
    try:
        diffs = int(request.form.get('diffs', [0])[0])
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.form.get('ddiffs', [0])[0])
    except ValueError:
        ddiffs = 0

    # prepare output
    out = StringIO.StringIO()
    handler = RssGenerator(out)

    # get data
    interwiki = request.getBaseURL()
    if interwiki[-1] != "/": interwiki = interwiki + "/"

    logo = re.search(r'src="([^"]*)"', cfg.logo_string)
    if logo: logo = request.getQualifiedURL(logo.group(1))
    
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((line.action[:4] != 'SAVE') or
            ((line.pagename in pages) and unique)): continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getEditorData(request)[1]
        line.time = util.datetime.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None
        counter += 1
        if counter >= max_items:
            break
    del log

    # start SAX stream
    handler.startDocument()
    handler._out.write(
        '<!--\n'
        '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
        '    You cannot get more than %d items though.\n'
        '    \n'
        '    Add "unique=1" to get a list of changes where page names are unique,\n'
        '    i.e. where only the latest change of each page is reflected.\n'
        '    \n'
        '    Add "diffs=1" to add change diffs to the description of each items.\n'
        '    \n'
        '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
        '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
        '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
        )

    # emit channel description
    handler.startNode('channel', {
        (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
        })
    handler.simpleNode('title', cfg.sitename)
    handler.simpleNode('link', interwiki + wikiutil.quoteWikinameURL(pagename))
    handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
    if logo:
        handler.simpleNode('image', None, {
            (handler.xmlns['rdf'], 'resource'): logo,
            })
    if cfg.interwikiname:
        handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

    handler.startNode('items')
    handler.startNode(('rdf', 'Seq'))
    for item in logdata:
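        # the anchor appended below encodes the UTC edit time, e.g. an edit at
        # 2003-12-04 08:05:09 yields '#20031204080509' (illustrative timestamp)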
        link = "%s%s#%04d%02d%02d%02d%02d%02d" % ((interwiki,
                wikiutil.quoteWikinameURL(item.pagename),) + item.time[:6])
        handler.simpleNode(('rdf', 'li'), None, attr={
            (handler.xmlns['rdf'], 'resource'): link,
        })
    handler.endNode(('rdf', 'Seq'))
    handler.endNode('items')
    handler.endNode('channel')

    # emit logo data
    if logo:
        handler.startNode('image', attr={
            (handler.xmlns['rdf'], 'about'): logo,
            })
        handler.simpleNode('title', cfg.sitename)
        handler.simpleNode('link', interwiki)
        handler.simpleNode('url', logo)
        handler.endNode('image')

    # emit items
    for item in logdata:
        page = Page(request, item.pagename)
        link = interwiki + wikiutil.quoteWikinameURL(item.pagename)
        rdflink = "%s#%04d%02d%02d%02d%02d%02d" % ((link,) + item.time[:6])
        handler.startNode('item', attr={
            (handler.xmlns['rdf'], 'about'): rdflink,
        })

        # general attributes
        handler.simpleNode('title', item.pagename)
        if ddiffs:
            handler.simpleNode('link', link+"?action=diff")
        else:
            handler.simpleNode('link', link)
            
        handler.simpleNode(('dc', 'date'), util.W3CDate(item.time))

        # description
        desc_text = item.comment
        if diffs:
            # TODO: rewrite / extend wikiutil.pagediff
            # searching for the matching pages doesn't really belong here
            revisions = page.getRevList()

            rl = len(revisions)
            for idx in range(rl):
                rev = revisions[idx]
                if rev <= item.rev:
                    if idx+1 < rl:
                        lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
                        if len(lines) > 20: lines = lines[:20] + ['...\n']
                        desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, '\n'.join(lines))
                    break
        if desc_text:
            handler.simpleNode('description', desc_text)

        # contributor
        edattr = {}
        if cfg.show_hosts:
            edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
        if isinstance(item.editor, Page):
            edname = item.editor.page_name
            ##edattr[(None, 'link')] = interwiki + wikiutil.quoteWikiname(edname)
        else:
            edname = item.editor
            ##edattr[(None, 'link')] = link + "?action=info"
        
        # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
        # if you know how to do this right, please send us a patch
        
        handler.startNode(('dc', 'contributor'))
        handler.startNode(('rdf', 'Description'), attr=edattr)
        handler.simpleNode(('rdf', 'value'), edname)
        handler.endNode(('rdf', 'Description'))
        handler.endNode(('dc', 'contributor'))

        # wiki extensions
        handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
        handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
        handler.simpleNode(('wiki', 'diff'), link + "?action=diff")
        handler.simpleNode(('wiki', 'history'), link + "?action=info")
        # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor ) 
        # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA ) 

        handler.endNode('item')

    # end SAX stream
    handler.endDocument()

    # send the generated XML document
    request.http_headers(["Content-Type: text/xml; charset=%s" % config.charset] + request.nocache)
    request.write(out.getvalue())
    request.finish()
    request.no_closing_html_code = 1
Beispiel #38
0
def getblacklist(request, pagename, do_update):
    """ Get blacklist, possibly downloading new copy

    @param request: current request (request instance)
    @param pagename: bad content page name (unicode)
    @rtype: list
    @return: list of blacklisted regular expressions
    """
    from MoinMoin.PageEditor import PageEditor
    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
    invalidate_cache = False
    if do_update:
        tooold = time.time() - 3600
        mymtime = wikiutil.version2timestamp(p.mtime_usecs())
        failure = caching.CacheEntry(request, "antispam", "failure")
        fail_time = failure.mtime() # only update if no failure in last hour
        if (mymtime < tooold) and (fail_time < tooold):
            dprint("%d *BadContent too old, have to check for an update..." % tooold)
            import xmlrpclib

            # TODO replace following with import socket when we require py 2.3
            # also change the call / exception names accordingly
            from MoinMoin.support import timeoutsocket

            timeout = 15 # time out for reaching the master server via xmlrpc
            old_timeout = timeoutsocket.getDefaultSocketTimeout()
            timeoutsocket.setDefaultSocketTimeout(timeout)
            
            # For production code
            uri = "http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2"
            # For testing (use your test wiki as BadContent source)
            ##uri = "http://localhost/main/?action=xmlrpc2")
            master = xmlrpclib.ServerProxy(uri)

            try:
                # Get BadContent info
                master.putClientInfo('ANTISPAM-CHECK',
                                     request.http_host+request.script_name)
                response = master.getPageInfo(pagename)

                # It seems that response is always a dict
                if isinstance(response, dict) and 'faultCode' in response:
                    raise WikirpcError("failed to get BadContent information",
                                       response)
                
                # Compare date against local BadContent copy
                masterdate = response['lastModified']
                mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))
                dprint("master: %s mine: %s" % (masterdate, mydate))
                if mydate < masterdate:
                    # Get new copy and save
                    dprint("Fetching page from master...")
                    master.putClientInfo('ANTISPAM-FETCH',
                                         request.http_host + request.script_name)
                    response = master.getPage(pagename)
                    if isinstance(response, dict) and 'faultCode' in response:
                        raise WikirpcError("failed to get BadContent data",
                                           response)
                    p._write_file(response)

                invalidate_cache = True

            except (timeoutsocket.Timeout, timeoutsocket.error, xmlrpclib.ProtocolError), err:
                # Log the error
                # TODO: check if this does not fill the logs!
                dprint('Timeout / socket / protocol error when accessing'
                       ' moinmaster: %s' % str(err))
                # update cache to wait before the next try
                failure.update("")

            except Error, err:
                # In case of Error, we log the error and use the local
                # BadContent copy.
                dprint(str(err))

            # set back socket timeout
            timeoutsocket.setDefaultSocketTimeout(old_timeout)
Beispiel #39
0
def execute(pagename, request):
    pagename_header = '%s-%s.zip' % (pagename, datetime.now().isoformat()[:10])
    pagename_header = pagename_header.encode('ascii', 'ignore')
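    # e.g. for page u'FrontPage' exported on 2024-01-05 (illustrative date) this
    # yields 'FrontPage-2024-01-05.zip'; non-ASCII characters are dropped by the
    # 'ignore' error handler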

    request.content_type = 'application/zip'
    request.headers['Content-Disposition'] = \
        'attachment; filename="%s"' % pagename_header

    args = values_to_form(request.values)

    try:
        args = args['args'][0]
    except (KeyError, IndexError):
        args = u''

    pagelist, metakeys, _ = metatable_parseargs(request,
                                                args,
                                                get_all_keys=True)

    renameDict = dict()

    for page in pagelist:
        metas = get_metas(request,
                          page, ["gwikirename"],
                          abs_attach=False,
                          checkAccess=False)
        renameList = metas["gwikirename"]
        if renameList:
            renameDict[page] = renameList

    output = StringIO()
    zip = zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED)

    userid = user.getUserIdentification(request)
    script = [
        packLine(['MoinMoinPackage', '1']),
    ]
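    # packLine presumably joins its fields with '|' (the MoinMoin package
    # script format), so this header line becomes u'MoinMoinPackage|1'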
    counter = 0

    for pagename in pagelist:
        counter += 1
        page = Page(request, pagename)
        timestamp = wikiutil.version2timestamp(page.mtime_usecs())
        # Underlay pages are in epoch 0, zipfile in python 2.7 does
        # not support this.
        if not timestamp:
            pagefile, rev, exists = page.get_rev()
            if rev == 99999999:
                # We should never get here
                log.error("Page %s neither in pages or underlay, skipping." %
                          (pagename))
                continue
            timestamp = os.path.getctime(pagefile)
        pagetext = page.get_raw_body().encode("utf-8")
        filename = str(counter)
        zinfo = zipfile.ZipInfo(
            filename=filename,
            date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        zip.writestr(zinfo, pagetext)

        targetNameList = renameDict.get(pagename, [pagename])
        for targetName in targetNameList:
            script.append(
                packLine(["AddRevision", filename, targetName, userid, ""]))

        for attachment in _get_files(request, pagename):
            counter += 1
            sourcefile = AttachFile.getFilename(request, pagename, attachment)
            filename = str(counter) + "-attachment"
            zip.write(sourcefile, filename)
            script.append(
                packLine([
                    "AddAttachment", filename, attachment, pagename, userid, ""
                ]))

    zip.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    zip.close()

    request.write(output.getvalue())
Beispiel #40
0
def getblacklist(request, pagename, do_update):
    """ Get blacklist, possibly downloading new copy

    @param request: current request (request instance)
    @param pagename: bad content page name (unicode)
    @rtype: list
    @return: list of blacklisted regular expressions
    """
    from MoinMoin.PageEditor import PageEditor
    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
    if do_update:
        tooold = time.time() - 1800
        failure = caching.CacheEntry(request, "antispam", "failure", scope='wiki')
        fail_time = failure.mtime() # only update if no failure in the last half hour
        if (mymtime < tooold) and (fail_time < tooold):
            logging.info("%d *BadContent too old, have to check for an update..." % tooold)
            import xmlrpclib
            import socket

            timeout = 15 # time out for reaching the master server via xmlrpc
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)

            master_url = request.cfg.antispam_master_url
            master = xmlrpclib.ServerProxy(master_url)
            try:
                # Get BadContent info
                master.putClientInfo('ANTISPAM-CHECK', request.url)
                response = master.getPageInfo(pagename)

                # It seems that response is always a dict
                if isinstance(response, dict) and 'faultCode' in response:
                    raise WikirpcError("failed to get BadContent information",
                                       response)

                # Compare date against local BadContent copy
                masterdate = response['lastModified']

                if isinstance(masterdate, datetime.datetime):
                    # for python 2.5
                    mydate = datetime.datetime(*tuple(time.gmtime(mymtime))[0:6])
                else:
                    # for python <= 2.4.x
                    mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))

                logging.debug("master: %s mine: %s" % (masterdate, mydate))
                if mydate < masterdate:
                    # Get new copy and save
                    logging.info("Fetching page from %s..." % master_url)
                    master.putClientInfo('ANTISPAM-FETCH', request.url)
                    response = master.getPage(pagename)
                    if isinstance(response, dict) and 'faultCode' in response:
                        raise WikirpcError("failed to get BadContent data", response)
                    p._write_file(response)
                    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
                else:
                    failure.update("") # we didn't get a modified version, this avoids
                                       # permanent polling for every save when there
                                       # is no updated master page

            except (socket.error, xmlrpclib.ProtocolError), err:
                logging.error('Timeout / socket / protocol error when accessing %s: %s' % (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")

            except (xmlrpclib.Fault, ), err:
                logging.error('Fault on %s: %s' % (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")

            except Error, err:
                # In case of Error, we log the error and use the local BadContent copy.
                logging.error(str(err))
Beispiel #41
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values["items"])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get("unique", 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get("diffs", 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get("ddiffs", 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get("filter")
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not (urlfilter.match(line.pagename)):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if not line.action.startswith("SAVE") or ((line.pagename in pages) and unique):
            continue
        # if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = "application/rss+xml"
        request.expires = expires
        request.last_modified = lastmod
        request.headers["Etag"] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        FEED_HEADER_COMMENT = """
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.
    
    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each items.
    
    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        """ % (
            items_limit,
            max_items,
            unique,
            diffs,
            ddiffs,
        )

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u"", ATOM_NAMESPACE), NS(u"wiki", RSSWIKI_NAMESPACE), E_CURSOR((ATOM_NAMESPACE, u"feed")))
        )
        f.send(E((ATOM_NAMESPACE, u"id"), full_url(request, page).encode(config.charset))),
        f.send(E((ATOM_NAMESPACE, u"title"), cfg.sitename.encode(config.charset))),
        f.send(E((ATOM_NAMESPACE, u"link"), {u"href": request.url_root.encode(config.charset)})),
        f.send(E((ATOM_NAMESPACE, u"summary"), ("RecentChanges at %s" % cfg.sitename).encode(config.charset))),
        # Icon
        # E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        # if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            # link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={"action": "diff"})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(
                                request, item.pagename, revisions[idx + 1], item.pagename, 0, ignorews=1
                            )
                            if len(lines) > 20:
                                lines = lines[:20] + ["...\n"]
                            lines = "\n".join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = "%s\n<pre>\n%s\n</pre>\n" % (desc_text, lines)
                        break
            # if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            # if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == "interwiki":
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={"action": "info"})

            f.send(
                E(
                    (ATOM_NAMESPACE, u"entry"),
                    E((ATOM_NAMESPACE, u"id"), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"title"), item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"updated"), timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u"link"), {u"href": link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u"summary"), desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"author"), E((ATOM_NAMESPACE, u"name"), edname.encode(config.charset))),
                    # E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u"wiki:version"), ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:status"), (u"deleted", u"updated")[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u"wiki:diff"), link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:history"), history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                )
            )

        # emit logo data
        # if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
Beispiel #42
0
def print_abandoned(macro):
    request = macro.request
    _ = request.getText
    output = []
    d = {}
    page = macro.formatter.page
    pagename = page.page_name
    d['page'] = page
    d['q_page_name'] = wikiutil.quoteWikinameURL(pagename)
    msg = None

    pages = request.rootpage.getPageList()
    last_edits = []
    for name in pages:
        log = Page(request, name).editlog_entry()
        if log:
            last_edits.append(log)
        #   we don't want all Systempages at the beginning of the abandoned list
        #    line = editlog.EditLogLine({})
        #    line.pagename = page
        #    line.ed_time = 0
        #    line.comment = 'not edited'
        #    line.action = ''
        #    line.userid = ''
        #    line.hostname = ''
        #    line.addr = ''
        #    last_edits.append(line)
    del pages
    last_edits.sort()

    # set max size in days
    max_days = min(int(request.values.get('max_days', 0)), _DAYS_SELECTION[-1])
    # default to _MAX_DAYS for users without bookmark
    if not max_days:
        max_days = _MAX_DAYS
    d['rc_max_days'] = max_days

    # give known user the option to extend the normal display
    if request.user.valid:
        d['rc_days'] = _DAYS_SELECTION
    else:
        d['rc_days'] = None

    d['rc_update_bookmark'] = None
    output.append(request.theme.recentchanges_header(d))

    length = len(last_edits)

    index = 0
    last_index = 0
    day_count = 0

    if length > 0:
        line = last_edits[index]
        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        this_day = line.time_tuple[0:3]
        day = this_day

    while 1:

        index += 1

        if index > length:
            break

        if index < length:
            line = last_edits[index]
            line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
            day = line.time_tuple[0:3]

        if (day != this_day) or (index == length):
            d['bookmark_link_html'] = None
            d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(last_edits[last_index].ed_time_usecs))
            output.append(request.theme.recentchanges_daybreak(d))
            this_day = day

            for page in last_edits[last_index:index]:
                output.append(format_page_edits(macro, [page], None))
            last_index = index
            day_count += 1
            if (day_count >= max_days):
                break

    d['rc_msg'] = msg
    output.append(request.theme.recentchanges_footer(d))
    return ''.join(output)
Beispiel #43
0
def macro_RecentChanges(macro, abandoned=False):
    # handle abandoned keyword
    if abandoned:
        return print_abandoned(macro)

    request = macro.request
    _ = request.getText
    output = []
    user = request.user
    page = macro.formatter.page
    pagename = page.page_name

    d = {}
    d['page'] = page
    d['q_page_name'] = wikiutil.quoteWikinameURL(pagename)

    log = editlog.EditLog(request)

    tnow = time.time()
    msg = ""

    # get bookmark from valid user
    bookmark_usecs = request.user.getBookmark() or 0

    # add bookmark link if valid user
    d['rc_curr_bookmark'] = None
    d['rc_update_bookmark'] = None
    if request.user.valid:
        d['rc_curr_bookmark'] = _('(no bookmark set)')
        if bookmark_usecs:
            currentBookmark = wikiutil.version2timestamp(bookmark_usecs)
            currentBookmark = user.getFormattedDateTime(currentBookmark)
            currentBookmark = _('(currently set to %s)') % currentBookmark
            deleteBookmark = page.link_to(request, _("Delete bookmark"), querystr={'action': 'bookmark', 'time': 'del'}, rel='nofollow')
            d['rc_curr_bookmark'] = currentBookmark + ' ' + deleteBookmark

        version = wikiutil.timestamp2version(tnow)
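        # timestamp2version converts seconds to MoinMoin's microsecond
        # 'version' timestamps, i.e. roughly int(tnow * 1000000)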
        d['rc_update_bookmark'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % version}, rel='nofollow')

    # set max size in days
    max_days = min(int(request.values.get('max_days', 0)), _DAYS_SELECTION[-1])
    # default to _MAX_DAYS for users without bookmark
    if not max_days and not bookmark_usecs:
        max_days = _MAX_DAYS
    d['rc_max_days'] = max_days

    # give known user the option to extend the normal display
    if request.user.valid:
        d['rc_days'] = _DAYS_SELECTION
    else:
        d['rc_days'] = []

    output.append(request.theme.recentchanges_header(d))

    pages = {}
    ignore_pages = {}

    today = request.user.getTime(tnow)[0:3]
    this_day = today
    day_count = 0

    for line in log.reverse():

        if not request.user.may.read(line.pagename):
            continue

        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        day = line.time_tuple[0:3]
        hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
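        # when no bookmark is set, ed_time_usecs is compared with itself,
        # so hilite is always False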

        if (this_day != day or (not hilite and not max_days)) and len(pages) > 0:
            # new day or bookmark reached: print out stuff
            this_day = day
            for p in pages:
                ignore_pages[p] = None
            pages = pages.values()
            pages.sort(cmp_lines)
            pages.reverse()

            if request.user.valid:
                bmtime = pages[0][0].ed_time_usecs
                d['bookmark_link_html'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % bmtime}, rel='nofollow')
            else:
                d['bookmark_link_html'] = None
            d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
            output.append(request.theme.recentchanges_daybreak(d))

            for p in pages:
                output.append(format_page_edits(macro, p, bookmark_usecs))
            pages = {}
            day_count += 1
            if max_days and (day_count >= max_days):
                break

        elif this_day != day:
            # new day but no changes
            this_day = day

        if line.pagename in ignore_pages:
            continue

        # end listing by default if user has a bookmark and we reached it
        if not max_days and not hilite:
            msg = _('[Bookmark reached]')
            break

        if line.pagename in pages:
            pages[line.pagename].append(line)
        else:
            pages[line.pagename] = [line]
    else:
        if len(pages) > 0:
            # end of loop reached: print out stuff
            # XXX duplicated code from above
            # but above does not trigger if we have the first day in wiki history
            for p in pages:
                ignore_pages[p] = None
            pages = pages.values()
            pages.sort(cmp_lines)
            pages.reverse()

            if request.user.valid:
                bmtime = pages[0][0].ed_time_usecs
                d['bookmark_link_html'] = page.link_to(request, _("Set bookmark"), querystr={'action': 'bookmark', 'time': '%d' % bmtime}, rel='nofollow')
            else:
                d['bookmark_link_html'] = None
            d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
            output.append(request.theme.recentchanges_daybreak(d))

            for p in pages:
                output.append(format_page_edits(macro, p, bookmark_usecs))


    d['rc_msg'] = msg
    output.append(request.theme.recentchanges_footer(d))

    return ''.join(output)
Beispiel #44
0
def format_page_edits(macro, lines, bookmark_usecs):
    request = macro.request
    _ = request.getText
    d = {} # dict for passing stuff to theme
    line = lines[0]
    pagename = line.pagename
    rev = int(line.rev)
    tnow = time.time()
    is_new = lines[-1].action == 'SAVENEW'
    is_renamed = lines[-1].action == 'SAVE/RENAME'
    # check whether this page is newer than the user's bookmark
    hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
    page = Page(request, pagename)

    html_link = ''
    if not page.exists():
        img = request.theme.make_icon('deleted')
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain='standard'):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, img, querystr={'action': 'diff'}, rel='nofollow')
        else:
            # just indicate page was deleted
            html_link = img
    elif page.isConflict():
        img = request.theme.make_icon('conflict')
        html_link = page.link_to_raw(request, img, querystr={'action': 'edit'}, rel='nofollow')
    elif hilite:
        # show special icons if change was after the user's bookmark
        if is_new:
            img = 'new'
        elif is_renamed:
            img = 'renamed'
        else:
            img = 'updated'
        img = request.theme.make_icon(img)
        html_link = page.link_to_raw(request, img, querystr={'action': 'diff', 'date': '%d' % bookmark_usecs}, rel='nofollow')
    else:
        # show "DIFF" icon else
        img = request.theme.make_icon('diffrc')
        html_link = page.link_to_raw(request, img, querystr={'action': 'diff'}, rel='nofollow')

    # print name of page, with a link to it
    force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH

    d['icon_html'] = html_link
    d['pagelink_html'] = page.link_to(request, text=page.split_title(force=force_split))

    # print time of change
    d['time_html'] = None
    if request.cfg.changed_time_fmt:
        tdiff = long(tnow - wikiutil.version2timestamp(long(line.ed_time_usecs))) / 60 # has to be long for py 2.2.x
        if tdiff < 100:
            d['time_html'] = _("%(mins)dm ago") % {
                'mins': tdiff}
        else:
            d['time_html'] = time.strftime(request.cfg.changed_time_fmt, line.time_tuple)

    # print editor name or IP
    d['editors'] = None
    if request.cfg.show_names:
        if len(lines) > 1:
            counters = {}
            for idx in range(len(lines)):
                name = lines[idx].getEditor(request)
                if not name in counters:
                    counters[name] = []
                counters[name].append(idx+1)
            poslist = [(v, k) for k, v in counters.items()]
            poslist.sort()
            d['editors'] = []
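            # util.rangelist collapses the 1-based edit positions into ranges,
            # e.g. [1, 2, 3, 5] presumably renders as '1-3,5'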
            for positions, name in poslist:
                d['editors'].append("%s&nbsp;[%s]" % (
                    name, util.rangelist(positions)))
        else:
            d['editors'] = [line.getEditor(request)]

    comments = []
    for idx in range(len(lines)):
        comment = format_comment(request, lines[idx])
        if comment:
            comments.append((idx+1, wikiutil.escape(comment)))

    d['changecount'] = len(lines)
    d['comments'] = comments

    img = request.theme.make_icon('info')
    d['info_html'] = page.link_to_raw(request, img, querystr={'action': 'info'}, rel='nofollow')

    return request.theme.recentchanges_entry(d)
Beispiel #45
0
def print_abandoned(macro):
    request = macro.request
    _ = request.getText
    output = []
    d = {}
    page = macro.formatter.page
    pagename = page.page_name
    d["page"] = page
    d["q_page_name"] = wikiutil.quoteWikinameURL(pagename)
    msg = None

    pages = request.rootpage.getPageList()
    last_edits = []
    for name in pages:
        log = Page(request, name).editlog_entry()
        if log:
            last_edits.append(log)
        #   we don't want all Systempages at the beginning of the abandoned list
        #    line = editlog.EditLogLine({})
        #    line.pagename = page
        #    line.ed_time = 0
        #    line.comment = 'not edited'
        #    line.action = ''
        #    line.userid = ''
        #    line.hostname = ''
        #    line.addr = ''
        #    last_edits.append(line)
    del pages
    last_edits.sort()

    # set max size in days
    max_days = min(int(request.values.get("max_days", 0)), _DAYS_SELECTION[-1])
    # default to _MAX_DAYS for users without bookmark
    if not max_days:
        max_days = _MAX_DAYS
    d["rc_max_days"] = max_days

    # give known user the option to extend the normal display
    if request.user.valid:
        d["rc_days"] = _DAYS_SELECTION
    else:
        d["rc_days"] = None

    d["rc_update_bookmark"] = None
    output.append(request.theme.recentchanges_header(d))

    length = len(last_edits)

    index = 0
    last_index = 0
    day_count = 0

    if length > 0:
        line = last_edits[index]
        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        this_day = line.time_tuple[0:3]
        day = this_day

    while 1:

        index += 1

        if index > length:
            break

        if index < length:
            line = last_edits[index]
            line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
            day = line.time_tuple[0:3]

        if (day != this_day) or (index == length):
            d["bookmark_link_html"] = None
            d["date"] = request.user.getFormattedDate(wikiutil.version2timestamp(last_edits[last_index].ed_time_usecs))
            output.append(request.theme.recentchanges_daybreak(d))
            this_day = day

            for page in last_edits[last_index:index]:
                output.append(format_page_edits(macro, [page], None))
            last_index = index
            day_count += 1
            if day_count >= max_days:
                break

    d["rc_msg"] = msg
    output.append(request.theme.recentchanges_footer(d))
    return "".join(output)
Beispiel #46
0
    def _addRevisionHistory(self, targetNode):
        """
        This will generate a revhistory element which it will populate with
        revision nodes. Each revision has the revnumber, date and author-
        initial elements, and if a comment was supplied, the comment element.

        The date elements format depends on the users settings, so it will
        be in the same format as the revision history as viewed in the
        page info on the wiki.

        The authorinitials will be the UserName or if it was an anonymous
        edit, then it will be the hostname/ip-address.

        The revision history of included documents is NOT included at the
        moment due to technical difficulties.
        """
        _ = self.request.getText
        log = editlog.EditLog(self.request, rootpagename=self.title)
        user_cache = {}

        history = self.doc.createElement("revhistory")

        # read in the complete log of this page
        for line in log.reverse():
            if not line.action in (
                    'SAVE',
                    'SAVENEW',
                    'SAVE/REVERT',
                    'SAVE/RENAME',
            ):
                #Let's ignore adding of attachments
                continue
            revision = self.doc.createElement("revision")

            # Revision number (without preceding zeros)
            self._addTextElem(revision, "revnumber", line.rev.lstrip('0'))

            # Date of revision
            date_text = self.request.user.getFormattedDateTime(
                wikiutil.version2timestamp(line.ed_time_usecs))
            self._addTextElem(revision, "date", date_text)

            # Author of revision
            if not (line.userid in user_cache):
                user_cache[line.userid] = user.User(
                    self.request, line.userid, auth_method="text_docbook:740")
            author = user_cache[line.userid]
            if author and author.name:
                self._addTextElem(revision, "authorinitials", author.name)
            else:
                self._addTextElem(revision, "authorinitials", line.hostname)

            # Comment from author of revision
            comment = line.comment
            if not comment:
                if '/REVERT' in line.action:
                    comment = _("Revert to revision %(rev)d.") % {
                        'rev': int(line.extra)
                    }
                elif '/RENAME' in line.action:
                    comment = _("Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': line.extra
                    }
            if comment:
                self._addTextElem(revision, "revremark", comment)

            history.appendChild(revision)

        if history.firstChild:
            # only add revision history if there is history to add
            targetNode.appendChild(history)
def format_page_edits(macro, lines, bookmark_usecs):
    request = macro.request
    _ = request.getText
    d = {}  # dict for passing stuff to theme
    line = lines[0]
    pagename = line.pagename
    rev = int(line.rev)
    tnow = time.time()
    is_new = lines[-1].action == "SAVENEW"
    is_renamed = lines[-1].action == "SAVE/RENAME"
    # check whether this page is newer than the user's bookmark
    hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
    page = Page(request, pagename)

    html_link = ""
    if not page.exists():
        img = request.theme.make_icon("deleted")
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain="standard"):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, img, querystr={"action": "diff"}, rel="nofollow")
        else:
            # just indicate page was deleted
            html_link = img
    elif page.isConflict():
        img = request.theme.make_icon("conflict")
        html_link = page.link_to_raw(request, img, querystr={"action": "edit"}, rel="nofollow")
    elif hilite:
        # show special icons if change was after the user's bookmark
        if is_new:
            img = "new"
        elif is_renamed:
            img = "renamed"
        else:
            img = "updated"
        img = request.theme.make_icon(img)
        html_link = page.link_to_raw(
            request, img, querystr={"action": "diff", "date": "%d" % bookmark_usecs}, rel="nofollow"
        )
    else:
        # show "DIFF" icon else
        img = request.theme.make_icon("diffrc")
        html_link = page.link_to_raw(request, img, querystr={"action": "diff"}, rel="nofollow")

    # print name of page, with a link to it
    force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH

    d["icon_html"] = html_link
    d["pagelink_html"] = page.link_to(request, text=page.split_title(force=force_split))

    # print time of change
    d["time_html"] = None
    if request.cfg.changed_time_fmt:
        tdiff = long(tnow - wikiutil.version2timestamp(long(line.ed_time_usecs))) / 60  # has to be long for py 2.2.x
        if tdiff < 100:
            d["time_html"] = _("%(mins)dm ago") % {"mins": tdiff}
        else:
            d["time_html"] = time.strftime(request.cfg.changed_time_fmt, line.time_tuple)

    # print editor name or IP
    d["editors"] = None
    if request.cfg.show_names:
        if len(lines) > 1:
            counters = {}
            editorcache = {}
            for idx in range(len(lines)):
                editorkey = lines[idx].addr, lines[idx].hostname, lines[idx].userid
                if editorkey not in editorcache:
                    editorcache[editorkey] = lines[idx].getEditor(request)
                name = editorcache[editorkey]
                if not name in counters:
                    counters[name] = []
                counters[name].append(idx + 1)
            poslist = [(v, k) for k, v in counters.items()]
            poslist.sort()
            d["editors"] = []
            for positions, name in poslist:
                d["editors"].append("%s&nbsp;[%s]" % (name, util.rangelist(positions)))
        else:
            d["editors"] = [line.getEditor(request)]

    comments = []
    for idx in range(len(lines)):
        comment = format_comment(request, lines[idx])
        if comment:
            comments.append((idx + 1, wikiutil.escape(comment)))

    d["changecount"] = len(lines)
    d["comments"] = comments

    img = request.theme.make_icon("info")
    d["info_html"] = page.link_to_raw(request, img, querystr={"action": "info"}, rel="nofollow")

    return request.theme.recentchanges_entry(d)
    def history(page, pagename, request):
        # show history as default
        _ = request.getText

        # open log for this page
        from MoinMoin.logfile import editlog
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size',  label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            # removed, not HTML 4.01 compliant: href="%s" % page.url(request)
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_hosts),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list
        revisions = page.getRevList()
        versions = len(revisions)

        may_revert = request.user.may.revert(pagename)

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)
        count = 0
        for line in log.reverse():
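            # the edit log is iterated newest-first; count tracks how many rows were emitted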
            rev = int(line.rev)
            actions = ""
            if line.action in ['SAVE', 'SAVENEW', 'SAVE/REVERT']:
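                # for page revisions, build view/raw/print links
                # (plus a revert link for older revisions, if permitted)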
                if count == 0: # latest page
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('view'),
                        querystr=''))
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('raw'),
                        querystr='action=raw'))
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('print'),
                        querystr='action=print'))
                else:
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('view'),
                        querystr='action=recall&rev=%d' % rev))
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('raw'),
                        querystr='action=raw&rev=%d' % rev))
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('print'),
                        querystr='action=print&rev=%d' % rev))
                    if may_revert:
                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                            text=_('revert'),
                            querystr='action=revert&rev=%d' % (rev,)))
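                # preselect the two newest revisions in the diff radio buttons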
                if count == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif count == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)

                comment = line.comment
                if not comment and line.action.find('/REVERT') != -1:
                    comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                size = page.size(rev=rev)
            else: # ATT*
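                # attachment log entries (ATTNEW/ATTDRW/ATTDEL) have no page revision, so no diff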
                rev = '-'
                diff = '-'
                filename = line.extra
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                size = 0
                if line.action != 'ATTDEL':
                    from MoinMoin.action import AttachFile
                    page_dir = AttachFile.getAttachDir(request, pagename)
                    filepath = os.path.join(page_dir, filename)
                    try:
                        # FIXME: computes the wrong path for non-standard attachment names
                        size = os.path.getsize(filepath)
                    except OSError:
                        pass
                    if line.action == 'ATTNEW':
                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                            text=_('view'),
                            querystr='action=AttachFile&do=view&target=%s' % filename))
                    elif line.action == 'ATTDRW':
                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                            text=_('edit'),
                            querystr='action=AttachFile&drawing=%s' % filename.replace(".draw", "")))

                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('get'),
                        querystr='action=AttachFile&do=get&target=%s' % filename))
                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
                        text=_('del'),
                        querystr='action=AttachFile&do=del&target=%s' % filename))
                    # XXX should filename be passed through wikiutil.escape() here?

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                actions,
            ))
            count += 1
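            # cap the table at the 100 most recent log entries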
            if count >= 100:
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget
        from MoinMoin.formatter.text_html import Formatter

        request.write('<h2>%s</h2>\n' % _('Revision History'))

        if not count: # no entries found in the edit log
            request.write(_('No log entries found.'))
            return

        # TODO: this form activates revert, which should use POST, but the
        # other actions should use GET. Maybe we should move revert into the
        # page view itself instead of this form.
        request.write('<form method="GET" action="">\n')
        request.write('<div id="page-history">\n')
        request.write('<input type="hidden" name="action" value="diff">\n')

        request.formatter = Formatter(request)
        history_table = DataBrowserWidget(request)
        history_table.setData(history)
        history_table.render()
        request.write('</div>\n')
        request.write('</form>\n')
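
    # A minimal usage sketch (an assumption, not part of the original example):
    # a nested helper like history() would typically be invoked from the
    # enclosing action's execute() entry point once the Page object exists.
    # The execute() signature below is illustrative only.
    #
    #   def execute(pagename, request):
    #       page = Page(request, pagename)
    #       history(page, pagename, request)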