Example #1
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except Exception:  # no edit-log entry yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = timefuncs.formathttpdate(time.time() + 24 * 3600)

        httpheaders = [
            "Content-Type: text/plain; charset=UTF-8",
            "Expires: %s" % expires,
            "Last-Modified: %s" % timestamp,
            "Etag: %s" % etag,
        ]

        # send the generated plain text document
        request.emit_http_headers(httpheaders)

        baseurl = request.getBaseURL()
        if not baseurl.endswith("/"):
            baseurl += "/"

        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode("utf-8"))
Example #2
 def end_headers(self):
     """overload the default end_headers, inserting expires header"""
     if self.expires:
         now = time.time()
         expires = now + self.expires
         self.send_header('Expires', timefuncs.formathttpdate(expires))
     SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
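
Example #2's Expires value is just "now plus lifetime" rendered as an HTTP date. timefuncs.formathttpdate is MoinMoin-specific; a self-contained sketch of the same computation with the standard library:

import time
from email.utils import formatdate

lifetime = 24 * 3600  # cache lifetime in seconds (one day)
# usegmt=True yields an RFC 1123 date like "Thu, 01 Jan 2026 00:00:00 GMT"
print("Expires: %s" % formatdate(time.time() + lifetime, usegmt=True))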
Example #3
def execute(pagename, request):
    status = []
    for sistername, sisterurl in request.cfg.sistersites:
        arena = 'sisters'
        key = sistername
        cache = caching.CacheEntry(request,
                                   arena,
                                   key,
                                   scope='farm',
                                   use_pickle=True)
        if cache.exists():
            data = cache.content()
        else:
            data = {'lastmod': ''}
        uo = urllib.URLopener()
        uo.version = 'MoinMoin SisterPage list fetcher 1.0'
        lastmod = data['lastmod']
        if lastmod:
            uo.addheader('If-Modified-Since', lastmod)
        try:
            sisterpages = {}
            f = uo.open(sisterurl)
            for line in f:
                line = line.strip()
                try:
                    page_url, page_name = line.split(' ', 1)
                    sisterpages[page_name.decode('utf-8')] = page_url
                except ValueError:
                    pass  # ignore malformed or undecodable lines
            try:
                lastmod = f.info()["Last-Modified"]
            except KeyError:  # server sent no Last-Modified header
                lastmod = timefuncs.formathttpdate(time.time())
            f.close()
            data['lastmod'] = lastmod
            data['sisterpages'] = sisterpages
            cache.update(data)
            status.append(u"Site: %s Status: Updated. Pages: %d" %
                          (sistername, len(sisterpages)))
        except IOError, (title, code, msg, headers):  # code e.g. 304
            status.append(u"Site: %s Status: Not updated." % sistername)
        except TypeError:  # catch bug in python 2.5: "EnvironmentError expected at most 3 arguments, got 4"
            status.append(u"Site: %s Status: Not updated." % sistername)
Example #4
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except Exception:  # no edit-log entry yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24 * 3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated plain text document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(
                p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Example #5
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except Exception:  # no edit-log entry yet
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24*3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated plain text document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Example #6
def execute(pagename, request):
    status = []
    for sistername, sisterurl in request.cfg.sistersites:
        arena = 'sisters'
        key = sistername
        cache = caching.CacheEntry(request, arena, key, scope='farm', use_pickle=True)
        if cache.exists():
            data = cache.content()
        else:
            data = {'lastmod': ''}
        uo = urllib.URLopener()
        uo.version = 'MoinMoin SisterPage list fetcher 1.0'
        lastmod = data['lastmod']
        if lastmod:
            uo.addheader('If-Modified-Since', lastmod)
        try:
            sisterpages = {}
            f = uo.open(sisterurl)
            for line in f:
                line = line.strip()
                try:
                    page_url, page_name = line.split(' ', 1)
                    sisterpages[page_name.decode('utf-8')] = page_url
                except ValueError:
                    pass  # ignore malformed or undecodable lines
            try:
                lastmod = f.info()["Last-Modified"]
            except KeyError:  # server sent no Last-Modified header
                lastmod = timefuncs.formathttpdate(time.time())
            f.close()
            data['lastmod'] = lastmod
            data['sisterpages'] = sisterpages
            cache.update(data)
            status.append(u"Site: %s Status: Updated. Pages: %d" % (sistername, len(sisterpages)))
        except IOError, (title, code, msg, headers): # code e.g. 304
            status.append(u"Site: %s Status: Not updated." % sistername)
        except TypeError: # catch bug in python 2.5: "EnvironmentError expected at most 3 arguments, got 4"
            status.append(u"Site: %s Status: Not updated." % sistername)
Example #7
def get_file(pagename, request):
    import shutil

    filename, fpath = _access_file(pagename, request)
    if not filename:
        return  # error msg already sent in _access_file

    timestamp = timefuncs.formathttpdate(int(os.path.getmtime(fpath)))
    if request.if_modified_since == timestamp:
        request.http_headers(["Status: 304 Not modified"])
        request.setResponseCode(304)
    else:
        # get mimetype
        mt, enc = mimetypes.guess_type(filename)
        if not mt:
            mt = "application/octet-stream"

        # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
        # There is no solution that is compatible to IE except stripping non-ascii chars
        filename_enc = filename.encode(config.charset)

        # for dangerous files (like .html), when we are in danger of cross-site-scripting attacks,
        # we just let the user store them to disk ('attachment').
        # For safe files, we directly show them inline (this also works better for IE).
        dangerous = mt in request.cfg.mimetypes_xss_protect
        content_dispo = dangerous and 'attachment' or 'inline'

        request.http_headers([
            'Content-Type: %s' % mt,
            'Last-Modified: %s' % timestamp, # TODO maybe add a short Expires: header here?
            'Content-Length: %d' % os.path.getsize(fpath),
            'Content-Disposition: %s; filename="%s"' % (content_dispo, filename_enc),
        ])

        # send data
        shutil.copyfileobj(open(fpath, 'rb'), request, 8192)

    raise MoinMoinNoFooter
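
The content_dispo line above uses the pre-Python-2.5 "cond and a or b" ternary idiom, which is safe here only because both 'attachment' and 'inline' are truthy. A sketch of the same policy as a small helper (names are illustrative):

def content_disposition(mime_type, xss_protect_types):
    """Force script-capable types (e.g. text/html) to download; show the rest inline."""
    dangerous = mime_type in xss_protect_types
    return 'attachment' if dangerous else 'inline'

assert content_disposition('text/html', ['text/html']) == 'attachment'
assert content_disposition('image/png', ['text/html']) == 'inline'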
Example #8
def _do_get(pagename, request):
    _ = request.getText

    pagename, filename, fpath = _access_file(pagename, request)
    if not request.user.may.read(pagename):
        return _('You are not allowed to get attachments from this page.')
    if not filename:
        return # error msg already sent in _access_file

    timestamp = timefuncs.formathttpdate(int(os.path.getmtime(fpath)))
    if request.if_modified_since == timestamp:
        request.emit_http_headers(["Status: 304 Not modified"])
    else:
        mt = wikiutil.MimeType(filename=filename)
        content_type = mt.content_type()
        mime_type = mt.mime_type()

        # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
        # There is no solution that is compatible to IE except stripping non-ascii chars
        filename_enc = filename.encode(config.charset)

        # for dangerous files (like .html), when we are in danger of cross-site-scripting attacks,
        # we just let the user store them to disk ('attachment').
        # For safe files, we directly show them inline (this also works better for IE).
        dangerous = mime_type in request.cfg.mimetypes_xss_protect
        content_dispo = dangerous and 'attachment' or 'inline'

        request.emit_http_headers([
            'Content-Type: %s' % content_type,
            'Last-Modified: %s' % timestamp,
            'Content-Length: %d' % os.path.getsize(fpath),
            'Content-Disposition: %s; filename="%s"' % (content_dispo, filename_enc),
        ])

        # send data
        request.send_file(open(fpath, 'rb'))
Example #9
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return
    if request.isSpiderAgent: # reduce bot cpu usage
        return ''

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get('items', max_items)),
                        items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get('unique', unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get('diffs', diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get('lines', max_lines)),
                        lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get('show_att', show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get('page', page_pattern)
    except ValueError:
        pass

    # if we are just interested in a specific page, using the local edit-log
    # of that page is much faster than the global one - esp. if the page was
    # NOT recently changed and the global edit-log is rather big.
    kw = dict(rootpagename=page_pattern) if is_single_page_match(page_pattern) else {}
    log = editlog.EditLog(request, **kw)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((not show_att and not line.action.startswith('SAVE')) or
            ((line.pagename in pages) and unique) or
            not match_page(line.pagename, page_pattern)):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique,
        max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._write(
            u'<!--\n'
            u'    Add an "items=nnn" URL parameter to get more than the \n'
            u'    default %(def_max_items)d items. You cannot get more than \n'
            u'    %(items_limit)d items though.\n'
            u'    \n'
            u'    Add "unique=1" to get a list of changes where page names are unique,\n'
            u'    i.e. where only the latest change of each page is reflected.\n'
            u'    \n'
            u'    Add "diffs=1" to add change diffs to the description of each items.\n'
            u'    \n'
            u'    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u'    \n'
            u'    Add "lines=nnn" to change maximum number of diff/body lines \n'
            u'    to show. Cannot be more than %(lines_limit)d.\n'
            u'    \n'
            u'    Add "show_att=1" to show items related to attachments.\n'
            u'    \n'
            u'    Add "page=pattern" to show feed only for specific pages.\n'
            u'    Pattern can be empty (it would match to all pages), \n'
            u'    can start with circumflex (it would be interpreted as \n'
            u'    regular expression in this case), end with slash (for \n'
            u'    getting feed for page tree) or point to specific page (if \n'
            u'    none of the above can be applied).\n'
            u'    \n'
            u'    Current settings: items=%(max_items)i, unique=%(unique)i, \n'
            u'    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n'
            u'    show_att=%(show_att)i\n'
            u'-->\n' % locals()
            )

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
            })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
                })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={(handler.xmlns['rdf'], 'resource'): link, })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
                })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={(handler.xmlns['rdf'], 'about'): rdflink, })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'): # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == 'ATTNEW':
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode('link', attach_url(request,
                            cur_pagename, filename, do='view'))

                    comment = _(u"Upload of attachment '%(filename)s'.") % {
                        'filename': filename}

                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode('link', full_url(request, page,
                            querystr={'action': 'AttachFile'}))

                    comment = _(u"Attachment '%(filename)s' deleted.") % {
                        'filename': filename}

                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode('link', attach_url(request,
                            cur_pagename, filename, do='view'))

                    comment = _(u"Drawing '%(filename)s' saved.") % {
                        'filename': filename}

            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {
                        'rev': to_rev}) + "<br />" \
                        + _("Comment:") + " " + comment

                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': item.extra}) + "<br />" \
                        + _("Comment:") + " " + comment
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename

                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode('link', full_url(request, page,
                            querystr={'action': 'recall',
                                      'rev': str(item_rev)}))
                    else:
                        handler.simpleNode('link', full_url(request, page,
                            querystr={'action': 'diff',
                                      'rev1': str(item_rev),
                                      'rev2': str(item_rev - 1)}))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename,
                            rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request, cur_pagename,
                            item_rev - 1, cur_pagename, item_rev, ignorews=1)

                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']

                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)

                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

                if not ddiffs:
                    handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else: # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"

                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch

                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'), full_url(request, page, querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'), full_url(request, page, querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
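
The pagename_map bookkeeping above resolves renames while walking the edit log newest-first: when a SAVE/RENAME entry for a page is seen (old name in item.extra), any mapping already pointing at that page is transferred, so the whole rename chain resolves to the current name. A distilled, self-contained sketch of that logic (the entry tuples are illustrative):

def track_renames(entries):
    """Map each newest-first (pagename, action, extra) entry to its current name."""
    pagename_map = {}  # { old name: current name }
    resolved = []
    for pagename, action, extra in entries:
        cur = pagename_map.get(pagename, pagename)
        resolved.append(cur)
        if action == 'SAVE/RENAME':
            # the even older name `extra` now also resolves to `cur`
            pagename_map.pop(pagename, None)
            pagename_map[extra] = cur
    return resolved

# A was renamed to B, then B to C; all three entries resolve to C:
log = [('C', 'SAVE/RENAME', 'B'), ('B', 'SAVE', ''), ('B', 'SAVE/RENAME', 'A')]
assert track_renames(log) == ['C', 'C', 'C']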
Example #10
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values["items"])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in a RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get("unique", 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get("diffs", 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get("ddiffs", 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get("filter")
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not (urlfilter.match(line.pagename)):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if not line.action.startswith("SAVE") or ((line.pagename in pages) and unique):
            continue
        # if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = "application/rss+xml"
        request.expires = expires
        request.last_modified = lastmod
        request.headers["Etag"] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        FEED_HEADER_COMMENT = """
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.
    
    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each items.
    
    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        """ % (
            items_limit,
            max_items,
            unique,
            diffs,
            ddiffs,
        )

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u"", ATOM_NAMESPACE), NS(u"wiki", RSSWIKI_NAMESPACE), E_CURSOR((ATOM_NAMESPACE, u"feed")))
        )
        f.send(E((ATOM_NAMESPACE, u"id"), full_url(request, page).encode(config.charset))),
        f.send(E((ATOM_NAMESPACE, u"title"), cfg.sitename.encode(config.charset))),
        f.send(E((ATOM_NAMESPACE, u"link"), {u"href": request.url_root.encode(config.charset)})),
        f.send(E((ATOM_NAMESPACE, u"summary"), ("RecentChanges at %s" % cfg.sitename).encode(config.charset))),
        # Icon
        # E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        # if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            # link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={"action": "diff"})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(
                                request, item.pagename, revisions[idx + 1], item.pagename, 0, ignorews=1
                            )
                            if len(lines) > 20:
                                lines = lines[:20] + ["...\n"]
                            lines = "\n".join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = "%s\n<pre>\n%s\n</pre>\n" % (desc_text, lines)
                        break
            # if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            # if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == "interwiki":
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={"action": "info"})

            f.send(
                E(
                    (ATOM_NAMESPACE, u"entry"),
                    E((ATOM_NAMESPACE, u"id"), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"title"), item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"updated"), timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u"link"), {u"href": link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u"summary"), desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u"author"), E((ATOM_NAMESPACE, u"name"), edname.encode(config.charset))),
                    # E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u"wiki:version"), ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:status"), (u"deleted", u"updated")[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u"wiki:diff"), link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u"wiki:history"), history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                )
            )

        # emit logo data
        # if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
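
Both RSS variants apply the same "unique" filter while scanning the edit log: log.reverse() yields entries newest-first, so keeping only the first occurrence of each page name keeps exactly its latest change. A minimal sketch of that dedup-and-cap loop (names are illustrative):

def latest_per_page(pagenames, unique=True, max_items=15):
    """Keep the first (i.e. newest) entry per page from a newest-first stream."""
    seen = {}
    kept = []
    for name in pagenames:
        if unique and name in seen:
            continue
        seen[name] = None
        kept.append(name)
        if len(kept) >= max_items:
            break
    return kept

assert latest_per_page(['A', 'B', 'A', 'C']) == ['A', 'B', 'C']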
Example #11
def put(request, key, data,
        filename=None,
        content_type=None,
        content_disposition=None,
        content_length=None,
        last_modified=None,
        original=None):
    """
    Put an object into the cache to send it with cache action later.

    @param request: the request object
    @param key: non-guessable key into cache (str)
    @param data: content data (str or open file-like obj)
    @param filename: filename for content-disposition header and for autodetecting
                     content_type (unicode, default: None)
    @param content_type: content-type header value (str, default: autodetect from filename)
    @param content_disposition: type for content-disposition header (str, default: None)
    @param content_length: data length for content-length header (int, default: autodetect)
    @param last_modified: last modified timestamp (int, default: autodetect)
    @param original: location of original object (default: None) - this is just written to
                     the metadata cache "as is" and could be used for cache cleanup,
                     use (wikiname, itemname, attachname or None))
    """
    import os.path
    from MoinMoin.util import timefuncs

    if filename:
        # make sure we just have a simple filename (without path)
        filename = os.path.basename(filename)

        if content_type is None:
            # try autodetect
            mt, enc = mimetypes.guess_type(filename)
            if mt:
                content_type = mt

    if content_type is None:
        content_type = 'application/octet-stream'

    data_cache = caching.CacheEntry(request, cache_arena, key+'.data', cache_scope, do_locking=do_locking)
    data_cache.update(data)
    content_length = content_length or data_cache.size()
    last_modified = last_modified or data_cache.mtime()

    httpdate_last_modified = timefuncs.formathttpdate(int(last_modified))
    headers = [('Content-Type', content_type),
               ('Last-Modified', httpdate_last_modified),
               ('Content-Length', content_length),
              ]
    if content_disposition and filename:
        # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
        # There is no solution that is compatible to IE except stripping non-ascii chars
        filename = filename.encode(config.charset)
        headers.append(('Content-Disposition', '%s; filename="%s"' % (content_disposition, filename)))

    meta_cache = caching.CacheEntry(request, cache_arena, key+'.meta', cache_scope, do_locking=do_locking, use_pickle=True)
    meta_cache.update({
        'httpdate_last_modified': httpdate_last_modified,
        'last_modified': last_modified,
        'headers': headers,
        'original': original,
    })
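
put() stores every cached object as two CacheEntry records: <key>.data holding the raw bytes and <key>.meta holding a pickled dict with headers precomputed at put time, so a later send only has to replay them. A dict-backed toy illustration of that split (fake_cache and toy_put are stand-ins, not MoinMoin API):

import time
from email.utils import formatdate

fake_cache = {}  # stand-in for the farm-scope cache store

def toy_put(key, data, content_type='application/octet-stream'):
    # headers are computed once, at put time, and stored beside the data
    fake_cache[key + '.data'] = data
    fake_cache[key + '.meta'] = {
        'headers': [('Content-Type', content_type),
                    ('Last-Modified', formatdate(time.time(), usegmt=True)),
                    ('Content-Length', len(data))],
    }

toy_put('abc123', b'hello', content_type='text/plain')
assert fake_cache['abc123.data'] == b'hello'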
Example #12
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return
    if request.isSpiderAgent:  # reduce bot cpu usage
        return ''

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get('items', max_items)),
                        items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get('unique', unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get('diffs', diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get('lines', max_lines)),
                        lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get('show_att', show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get('page', page_pattern)
    except ValueError:
        pass

    # if we are just interested in a specific page, using the local edit-log
    # of that page is much faster than the global one - esp. if the page was
    # NOT recently changed and the global edit-log is rather big.
    kw = dict(rootpagename=page_pattern) if is_single_page_match(
        page_pattern) else {}
    log = editlog.EditLog(request, **kw)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if ((not show_att and not line.action.startswith('SAVE'))
                or ((line.pagename in pages) and unique)
                or not match_page(line.pagename, page_pattern)):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique,
                                     max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler.write(
            u'<!--\n'
            u'    Add an "items=nnn" URL parameter to get more than the \n'
            u'    default %(def_max_items)d items. You cannot get more than \n'
            u'    %(items_limit)d items though.\n'
            u'    \n'
            u'    Add "unique=1" to get a list of changes where page names are unique,\n'
            u'    i.e. where only the latest change of each page is reflected.\n'
            u'    \n'
            u'    Add "diffs=1" to add change diffs to the description of each items.\n'
            u'    \n'
            u'    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            u'    \n'
            u'    Add "lines=nnn" to change maximum number of diff/body lines \n'
            u'    to show. Cannot be more than %(lines_limit)d.\n'
            u'    \n'
            u'    Add "show_att=1" to show items related to attachments.\n'
            u'    \n'
            u'    Add "page=pattern" to show feed only for specific pages.\n'
            u'    Pattern can be empty (it would match to all pages), \n'
            u'    can start with circumflex (it would be interpreted as \n'
            u'    regular expression in this case), end with slash (for \n'
            u'    getting feed for page tree) or point to specific page (if \n'
            u'    none of the above can be applied).\n'
            u'    \n'
            u'    Current settings: items=%(max_items)i, unique=%(unique)i, \n'
            u'    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n'
            u'    show_att=%(show_att)i\n'
            u'-->\n' % locals())

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description',
                           u'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'),
                               None,
                               attr={
                                   (handler.xmlns['rdf'], 'resource'): link,
                               })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): logo,
                              })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): rdflink,
                              })

            # general attributes
            handler.simpleNode('title', item.pagename)
            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith('ATT'):  # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == 'ATTNEW':
                    # Once attachment deleted this link becomes invalid but we
                    # preserve it to prevent appearance of new RSS entries in
                    # RSS readers.
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request,
                                       cur_pagename,
                                       filename,
                                       do='view'))

                    comment = _(u"Upload of attachment '%(filename)s'.") % {
                        'filename': filename
                    }

                elif action == 'ATTDEL':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={'action': 'AttachFile'}))

                    comment = _(u"Attachment '%(filename)s' deleted.") % {
                        'filename': filename
                    }

                elif action == 'ATTDRW':
                    if ddiffs:
                        handler.simpleNode(
                            'link',
                            attach_url(request,
                                       cur_pagename,
                                       filename,
                                       do='view'))

                    comment = _(u"Drawing '%(filename)s' saved.") % {
                        'filename': filename
                    }

            elif action.startswith('SAVE'):
                if action == 'SAVE/REVERT':
                    to_rev = int(item.extra)
                    comment = (_(u"Revert to revision %(rev)d.") % {
                        'rev': to_rev}) + "<br />" \
                        + _("Comment:") + " " + comment

                elif action == 'SAVE/RENAME':
                    show_diff = 0
                    comment = (_(u"Renamed from '%(oldpagename)s'.") % {
                        'oldpagename': item.extra}) + "<br />" \
                        + _("Comment:") + " " + comment
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename

                elif action == 'SAVENEW':
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={
                                         'action': 'recall',
                                         'rev': str(item_rev)
                                     }))
                    else:
                        handler.simpleNode(
                            'link',
                            full_url(request,
                                     page,
                                     querystr={
                                         'action': 'diff',
                                         'rev1': str(item_rev),
                                         'rev2': str(item_rev - 1)
                                     }))

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename,
                                     rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(request,
                                                  cur_pagename,
                                                  item_rev - 1,
                                                  cur_pagename,
                                                  item_rev,
                                                  ignorews=1)

                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ['...\n']

                    lines = '\n'.join(lines)
                    lines = wikiutil.escape(lines)

                    comment = u'%s\n<pre>\n%s\n</pre>\n' % (comment, lines)

                if not ddiffs:
                    handler.simpleNode('link', full_url(request, page))

            if comment:
                handler.simpleNode('description', comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
                if item.editor[0] == 'interwiki':
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else:  # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"

                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch

                handler.startNode(('dc', 'contributor'))
                handler.startNode(('rdf', 'Description'), attr=edattr)
                handler.simpleNode(('rdf', 'value'), edname)
                handler.endNode(('rdf', 'Description'))
                handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
Example #13
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = "text/plain"
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg
    _ = request.getText

    # get params
    def_max_items = max_items = cfg.rss_items_default
    items_limit = cfg.rss_items_limit
    unique = cfg.rss_unique
    diffs = cfg.rss_diffs
    ddiffs = cfg.rss_ddiffs
    max_lines = cfg.rss_lines_default
    lines_limit = cfg.rss_lines_limit
    show_att = cfg.rss_show_attachment_entries
    page_pattern = cfg.rss_page_filter_pattern

    try:
        max_items = min(int(request.values.get("items", max_items)), items_limit)
    except ValueError:
        pass
    try:
        unique = int(request.values.get("unique", unique))
    except ValueError:
        pass
    try:
        diffs = int(request.values.get("diffs", diffs))
    except ValueError:
        pass
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get("ddiffs", ddiffs))
    except ValueError:
        pass
    try:
        max_lines = min(int(request.values.get("lines", max_lines)), lines_limit)
    except ValueError:
        pass
    try:
        show_att = int(request.values.get("show_att", show_att))
    except ValueError:
        pass
    try:
        page_pattern = request.values.get("page", page_pattern)
    except ValueError:
        pass

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (
            (not show_att and not line.action.startswith("SAVE"))
            or ((line.pagename in pages) and unique)
            or not match_page(line.pagename, page_pattern)
        ):
            continue
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique, max_lines, show_att)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = "application/rss+xml"
        request.expires = expires
        request.last_modified = lastmod
        request.headers["Etag"] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            unicode(
                "<!--\n"
                '    Add an "items=nnn" URL parameter to get more than the \n'
                "    default %(def_max_items)d items. You cannot get more than \n"
                "    %(items_limit)d items though.\n"
                "    \n"
                '    Add "unique=1" to get a list of changes where page names are unique,\n'
                "    i.e. where only the latest change of each page is reflected.\n"
                "    \n"
                '    Add "diffs=1" to add change diffs to the description of each items.\n'
                "    \n"
                '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
                "    \n"
                '    Add "lines=nnn" to change maximum number of diff/body lines \n'
                "    to show. Cannot be more than %(lines_limit)d.\n"
                "    \n"
                '    Add "show_att=1" to show items related to attachments.\n'
                "    \n"
                '    Add "page=pattern" to show feed only for specific pages.\n'
                "    Pattern can be empty (it would match to all pages), \n"
                "    can start with circumflex (it would be interpreted as \n"
                "    regular expression in this case), end with slash (for \n"
                "    getting feed for page tree) or point to specific page (if \n"
                "    none of the above can be applied).\n"
                "    \n"
                "    Current settings: items=%(max_items)i, unique=%(unique)i, \n"
                "    diffs=%(diffs)i, ddiffs=%(ddiffs)i, lines=%(max_lines)i, \n"
                "    show_att=%(show_att)i\n"
                "-->\n" % locals()
            ).encode(config.charset)
        )

        # emit channel description
        handler.startNode("channel", {(handler.xmlns["rdf"], "about"): request.url_root})
        handler.simpleNode("title", cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode("link", full_url(request, page))
        handler.simpleNode("description", u"RecentChanges at %s" % cfg.sitename)
        if logo:
            handler.simpleNode("image", None, {(handler.xmlns["rdf"], "resource"): logo})
        if cfg.interwikiname:
            handler.simpleNode(("wiki", "interwiki"), cfg.interwikiname)

        handler.startNode("items")
        handler.startNode(("rdf", "Seq"))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(("rdf", "li"), None, attr={(handler.xmlns["rdf"], "resource"): link})
        handler.endNode(("rdf", "Seq"))
        handler.endNode("items")
        handler.endNode("channel")

        # emit logo data
        if logo:
            handler.startNode("image", attr={(handler.xmlns["rdf"], "about"): logo})
            handler.simpleNode("title", cfg.sitename)
            handler.simpleNode("link", baseurl)
            handler.simpleNode("url", logo)
            handler.endNode("image")

        # Mapping { oldname: curname } for maintaining page renames
        pagename_map = {}

        # emit items
        for item in logdata:
            if item.pagename in pagename_map:
                cur_pagename = pagename_map[item.pagename]
            else:
                cur_pagename = item.pagename
            page = Page(request, cur_pagename)
            action = item.action
            comment = item.comment
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode("item", attr={(handler.xmlns["rdf"], "about"): rdflink})

            # general attributes
            handler.simpleNode("title", item.pagename)
            handler.simpleNode(("dc", "date"), timefuncs.W3CDate(item.time))

            show_diff = diffs

            if action.startswith("ATT"):  # Attachment
                show_diff = 0
                filename = wikiutil.url_unquote(item.extra)
                att_exists = AttachFile.exists(request, cur_pagename, filename)

                if action == "ATTNEW":
                    # Once the attachment is deleted this link becomes invalid,
                    # but we preserve it to prevent new RSS entries from
                    # appearing in RSS readers.
                    if ddiffs:
                        handler.simpleNode("link", attach_url(request, cur_pagename, filename, do="view"))

                    comment = _(u"Upload of attachment '%(filename)s'.") % {"filename": filename}

                elif action == "ATTDEL":
                    if ddiffs:
                        handler.simpleNode("link", full_url(request, page, querystr={"action": "AttachFile"}))

                    comment = _(u"Attachment '%(filename)s' deleted.") % {"filename": filename}

                elif action == "ATTDRW":
                    if ddiffs:
                        handler.simpleNode("link", attach_url(request, cur_pagename, filename, do="view"))

                    comment = _(u"Drawing '%(filename)s' saved.") % {"filename": filename}

            elif action.startswith("SAVE"):
                if action == "SAVE/REVERT":
                    to_rev = int(item.extra)
                    comment = (
                        (_(u"Revert to revision %(rev)d.") % {"rev": to_rev}) + "<br />" + _("Comment:") + " " + comment
                    )

                elif action == "SAVE/RENAME":
                    show_diff = 0
                    comment = (
                        (_(u"Renamed from '%(oldpagename)s'.") % {"oldpagename": item.extra})
                        + "<br />"
                        + _("Comment:")
                        + " "
                        + comment
                    )
                    if item.pagename in pagename_map:
                        newpage = pagename_map[item.pagename]
                        del pagename_map[item.pagename]
                        pagename_map[item.extra] = newpage
                    else:
                        pagename_map[item.extra] = item.pagename
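                    # (sketch, not original code) the if/else above is
                    # equivalent to:
                    #   pagename_map[item.extra] = \
                    #       pagename_map.pop(item.pagename, item.pagename)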

                elif action == "SAVENEW":
                    comment = _(u"New page:\n") + comment

                item_rev = int(item.rev)

                # If we use diffs/ddiffs, we should calculate proper links and
                # content
                if ddiffs:
                    # first revision can't have older revisions to diff with
                    if item_rev == 1:
                        handler.simpleNode(
                            "link", full_url(request, page, querystr={"action": "recall", "rev": str(item_rev)})
                        )
                    else:
                        handler.simpleNode(
                            "link",
                            full_url(
                                request,
                                page,
                                querystr={"action": "diff", "rev1": str(item_rev), "rev2": str(item_rev - 1)},
                            ),
                        )

                if show_diff:
                    if item_rev == 1:
                        lines = Page(request, cur_pagename, rev=item_rev).getlines()
                    else:
                        lines = wikiutil.pagediff(
                            request, cur_pagename, item_rev - 1, cur_pagename, item_rev, ignorews=1
                        )

                    if len(lines) > max_lines:
                        lines = lines[:max_lines] + ["...\n"]

                    lines = "\n".join(lines)
                    lines = wikiutil.escape(lines)

                    comment = u"%s\n<pre>\n%s\n</pre>\n" % (comment, lines)

                if not ddiffs:
                    handler.simpleNode("link", full_url(request, page))

            if comment:
                handler.simpleNode("description", comment)

            # contributor
            if cfg.show_names:
                edattr = {}
                if cfg.show_hosts:
                    edattr[(handler.xmlns["wiki"], "host")] = item.hostname
                if item.editor[0] == "interwiki":
                    edname = "%s:%s" % item.editor[1]
                    ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
                else:  # 'ip'
                    edname = item.editor[1]
                    ##edattr[(None, 'link')] = link + "?action=info"

                # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
                # if you know how to do this right, please send us a patch

                handler.startNode(("dc", "contributor"))
                handler.startNode(("rdf", "Description"), attr=edattr)
                handler.simpleNode(("rdf", "value"), edname)
                handler.endNode(("rdf", "Description"))
                handler.endNode(("dc", "contributor"))

            # wiki extensions
            handler.simpleNode(("wiki", "version"), "%i" % (item.ed_time_usecs))
            handler.simpleNode(("wiki", "status"), ("deleted", "updated")[page.exists()])
            handler.simpleNode(("wiki", "diff"), full_url(request, page, querystr={"action": "diff"}))
            handler.simpleNode(("wiki", "history"), full_url(request, page, querystr={"action": "info"}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode("item")

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
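
The variant above filters the edit log through match_page(), which is not part
of this excerpt. Below is a minimal sketch of what such a helper could look
like, assuming only the pattern semantics documented in the feed header
comment (empty pattern, leading circumflex, trailing slash, exact name); it is
a hypothetical reconstruction, not the original implementation, and it assumes
the module-level "import re" the action already relies on.

def match_page(pagename, page_pattern):
    # empty pattern: the feed covers all pages
    if not page_pattern:
        return True
    # leading circumflex: interpret the pattern as a regular expression
    if page_pattern.startswith('^'):
        return re.match(page_pattern, pagename) is not None
    # trailing slash: the page itself plus everything in its subtree
    if page_pattern.endswith('/'):
        return (pagename + '/').startswith(page_pattern)
    # anything else: an exact page name
    return pagename == page_pattern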
Example #14
0
def put(
    request,
    key,
    data,
    filename=None,
    content_type=None,
    content_disposition=None,
    content_length=None,
    last_modified=None,
    original=None,
):
    """
    Put an object into the cache to send it with cache action later.

    @param request: the request object
    @param key: non-guessable key into cache (str)
    @param data: content data (str or open file-like obj)
    @param filename: filename for content-disposition header and for autodetecting
                     content_type (unicode, default: None)
    @param content_type: content-type header value (str, default: autodetect from filename)
    @param content_disposition: type for content-disposition header (str, default: None)
    @param content_length: data length for content-length header (int, default: autodetect)
    @param last_modified: last modified timestamp (int, default: autodetect)
    @param original: location of original object (default: None) - this is just written to
                     the metadata cache "as is" and could be used for cache cleanup,
                     use (wikiname, itemname, attachname or None))
    """
    import os.path
    from MoinMoin.util import timefuncs

    if filename:
        # make sure we just have a simple filename (without path)
        filename = os.path.basename(filename)

        if content_type is None:
            # try autodetect
            mt, enc = mimetypes.guess_type(filename)
            if mt:
                content_type = mt

    if content_type is None:
        content_type = "application/octet-stream"

    data_cache = caching.CacheEntry(request, cache_arena, key + ".data", cache_scope, do_locking=do_locking)
    data_cache.update(data)
    content_length = content_length or data_cache.size()
    last_modified = last_modified or data_cache.mtime()

    httpdate_last_modified = timefuncs.formathttpdate(int(last_modified))
    headers = [
        "Content-Type: %s" % content_type,
        "Last-Modified: %s" % httpdate_last_modified,
        "Content-Length: %s" % content_length,
    ]
    if content_disposition and filename:
        # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
        # There is no solution that is compatible to IE except stripping non-ascii chars
        filename = filename.encode(config.charset)
        headers.append('Content-Disposition: %s; filename="%s"' % (content_disposition, filename))

    meta_cache = caching.CacheEntry(
        request, cache_arena, key + ".meta", cache_scope, do_locking=do_locking, use_pickle=True
    )
    meta_cache.update(
        {
            "httpdate_last_modified": httpdate_last_modified,
            "last_modified": last_modified,
            "headers": headers,
            "original": original,
        }
    )
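
A hypothetical call to put() above, sketching how a file might be stored for a
later "cache" action to serve; the key and the file path are placeholders made
up for illustration, and a request object from the surrounding action is
assumed:

# hedged usage sketch -- the key and path below are not from the original
key = "0123456789abcdef"              # non-guessable cache key (made up)
f = open("/tmp/diagram.png", "rb")    # data may be a str or an open file-like
put(request, key, f,
    filename=u"diagram.png",          # drives content-type autodetection
    content_disposition="inline")     # length/mtime autodetected from cache
f.close()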
Example #15
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        httpheaders = ["Content-Type: text/plain; charset=%s" % config.charset]
        request.emit_http_headers(httpheaders)
        request.write("rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.form['items'][0])
        max_items = min(max_items, items_limit) # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.form.get('unique', [0])[0])
    except ValueError:
        unique = 0
    try:
        diffs = int(request.form.get('diffs', [1])[0])
    except ValueError:
        diffs = 1 
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.form.get('ddiffs', [1])[0])
    except ValueError:
        ddiffs = 1 

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE') or
                ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = timefuncs.formathttpdate(time.time() + cfg.rss_cache)

        httpheaders = ["Content-Type: text/xml; charset=%s" % config.charset,
                       "Expires: %s" % expires,
                       "Last-Modified: %s" % timestamp,
                       "Etag: %s" % etag, ]

        # send the generated XML document
        request.emit_http_headers(httpheaders)

        baseurl = request.getBaseURL()
        if not baseurl.endswith('/'):
            baseurl += '/'

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            '<!--\n'
            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
            '    You cannot get more than %d items though.\n'
            '    \n'
            '    Add "unique=1" to get a list of changes where page names are unique,\n'
            '    i.e. where only the latest change of each page is reflected.\n'
            '    \n'
            '    Add "diffs=0" to remove change diffs to the description of each items.\n'
            '    \n'
            '    Add "ddiffs=0" to link directly to the wikipage\n'
            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
            )

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
            })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
                })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'), None, attr={(handler.xmlns['rdf'], 'resource'): link, })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image', attr={
                (handler.xmlns['rdf'], 'about'): logo,
                })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # emit items
        for item in logdata:
            page = Page(request, item.pagename)
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item', attr={(handler.xmlns['rdf'], 'about'): rdflink, })

            # general attributes
            handler.simpleNode('title', item.pagename)
            if ddiffs:
                handler.simpleNode('link', full_url(request, page, querystr={'action': 'diff'}))
            else:
                handler.simpleNode('link', full_url(request, page))

            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            # description
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else: # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
            # if you know how to do this right, please send us a patch
            user = edname.split(':')[-1]
            user_link = full_url(request, Page(request, user))

            desc_text = 'Change by <a href="%s">%s</a> -- "%s"' % (user_link, user, item.comment)
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['... (continued)\n']

                            # colorize the diff lines
                            fixed_lines = []
                            for line in lines:
                                line = wikiutil.escape(line)
                                if line.startswith('+'):
                                    line = "<font color='green'>%s</font>" % line
                                elif line.startswith('-'):
                                    line = "<font color='red'>%s</font>" % line
                                fixed_lines.append(line)
                            lines = fixed_lines
                            lines = '\n'.join(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
                        break
            if desc_text:
                handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            if cfg.show_hosts:
                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
           
            handler.startNode(('dc', 'contributor'))
            handler.startNode(('rdf', 'Description'), attr=edattr)
            handler.simpleNode(('rdf', 'value'), edname)
            handler.endNode(('rdf', 'Description'))
            handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'), full_url(request, page, querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'), full_url(request, page, querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())
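
The line colouring in Example #15 can be isolated into a small helper; this is
a sketch of exactly the same behaviour as the loop above (escape each diff
line, then wrap additions in green and removals in red), assuming wikiutil is
imported as in the surrounding module:

def colorize_diff_lines(lines):
    fixed_lines = []
    for line in lines:
        line = wikiutil.escape(line)
        if line.startswith('+'):
            line = "<font color='green'>%s</font>" % line
        elif line.startswith('-'):
            line = "<font color='red'>%s</font>" % line
        fixed_lines.append(line)
    return fixed_lines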
Example #16
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    urlfilter = request.values.get('filter')
    if urlfilter:
        urlfilter = re.compile(urlfilter)
    else:
        urlfilter = None
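    # (illustrative note, not in the original) 'filter' is specific to this
    # variant: a regular expression matched against page names below, so e.g.
    # ?filter=Help would restrict the feed to pages starting with "Help".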

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if urlfilter and not urlfilter.match(line.pagename):
            continue
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        output = structencoder(indent=u"yes")

        # note: FEED_HEADER_COMMENT is built here but never emitted anywhere
        # below in this variant
        FEED_HEADER_COMMENT = '''
<!--
    Add an "items=nnn" URL parameter to get more than the default 15 items.
    You cannot get more than %d items though.

    Add "unique=1" to get a list of changes where page names are unique,
    i.e. where only the latest change of each page is reflected.
    Add "diffs=1" to add change diffs to the description of each item.

    Add "ddiffs=1" to link directly to the diff (good for FeedReader).
    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
        ''' % (items_limit, max_items, unique, diffs, ddiffs)

        # Feed envelope
        page = Page(request, pagename)
        f = output.cofeed(
            ROOT(NS(u'', ATOM_NAMESPACE), NS(u'wiki', RSSWIKI_NAMESPACE),
                 E_CURSOR((ATOM_NAMESPACE, u'feed'), )))
        f.send(
            E((ATOM_NAMESPACE, u'id'),
              full_url(request, page).encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'title'),
              cfg.sitename.encode(config.charset)))
        f.send(
            E((ATOM_NAMESPACE, u'link'),
              {u'href': request.url_root.encode(config.charset)}))
        f.send(
            E((ATOM_NAMESPACE, u'summary'),
              ('RecentChanges at %s' % cfg.sitename).encode(config.charset)))
        #Icon
        #E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),

        #if cfg.interwikiname:
        #    handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            #link = full_url(request, page, anchor=anchor)
            if ddiffs:
                link = full_url(request, page, querystr={'action': 'diff'})
            else:
                link = full_url(request, page)

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            #if desc_text:
            #    handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            #if cfg.show_hosts:
            #    edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            history_link = full_url(request, page, querystr={'action': 'info'})

            f.send(
                E(
                    (ATOM_NAMESPACE, u'entry'),
                    E((ATOM_NAMESPACE, u'id'), link.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'title'),
                      item.pagename.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'updated'),
                      timefuncs.W3CDate(item.time).encode(config.charset)),
                    E((ATOM_NAMESPACE, u'link'),
                      {u'href': link.encode(config.charset)}),
                    E((ATOM_NAMESPACE, u'summary'),
                      desc_text.encode(config.charset)),
                    E((ATOM_NAMESPACE, u'author'),
                      E((ATOM_NAMESPACE, u'name'),
                        edname.encode(config.charset))),
                    #E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
                    # wiki extensions
                    E((RSSWIKI_NAMESPACE, u'wiki:version'),
                      ("%i" % (item.ed_time_usecs)).encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:status'),
                      (u'deleted', u'updated')[page.exists()]),
                    E((RSSWIKI_NAMESPACE, u'wiki:diff'),
                      link.encode(config.charset)),
                    E((RSSWIKI_NAMESPACE, u'wiki:history'),
                      history_link.encode(config.charset)),
                    # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
                    # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
                ))

        # emit logo data
        #if logo:
        #    handler.startNode('image', attr={
        #        (handler.xmlns['rdf'], 'about'): logo,
        #        })
        #    handler.simpleNode('title', cfg.sitename)
        #    handler.simpleNode('link', baseurl)
        #    handler.simpleNode('url', logo)
        #    handler.endNode('image')

        f.close()
        request.write(output.read())
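
The nested if/else these variants use to decide on a 304 implements the rule
stated in its comment: every validator the client sent must match, and at
least one must be present and match. A flattened sketch of that rule
(equivalent formulation, not the original code; it assumes the same string
attributes used above):

def is_not_modified(request, timestamp, etag):
    # a validator the client did not send counts as "no objection"
    ims_ok = request.if_modified_since in (None, '', timestamp)
    inm_ok = request.if_none_match in (None, '', etag)
    # but at least one validator must be present and actually match
    any_match = (request.if_modified_since == timestamp or
                 request.if_none_match == etag)
    return ims_ok and inm_ok and any_match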
Example #17
0
def execute(pagename, request):
    """ Send recent changes as an RSS document
    """
    if not wikixml.ok:
        request.mimetype = 'text/plain'
        request.write(
            "rss_rc action is not supported because of missing pyxml module.")
        return

    cfg = request.cfg

    # get params
    items_limit = 100
    try:
        max_items = int(request.values['items'])
        max_items = min(max_items, items_limit)  # not more than `items_limit`
    except (KeyError, ValueError):
        # not more than 15 items in an RSS file by default
        max_items = 15
    try:
        unique = int(request.values.get('unique', 0))
    except ValueError:
        unique = 0
    try:
        diffs = int(request.values.get('diffs', 0))
    except ValueError:
        diffs = 0
    ## ddiffs inserted by Ralf Zosel <*****@*****.**>, 04.12.2003
    try:
        ddiffs = int(request.values.get('ddiffs', 0))
    except ValueError:
        ddiffs = 0

    # get data
    log = editlog.EditLog(request)
    logdata = []
    counter = 0
    pages = {}
    lastmod = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        if (not line.action.startswith('SAVE')
                or ((line.pagename in pages) and unique)):
            continue
        #if log.dayChanged() and log.daycount > _MAX_DAYS: break
        line.editor = line.getInterwikiEditorData(request)
        line.time = timefuncs.tmtuple(
            wikiutil.version2timestamp(line.ed_time_usecs))  # UTC
        logdata.append(line)
        pages[line.pagename] = None

        if not lastmod:
            lastmod = wikiutil.version2timestamp(line.ed_time_usecs)

        counter += 1
        if counter >= max_items:
            break
    del log

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using whatever setting the admin
        # defined for suggested cache lifetime of the RecentChanges RSS doc
        expires = time.time() + cfg.rss_cache

        request.mimetype = 'application/rss+xml'
        request.expires = expires
        request.last_modified = lastmod
        request.headers['Etag'] = etag

        # send the generated XML document
        baseurl = request.url_root

        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
        if logo:
            logo = request.getQualifiedURL(logo.group(1))

        # prepare output
        out = StringIO.StringIO()
        handler = RssGenerator(out)

        # start SAX stream
        handler.startDocument()
        handler._out.write(
            '<!--\n'
            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
            '    You cannot get more than %d items though.\n'
            '    \n'
            '    Add "unique=1" to get a list of changes where page names are unique,\n'
            '    i.e. where only the latest change of each page is reflected.\n'
            '    \n'
            '    Add "diffs=1" to add change diffs to the description of each items.\n'
            '    \n'
            '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs))

        # emit channel description
        handler.startNode('channel', {
            (handler.xmlns['rdf'], 'about'): request.url_root,
        })
        handler.simpleNode('title', cfg.sitename)
        page = Page(request, pagename)
        handler.simpleNode('link', full_url(request, page))
        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
        if logo:
            handler.simpleNode('image', None, {
                (handler.xmlns['rdf'], 'resource'): logo,
            })
        if cfg.interwikiname:
            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)

        handler.startNode('items')
        handler.startNode(('rdf', 'Seq'))
        for item in logdata:
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            page = Page(request, item.pagename)
            link = full_url(request, page, anchor=anchor)
            handler.simpleNode(('rdf', 'li'),
                               None,
                               attr={
                                   (handler.xmlns['rdf'], 'resource'): link,
                               })
        handler.endNode(('rdf', 'Seq'))
        handler.endNode('items')
        handler.endNode('channel')

        # emit logo data
        if logo:
            handler.startNode('image',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): logo,
                              })
            handler.simpleNode('title', cfg.sitename)
            handler.simpleNode('link', baseurl)
            handler.simpleNode('url', logo)
            handler.endNode('image')

        # emit items
        for item in logdata:
            page = Page(request, item.pagename)
            anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
            rdflink = full_url(request, page, anchor=anchor)
            handler.startNode('item',
                              attr={
                                  (handler.xmlns['rdf'], 'about'): rdflink,
                              })

            # general attributes
            handler.simpleNode('title', item.pagename)
            if ddiffs:
                handler.simpleNode(
                    'link', full_url(request,
                                     page,
                                     querystr={'action': 'diff'}))
            else:
                handler.simpleNode('link', full_url(request, page))

            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))

            # description
            desc_text = item.comment
            if diffs:
                # TODO: rewrite / extend wikiutil.pagediff
                # searching for the matching pages doesn't really belong here
                revisions = page.getRevList()

                rl = len(revisions)
                for idx in range(rl):
                    rev = revisions[idx]
                    if rev <= item.rev:
                        if idx + 1 < rl:
                            lines = wikiutil.pagediff(request,
                                                      item.pagename,
                                                      revisions[idx + 1],
                                                      item.pagename,
                                                      0,
                                                      ignorews=1)
                            if len(lines) > 20:
                                lines = lines[:20] + ['...\n']
                            lines = '\n'.join(lines)
                            lines = wikiutil.escape(lines)
                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text,
                                                                     lines)
                        break
            if desc_text:
                handler.simpleNode('description', desc_text)

            # contributor
            edattr = {}
            if cfg.show_hosts:
                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
            if item.editor[0] == 'interwiki':
                edname = "%s:%s" % item.editor[1]
                ##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
            else:  # 'ip'
                edname = item.editor[1]
                ##edattr[(None, 'link')] = link + "?action=info"

            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
            # if you know how to do this right, please send us a patch

            handler.startNode(('dc', 'contributor'))
            handler.startNode(('rdf', 'Description'), attr=edattr)
            handler.simpleNode(('rdf', 'value'), edname)
            handler.endNode(('rdf', 'Description'))
            handler.endNode(('dc', 'contributor'))

            # wiki extensions
            handler.simpleNode(('wiki', 'version'),
                               "%i" % (item.ed_time_usecs))
            handler.simpleNode(('wiki', 'status'),
                               ('deleted', 'updated')[page.exists()])
            handler.simpleNode(('wiki', 'diff'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'diff'}))
            handler.simpleNode(('wiki', 'history'),
                               full_url(request,
                                        page,
                                        querystr={'action': 'info'}))
            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )

            handler.endNode('item')

        # end SAX stream
        handler.endDocument()

        request.write(out.getvalue())