Code Example #1
File: emailnotify.py Project: steveyen/moingo
def handle_file_attached(event):
    """Sends an email to super users that have subscribed to this event type"""

    names = set()
    from_address = event.request.cfg.mail_from
    request = event.request
    page = Page(request, event.pagename)

    subscribers = page.getSubscribers(request, return_users=1)
    notification.filter_subscriber_list(event, subscribers, False)
    recipients = []

    for lang in subscribers:
        recipients.extend(subscribers[lang])

    attachlink = request.getBaseURL() + getAttachUrl(event.pagename, event.filename, request)
    pagelink = request.getQualifiedURL(page.url(request, {}))

    for lang in subscribers:
        emails = []
        _ = lambda text: request.getText(text, lang=lang)

        links = _("Attachment link: %(attach)s\n" \
                  "Page link: %(page)s\n") % {'attach': attachlink, 'page': pagelink}

        data = notification.attachment_added(request, _, event.pagename, event.filename, event.size)
        data['text'] = data['text'] + links

        emails = [usr.email for usr in subscribers[lang]]

        if send_notification(request, from_address, emails, data):
            names.update(recipients)

    return notification.Success(names)
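
handle_file_attached groups subscribers by language so that each group receives a message rendered in its own locale, and reports the notified names back to the event system. The pattern, reduced to a self-contained sketch (the subscriber layout and the render/send helpers are hypothetical stand-ins, not MoinMoin APIs; unlike the handler above, this simplified variant records only the group that was actually sent to):

def notify_by_language(subscribers, render, send):
    """Group recipients by language, render one localized message per
    language, send to each group once, and return the names notified.

    subscribers: dict mapping language code -> list of (name, email) pairs.
    render:      callable(lang) -> message text in that language.
    send:        callable(emails, text) -> True on success.
    """
    notified = set()
    for lang, users in subscribers.items():
        emails = [email for _name, email in users]
        if send(emails, render(lang)):
            notified.update(name for name, _email in users)
    return notified

# Stub usage:
subs = {'en': [('Alice', 'a@example.org')], 'de': [('Bert', 'b@example.org')]}
print(notify_by_language(subs,
                         lambda lang: 'attachment added (%s)' % lang,
                         lambda emails, text: True))
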
Code Example #2
File: hitcounts.py Project: supercool/orgsites
def linkto(pagename, request, params=""):
    from MoinMoin.util import web

    _ = request.getText

    if not request.cfg.chart_options:
        request.formatter = Formatter(request)
        return (
            request.formatter.sysmsg(1)
            + request.formatter.text(_("Charts are not available!"))
            + request.formatter.sysmsg(0)
        )

    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {"action": "chart", "type": "hitcounts"}
    querystr = web.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += "&" + params

    # TODO: remove escape=0 in 1.4
    data = {"url": page.url(request, querystr, escape=0)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="hitcounts chart">') % data

    return result
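
The linkto variants all follow the same recipe: build a query string for the chart action, escape it, append any caller-supplied parameters, and interpolate the page URL plus cfg.chart_options into an <img> tag. A standalone approximation using only the standard library (the escaping here is simplified compared to wikiutil):

from html import escape
from urllib.parse import urlencode

def chart_img(page_url, chart_type, options, params=''):
    """Build an <img> tag pointing at a chart action for a page."""
    querystr = urlencode({'action': 'chart', 'type': chart_type})
    if params:
        querystr += '&' + params
    data = {'url': escape('%s?%s' % (page_url, querystr)), 'alt': chart_type}
    data.update(options)  # expects integer 'width' and 'height' keys
    return ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
            ' alt="%(alt)s chart">') % data

print(chart_img('/HitCounts', 'hitcounts', {'width': 720, 'height': 360}))
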
Code Example #3
def handle_file_attached(event):
    """Handles event sent when a file is attached to a page"""

    names = set()
    request = event.request
    page = Page(request, event.pagename)
    subscribers = page.getSubscribers(request, return_users=1)
    notification.filter_subscriber_list(event, subscribers, True)
    recipients = []

    for lang in subscribers:
        recipients.extend(subscribers[lang])

    attachlink = request.getQualifiedURL(getAttachUrl(event.pagename, event.filename, request))
    pagelink = request.getQualifiedURL(page.url(request, {}))

    for lang in subscribers.keys():
        _ = lambda text: request.getText(text, lang=lang)
        data = notification.attachment_added(request, _, event.pagename, event.filename, event.size)
        links = [{'url': attachlink, 'description': _("Attachment link")},
                  {'url': pagelink, 'description': _("Page link")}]

        jids = [usr.jid for usr in subscribers[lang]]
        data['url_list'] = links
        data['action'] = "file_attached"

        if send_notification(request, jids, data):
            names.update(recipients)

    return notification.Success(names)
Code Example #4
File: hitcounts.py Project: imosts/flume
def linkto(pagename, request, params=''):
    _ = request.getText

    if not request.cfg.chart_options:
        request.formatter = Formatter(request)
        return text(pagename, request, params)

    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {'action': 'chart', 'type': 'hitcounts'}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += '&amp;' + params
    
    # TODO: remove escape=0 in 2.0
    data = {'url': page.url(request, querystr, escape=0)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="hitcounts chart">') % data

    return result
Code Example #5
File: pagesize.py Project: Glottotopia/aagd
def linkto(pagename, request, params=''):
    _ = request.getText

    if not request.cfg.chart_options:
        return (request.formatter.sysmsg(1) +
                request.formatter.text(_('Charts are not available!')) +
                request.formatter.sysmsg(0))

    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {'action': 'chart', 'type': 'pagesize'}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += '&amp;' + params

    data = {'url': page.url(request, querystr)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="pagesize chart">') % data
    return result
Code Example #6
File: pagesize.py Project: execgit/gwiki-with-moin
def linkto(pagename, request, params=''):
    _ = request.getText

    if not request.cfg.chart_options:
        return (request.formatter.sysmsg(1) +
                request.formatter.text(_('Charts are not available!')) +
                request.formatter.sysmsg(0))

    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {'action': 'chart', 'type': 'pagesize'}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += '&amp;' + params

    data = {'url': page.url(request, querystr)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="pagesize chart">') % data
    return result
Code Example #7
File: jabbernotify.py Project: IvanLogvinov/soar
def handle_file_attached(event):
    """Handles event sent when a file is attached to a page"""

    names = set()
    request = event.request
    page = Page(request, event.pagename)
    subscribers = page.getSubscribers(request, return_users=1)
    notification.filter_subscriber_list(event, subscribers, True)
    recipients = []

    for lang in subscribers:
        recipients.extend(subscribers[lang])

    attachlink = request.getQualifiedURL(getAttachUrl(event.pagename, event.filename, request))
    pagelink = request.getQualifiedURL(page.url(request, {}))

    for lang in subscribers.keys():
        _ = lambda text: request.getText(text, lang=lang)
        data = notification.attachment_added(request, _, event.pagename, event.filename, event.size)
        links = [{'url': attachlink, 'description': _("Attachment link")},
                  {'url': pagelink, 'description': _("Page link")}]

        jids = [usr.jid for usr in subscribers[lang]]
        data['url_list'] = links
        data['action'] = "file_attached"

        if send_notification(request, jids, data):
            names.update(recipients)

    return notification.Success(names)
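
The jabber handlers send a structured payload per language group, a list of labelled URLs plus an action name, instead of flowing the links into the message text as the email variant does. A small sketch of assembling that payload (field names mirror the example; the XMPP transport itself is out of scope):

def build_jabber_payload(text, attach_url, page_url, translate):
    """Assemble the per-language notification dict sent over XMPP."""
    return {
        'text': text,
        'url_list': [
            {'url': attach_url, 'description': translate('Attachment link')},
            {'url': page_url, 'description': translate('Page link')},
        ],
        'action': 'file_attached',
    }

payload = build_jabber_payload('diagram.png was attached',
                               '/SomePage?action=AttachFile&do=get&target=diagram.png',
                               '/SomePage',
                               lambda s: s)
print(payload['action'], [d['description'] for d in payload['url_list']])
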
Code Example #8
File: wsgiapp.py Project: Opngate/moinmoin
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed
    actions.
    """
    _ = context.getText
    cfg = context.cfg

    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        if '_' in pagename and not context.page.exists():
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)

    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
                'action_name': wikiutil.escape(action_name), }

    # Disallow non available actions
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
                'action_name': wikiutil.escape(action_name), }
        if not context.user.valid:
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")

    if msg:
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            msg = _("You are not allowed to do %(action_name)s on this page.") % {
                    'action_name': wikiutil.escape(action_name), }
            if not context.user.valid:
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)

    return context
Code Example #9
File: wsgiapp.py Project: happytk/moin
def handle_action(context, pagename, action_name='show'):
    """ Actual dispatcher function for non-XMLRPC actions.

    Also sets up the Page object for this request, normalizes and
    redirects to canonical pagenames and checks for non-allowed
    actions.
    """
    _ = context.getText
    cfg = context.cfg

    # pagename could be empty after normalization e.g. '///' -> ''
    # Use localized FrontPage if pagename is empty
    if not pagename:
        context.page = wikiutil.getFrontPage(context)
    else:
        context.page = Page(context, pagename)
        if '_' in pagename and not context.page.exists():
            pagename = pagename.replace('_', ' ')
            page = Page(context, pagename)
            if page.exists():
                url = page.url(context)
                return context.http_redirect(url)

    msg = None
    # Complain about unknown actions
    if not action_name in get_names(cfg):
        msg = _("Unknown action %(action_name)s.") % {
                'action_name': wikiutil.escape(action_name), }

    # Disallow non available actions
    elif action_name[0].isupper() and not action_name in \
            get_available_actions(cfg, context.page, context.user):
        msg = _("You are not allowed to do %(action_name)s on this page.") % {
                'action_name': wikiutil.escape(action_name), }
        if not context.user.valid:
            # Suggest non valid user to login
            msg += " " + _("Login and try again.")

    if msg:
        context.theme.add_msg(msg, "error")
        context.page.send_page()
    # Try action
    else:
        from MoinMoin import action
        handler = action.getHandler(context, action_name)
        if handler is None:
            msg = _("You are not allowed to do %(action_name)s on this page.") % {
                    'action_name': wikiutil.escape(action_name), }
            if not context.user.valid:
                # Suggest non valid user to login
                msg += " " + _("Login and try again.")
            context.theme.add_msg(msg, "error")
            context.page.send_page()
        else:
            handler(context.page.page_name, context)

    return context
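
Both handle_action variants implement the same dispatch: unknown actions and capitalized actions that are not currently available produce a localized error message, everything else is looked up and invoked. The decision logic, isolated into a standalone sketch (the handler registry and the allowed set are hypothetical stand-ins for get_names/get_available_actions):

def dispatch(action_name, handlers, allowed, on_error):
    """Invoke an action handler, or report unknown/forbidden actions."""
    if action_name not in handlers:
        return on_error('Unknown action %s.' % action_name)
    # capitalized actions are the "extended" ones that need an availability check
    if action_name[0].isupper() and action_name not in allowed:
        return on_error('You are not allowed to do %s on this page.' % action_name)
    return handlers[action_name]()

handlers = {'show': lambda: 'rendered page', 'DeletePage': lambda: 'page deleted'}
print(dispatch('show', handlers, allowed=set(), on_error=lambda msg: msg))
print(dispatch('DeletePage', handlers, allowed=set(), on_error=lambda msg: msg))
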
Code Example #10
File: sisterpages.py Project: steveyen/moingo
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = timefuncs.formathttpdate(time.time() + 24 * 3600)

        httpheaders = [
            "Content-Type: text/plain; charset=UTF-8",
            "Expires: %s" % expires,
            "Last-Modified: %s" % timestamp,
            "Etag: %s" % etag,
        ]

        # send the generated XML document
        request.emit_http_headers(httpheaders)

        baseurl = request.getBaseURL()
        if not baseurl.endswith("/"):
            baseurl += "/"

        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode("utf-8"))
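
The 304 logic above follows the comment in the code: a response counts as "not modified" only when at least one of If-Modified-Since and If-None-Match is present and matches, and the other is either absent or also matches. The same decision as a compact standalone predicate:

def not_modified(if_modified_since, if_none_match, timestamp, etag):
    """True if a 304 Not Modified response is appropriate."""
    checks = []
    if if_modified_since is not None:
        checks.append(if_modified_since == timestamp)
    if if_none_match is not None:
        checks.append(if_none_match == etag)
    # at least one validator present, and every present validator matches
    return bool(checks) and all(checks)

ts, etag = 'Mon, 01 Jan 2024 00:00:00 GMT', '1704067200'
print(not_modified(ts, None, ts, etag))       # True: timestamp matches, no ETag sent
print(not_modified(None, 'stale', ts, etag))  # False: ETag sent but does not match
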
Code Example #11
File: AttachFile.py Project: Glottotopia/aagd
def info(pagename, request):
    """ Generate snippet with info on the attachment for page `pagename`. """
    _ = request.getText

    attach_dir = getAttachDir(request, pagename)
    files = []
    if os.path.isdir(attach_dir):
        files = os.listdir(attach_dir)
    page = Page(request, pagename)
    link = page.url(request, {'action': action_name})
    attach_info = _('There are <a href="%(link)s">%(count)s attachment(s)</a> stored for this page.') % {
        'count': len(files),
        'link': wikiutil.escape(link)
        }
    return "\n<p>\n%s\n</p>\n" % attach_info
Code Example #12
File: AttachFile.py Project: aahlad/soar
def info(pagename, request):
    """ Generate snippet with info on the attachment for page `pagename`. """
    _ = request.getText

    attach_dir = getAttachDir(request, pagename)
    files = []
    if os.path.isdir(attach_dir):
        files = os.listdir(attach_dir)
    page = Page(request, pagename)
    link = page.url(request, {'action': action_name})
    attach_info = _(
        'There are <a href="%(link)s">%(count)s attachment(s)</a> stored for this page.'
    ) % {
        'count': len(files),
        'link': wikiutil.escape(link)
    }
    return "\n<p>\n%s\n</p>\n" % attach_info
Code Example #13
File: sisterpages.py Project: aahlad/soar
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24 * 3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated XML document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(
                p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Code Example #14
File: sisterpages.py Project: Glottotopia/aagd
def execute(pagename, request):
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:
        lastmod = 0

    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod

    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24*3600

        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag

        # send the generated XML document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
Code Example #15
File: emailnotify.py Project: yuanbosdu/baikehow
def handle_file_changed(event):
    """Sends an email to users that have subscribed to this event type"""

    names = set()
    from_address = event.request.cfg.mail_from
    request = event.request
    page = Page(request, event.pagename)

    # Choose the message builder for the event type; this handler is only
    # registered for file-attached and file-removed events.
    if isinstance(event, ev.FileAttachedEvent):
        notification_fn = notification.attachment_added
    elif isinstance(event, ev.FileRemovedEvent):
        notification_fn = notification.attachment_removed

    subscribers = page.getSubscribers(request, return_users=1)
    notification.filter_subscriber_list(event, subscribers, False)
    recipients = []

    for lang in subscribers:
        recipients.extend(subscribers[lang])

    attachlink = request.getQualifiedURL(
        getAttachUrl(event.pagename, event.filename, request))
    pagelink = request.getQualifiedURL(page.url(request, {}))

    for lang in subscribers:
        _ = lambda text: request.getText(text, lang=lang)

        links = _("Attachment link: %(attach)s\n" \
                  "Page link: %(page)s\n") % {'attach': attachlink, 'page': pagelink}

        data = notification_fn(request, _, event.pagename, event.filename,
                               event.size)
        data['text'] = data['text'] + links

        emails = [usr.email for usr in subscribers[lang]]

        if send_notification(request, from_address, emails, data):
            names.update(recipients)

    return notification.Success(names)
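
handle_file_changed picks the message builder from the concrete event class with an if/elif chain. The same selection can be written as a type-keyed dispatch table, which makes the supported event types explicit (the event classes and builders below are stand-ins for ev.FileAttachedEvent, ev.FileRemovedEvent and the notification module):

class FileAttachedEvent(object):
    pass

class FileRemovedEvent(object):
    pass

def attachment_added_msg(filename):
    # stand-in for notification.attachment_added
    return 'attachment %s was added' % filename

def attachment_removed_msg(filename):
    # stand-in for notification.attachment_removed
    return 'attachment %s was removed' % filename

NOTIFICATION_BY_EVENT = {
    FileAttachedEvent: attachment_added_msg,
    FileRemovedEvent: attachment_removed_msg,
}

event = FileRemovedEvent()
builder = NOTIFICATION_BY_EVENT[type(event)]
print(builder('diagram.png'))
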
Code Example #16
File: useragents.py Project: steveyen/moingo
def linkto(pagename, request, params=""):
    _ = request.getText

    if not request.cfg.chart_options:
        return text(pagename, request)
    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {"action": "chart", "type": "useragents"}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += "&amp;" + params

    data = {"url": page.url(request, querystr)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="useragents chart">') % data

    return result
Code Example #17
File: useragents.py Project: Opngate/moinmoin
def linkto(pagename, request, params=''):
    _ = request.getText

    if not request.cfg.chart_options:
        return text(pagename, request)
    if _debug:
        return draw(pagename, request)

    page = Page(request, pagename)

    # Create escaped query string from dict and params
    querystr = {'action': 'chart', 'type': 'useragents'}
    querystr = wikiutil.makeQueryString(querystr)
    querystr = wikiutil.escape(querystr)
    if params:
        querystr += '&amp;' + params

    data = {'url': page.url(request, querystr)}
    data.update(request.cfg.chart_options)
    result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
              ' alt="useragents chart">') % data

    return result
Code Example #18
    def pageListWithContext(self, request, formatter, info=1, context=180,
                            maxlines=1, paging=True, hitsFrom=0, hitsInfo=0,
                            highlight_titles=True, highlight_pages=True):
        """ Format a list of found pages with context

        @param request: current request
        @param formatter: formatter to use
        @param info: show match info near the page link
        @param context: how many characters to show around each match.
        @param maxlines: how many contexts lines to show.
        @param paging: toggle paging
        @param hitsFrom: current position in the hits
        @param hitsInfo: toggle hits info line
        @param highlight_titles: perform highlighting in page list
        @param highlight_pages: add highlight parameter to page URLs
        @rtype: unicode
        @return: formatted page list with context
        """
        ngowikiutil = NgoWikiUtil(request)
        ngowikiutil.open_database()

        self._reset(request, formatter)
        f = formatter
        write = self.buffer.write
        _ = request.getText

        if paging and len(self.hits) <= request.cfg.search_results_per_page:
            paging = False

        # Add pages formatted as definition list
        hitsTo = hitsFrom + request.cfg.search_results_per_page + 1
        hitDiff = 0
        if self.hits:
            write(f.definition_list(1))

            displayHits = []
            hitIdx = 0
            for hit in self.hits:
                result = ngowikiutil.select_page_by_path(hit.page_name)
                if result is None:
                    hitDiff = hitDiff + 1
                    continue
                if len(ngowikiutil.select_page_tags_by_id(result["id"])) == 0:
                    hitDiff = hitDiff + 1
                    continue
                hitIdx = hitIdx + 1
                if paging:
                    if hitsTo <= hitIdx - 1:
                        break
                    if hitsFrom <= hitIdx - 1:
                        displayHits.append(hit)
                else:
                    displayHits.append(hit)
            if len(displayHits) <= request.cfg.search_results_per_page and hitsFrom == 0:
                paging = False
            if len(displayHits) > request.cfg.search_results_per_page:
                displayHits = displayHits[0:request.cfg.search_results_per_page]

            if len(displayHits) == 0:
                write(u'没有找到相关内容,请调整搜索条件重新搜索')  # "No matching content found; please adjust the search criteria and search again"

            template = '''
                <table class="listitem_with_logosummary">
                    <tr>
                        <!--
                        <td class="logo">
                            %(logo)s
                        </td>
                        -->
                        <td>
                           <div class="title">
                              <a href="%(link)s">%(title)s</a>
                           </div>
                           <div class="meta">
                               <span>%(lastmodified)s</span>
                               <span>%(tags)s</span>
                               <span><span class="metaitem">%(likecount)s<span></span>
                               <span><span class="metaitem">%(commentcount)s<span></span>
                               <span><span class="metaitem">%(hitcount)s<span></span>
                           </div>
                           <div class="summary">%(summary)s</div>
                        </td>
                     </tr>
                 </table>
                '''
            for page in displayHits:
                result = ngowikiutil.select_page_by_path(page.page_name)
                if result is not None:
                    if len(ngowikiutil.select_page_tags_by_id(result["id"])) > 0:
                        page = Page(request, result["path"]) 
                        logo = '<div class="logo defaultLogo">&nbsp;</div>'
                        if len(result["logo"]) > 0 and exists(request, result["path"], result["logo"]):
                            logo = '<img class="logo" src="' + getAttachUrl(result["path"], result["logo"], request) + '">'
                        link = page.url(request)
                        title = result["title"]
                        lastmodified = page.mtime_printable(request)
                        summary = result["summary"].replace(u"'''", "").replace(u"【请在此插入图片】", "").replace(u"【请在此插入图片,最多可插入9张】", "")

                        tags = (", ".join(
                                  map(lambda x: '<a href=\'javascript:add_filter_by_tag(' + json.dumps(x["tag"]) + ')\' >' + x["tag"] + '</a>', 
                                      filter(lambda x: x["type"] == 1, ngowikiutil.select_page_tags_by_id(result["id"]))
                                  )))

                        if len(tags) > 0:
                            tags = '<span class="metaitem">' + tags + '</span>'

                        write(template % {"logo":logo, "title": title, "link": link, "lastmodified": lastmodified, "tags": tags, "summary": summary, "likecount": u'\u559c\u6b22\uff1a' + str(result["likecount"]), "commentcount": u'\u8bc4\u8bba\u6570\uff1a' + str(result["commentcount"]), "hitcount": u'\u8bbf\u95ee\u91cf\uff1a' + str(result["hitcount"])})
            write(f.definition_list(0))
            if paging:
                write(self.formatPageLinks(hitsFrom=hitsFrom,
                    hitsPerPage=request.cfg.search_results_per_page,
                    hitsNum=len(self.hits) - hitDiff))

        return self.getvalue()
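
pageListWithContext first drops hits that have no database record or no tags, counting them in hitDiff so the pager stays consistent, then keeps at most search_results_per_page hits starting at hitsFrom. That windowing, reduced to a standalone sketch:

def page_window(hits, keep, hits_from, per_page):
    """Filter hits with `keep`, then return one page of results plus the
    number of filtered-out hits (the equivalent of hitDiff)."""
    kept, dropped = [], 0
    for hit in hits:
        if keep(hit):
            kept.append(hit)
        else:
            dropped += 1
    return kept[hits_from:hits_from + per_page], dropped

hits = list(range(10))
print(page_window(hits, lambda h: h % 2 == 0, hits_from=0, per_page=3))
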
Code Example #19
File: Include.py Project: aahlad/soar
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M)):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = text and args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text, ))

    # prepare including page
    result = []
    print_mode = request.action in ("print", "format")
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if inc_name in this_page._macroInclude_pagelist:
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name, ))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        if not inc_page.exists():
            continue
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name

        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title()
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                url = inc_page.url(request)
                result.extend([
                    macro.formatter.heading(1, level, id=heading),
                    macro.formatter.url(1, url, css="include-heading-link"),
                    macro.formatter.text(heading),
                    macro.formatter.url(0),
                    macro.formatter.heading(0, level),
                ])

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            inc_page.send_page(content_only=True,
                               omit_footnotes=True,
                               count_hit=False)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                macro.formatter.div(1, css_class="include-link"),
                inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                macro.formatter.div(0),
            ])
        # XXX page.link_to is wrong now, it escapes the edit_icon html as it escapes normal text

    # return include text
    return ''.join(result)
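
The Include macro protects itself against include cycles with _macroInclude_pagelist, a per-page counter of pages currently being included: the counter is incremented before the included page is rendered and decremented (or deleted) afterwards, and any page already present is refused. A minimal standalone sketch of that guard:

def include(name, pages, active=None):
    """Render `name` from `pages`, refusing re-entrant includes.

    `active` plays the role of _macroInclude_pagelist."""
    active = {} if active is None else active
    if name in active:
        return '<recursive include of "%s" forbidden>' % name
    active[name] = active.get(name, 0) + 1
    try:
        # a real implementation would render the body here, and any nested
        # include would call include() again with the same `active` dict
        return pages[name]
    finally:
        if active[name] > 1:
            active[name] -= 1
        else:
            del active[name]

print(include('FrontPage', {'FrontPage': 'welcome text'}))
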
Code Example #20
File: h1ds.py Project: dpretty/h1ds
    def send_title(self, text, **keywords):
        """
        Output the page header (and title).

        @param text: the title text
        @keyword page: the page instance that called us - using this is more efficient than using pagename.
        @keyword pagename: 'PageName'
        @keyword print_mode: 1 (or 0)
        @keyword editor_mode: 1 (or 0)
        @keyword media: css media type, defaults to 'screen'
        @keyword allow_doubleclick: 1 (or 0)
        @keyword html_head: additional <head> code
        @keyword body_attr: additional <body> attributes
        @keyword body_onload: additional "onload" JavaScript code
        """
        request = self.request
        _ = request.getText
        rev = request.rev

        if keywords.has_key('page'):
            page = keywords['page']
            pagename = page.page_name
        else:
            pagename = keywords.get('pagename', '')
            page = Page(request, pagename)
        if keywords.get('msg', ''):
            raise DeprecationWarning(
                "Using send_page(msg=) is deprecated! Use theme.add_msg() instead!"
            )
        scriptname = request.script_root

        # get name of system pages
        page_front_page = wikiutil.getFrontPage(request).page_name
        page_help_contents = wikiutil.getLocalizedPage(
            request, 'HelpContents').page_name
        page_title_index = wikiutil.getLocalizedPage(request,
                                                     'TitleIndex').page_name
        page_site_navigation = wikiutil.getLocalizedPage(
            request, 'SiteNavigation').page_name
        page_word_index = wikiutil.getLocalizedPage(request,
                                                    'WordIndex').page_name
        page_help_formatting = wikiutil.getLocalizedPage(
            request, 'HelpOnFormatting').page_name
        page_find_page = wikiutil.getLocalizedPage(request,
                                                   'FindPage').page_name
        home_page = wikiutil.getInterwikiHomePage(
            request
        )  # sorry theme API change!!! Either None or tuple (wikiname,pagename) now.
        page_parent_page = getattr(page.getParentPage(), 'page_name', None)

        # Prepare the HTML <head> element
        user_head = [request.cfg.html_head]

        # include charset information - needed for moin_dump or any other case
        # when reading the html without a web server
        user_head.append(
            '''<meta http-equiv="Content-Type" content="%s;charset=%s">\n''' %
            (page.output_mimetype, page.output_charset))

        meta_keywords = request.getPragma('keywords')
        meta_desc = request.getPragma('description')
        if meta_keywords:
            user_head.append('<meta name="keywords" content="%s">\n' %
                             wikiutil.escape(meta_keywords, 1))
        if meta_desc:
            user_head.append('<meta name="description" content="%s">\n' %
                             wikiutil.escape(meta_desc, 1))

        # search engine precautions / optimization:
        # if it is an action or edit/search, send query headers (noindex,nofollow):
        if request.query_string:
            user_head.append(request.cfg.html_head_queries)
        elif request.method == 'POST':
            user_head.append(request.cfg.html_head_posts)
        # we don't want to have BadContent stuff indexed:
        elif pagename in [
                'BadContent',
                'LocalBadContent',
        ]:
            user_head.append(request.cfg.html_head_posts)
        # if it is a special page, index it and follow the links - we do it
        # for the original, English pages as well as for (the possibly
        # modified) frontpage:
        elif pagename in [
                page_front_page,
                request.cfg.page_front_page,
                page_title_index,
                'TitleIndex',
                page_find_page,
                'FindPage',
                page_site_navigation,
                'SiteNavigation',
                'RecentChanges',
        ]:
            user_head.append(request.cfg.html_head_index)
        # if it is a normal page, index it, but do not follow the links, because
        # there are a lot of illegal links (like actions) or duplicates:
        else:
            user_head.append(request.cfg.html_head_normal)

        if 'pi_refresh' in keywords and keywords['pi_refresh']:
            user_head.append(
                '<meta http-equiv="refresh" content="%d;URL=%s">' %
                keywords['pi_refresh'])

        # output buffering increases latency but increases throughput as well
        output = []
        # later: <html xmlns=\"http://www.w3.org/1999/xhtml\">
        output.append("""\
<!doctype html>
<!--[if lt IE 7]> <html class="no-js ie6 oldie" lang="en"> <![endif]-->
<!--[if IE 7]>    <html class="no-js ie7 oldie" lang="en"> <![endif]-->
<!--[if IE 8]>    <html class="no-js ie8 oldie" lang="en"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
  <title>%(title)s</title>
  <meta name="description" content="">
  <meta name="author" content="">
  <meta name="viewport" content="width=device-width,initial-scale=1">
  <link rel="stylesheet" href="%(static_url)scss/style.css">
  <script src="%(static_url)sjs/libs/moin.common.js"></script>
<script type="text/javascript">
<!--
var search_hint = "Search";
//-->
</script>
  <script src="%(static_url)sjs/libs/modernizr-2.0.6.min.js"></script>
""" % {
            'title': text,
            'static_url': django_settings.STATIC_URL
        })

        # Links
        output.append('<link rel="Start" href="%s">\n' %
                      request.href(page_front_page))
        if pagename:
            output.append(
                '<link rel="Alternate" title="%s" href="%s">\n' %
                (_('Wiki Markup'), request.href(pagename, action='raw')))
            output.append(
                '<link rel="Alternate" media="print" title="%s" href="%s">\n' %
                (_('Print View'), request.href(pagename, action='print')))

            # !!! currently disabled due to Mozilla link prefetching, see
            # http://www.mozilla.org/projects/netlib/Link_Prefetching_FAQ.html
            #~ all_pages = request.getPageList()
            #~ if all_pages:
            #~     try:
            #~         pos = all_pages.index(pagename)
            #~     except ValueError:
            #~         # this should never happen in theory, but let's be sure
            #~         pass
            #~     else:
            #~         request.write('<link rel="First" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[0]))
            #~         if pos > 0:
            #~             request.write('<link rel="Previous" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[pos-1])))
            #~         if pos+1 < len(all_pages):
            #~             request.write('<link rel="Next" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[pos+1])))
            #~         request.write('<link rel="Last" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[-1])))

            if page_parent_page:
                output.append('<link rel="Up" href="%s">\n' %
                              request.href(page_parent_page))

        # write buffer because we call AttachFile
        request.write(''.join(output))
        output = []

        # XXX maybe this should be removed completely. moin emits all attachments as <link rel="Appendix" ...>
        # and it is at least questionable if this fits into the original intent of rel="Appendix".
        if pagename and request.user.may.read(pagename):
            from MoinMoin.action import AttachFile
            AttachFile.send_link_rel(request, pagename)

        output.extend([
            '<link rel="Search" href="%s">\n' % request.href(page_find_page),
            '<link rel="Index" href="%s">\n' % request.href(page_title_index),
            '<link rel="Glossary" href="%s">\n' %
            request.href(page_word_index),
            '<link rel="Help" href="%s">\n' %
            request.href(page_help_formatting),
        ])

        output.append("</head>\n")
        request.write(''.join(output))
        output = []

        # start the <body>
        bodyattr = []
        if keywords.has_key('body_attr'):
            bodyattr.append(' ')
            bodyattr.append(keywords['body_attr'])

        # Add doubleclick edit action
        if (pagename and keywords.get('allow_doubleclick', 0)
                and not keywords.get('print_mode', 0)
                and request.user.edit_on_doubleclick):
            if request.user.may.write(pagename):  # separating this gains speed
                url = page.url(request, {'action': 'edit'})
                bodyattr.append(''' ondblclick="location.href='%s'" ''' %
                                wikiutil.escape(url, True))

        # Set body to the user interface language and direction
        bodyattr.append(' %s' % self.ui_lang_attr())

        body_onload = keywords.get('body_onload', '')
        if body_onload:
            bodyattr.append(''' onload="%s"''' % body_onload)
        output.append('\n<body%s>\n' % ''.join(bodyattr))

        # Output -----------------------------------------------------------

        # If in print mode, start page div and emit the title
        if keywords.get('print_mode', 0):
            d = {
                'title_text': text,
                'page': page,
                'page_name': pagename or '',
                'rev': rev,
            }
            request.themedict = d
            output.append(self.startPage())
            output.append(self.interwiki(d))
            output.append(self.title(d))

        # In standard mode, emit theme.header
        else:
            exists = pagename and page.exists(includeDeleted=True)
            # prepare dict for theme code:
            d = {
                'theme': self.name,
                'script_name': scriptname,
                'title_text': text,
                'logo_string': request.cfg.logo_string,
                'site_name': request.cfg.sitename,
                'page': page,
                'rev': rev,
                'pagesize': pagename and page.size() or 0,
                # exists checked to avoid creation of empty edit-log for non-existing pages
                'last_edit_info': exists and page.lastEditInfo() or '',
                'page_name': pagename or '',
                'page_find_page': page_find_page,
                'page_front_page': page_front_page,
                'home_page': home_page,
                'page_help_contents': page_help_contents,
                'page_help_formatting': page_help_formatting,
                'page_parent_page': page_parent_page,
                'page_title_index': page_title_index,
                'page_word_index': page_word_index,
                'user_name': request.user.name,
                'user_valid': request.user.valid,
                'msg': self._status,
                'trail': keywords.get('trail', None),
                # Discontinued keys, keep for a while for 3rd party theme developers
                'titlesearch': 'use self.searchform(d)',
                'textsearch': 'use self.searchform(d)',
                'navibar': ['use self.navibar(d)'],
                'available_actions':
                ['use self.request.availableActions(page)'],
            }

            # add quoted versions of pagenames
            newdict = {}
            for key in d:
                if key.startswith('page_'):
                    if not d[key] is None:
                        newdict['q_' + key] = wikiutil.quoteWikinameURL(d[key])
                    else:
                        newdict['q_' + key] = None
            d.update(newdict)
            request.themedict = d

            # now call the theming code to do the rendering
            if keywords.get('editor_mode', 0):
                output.append(self.editorheader(d))
            else:
                output.append(self.header(d))

        # emit it
        request.write(''.join(output))
        output = []
        self._send_title_called = True
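
One non-obvious part of send_title is how it decides which robot-control <head> snippet to emit: noindex/nofollow for query strings, POST requests and the BadContent pages, index-and-follow for a short list of navigation pages, and index-only for ordinary pages. That decision, isolated into a standalone function (the default meta tags are illustrative; the real values come from the html_head_* config options):

def robots_head(query_string, method, pagename, index_pages,
                head_queries='<meta name="robots" content="noindex,nofollow">',
                head_posts='<meta name="robots" content="noindex,nofollow">',
                head_index='<meta name="robots" content="index,follow">',
                head_normal='<meta name="robots" content="index,nofollow">'):
    """Pick the robots meta tag the way send_title's head selection does."""
    if query_string:
        return head_queries
    if method == 'POST' or pagename in ('BadContent', 'LocalBadContent'):
        return head_posts
    if pagename in index_pages:
        return head_index
    return head_normal

print(robots_head('', 'GET', 'FrontPage', index_pages={'FrontPage', 'TitleIndex'}))
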
Code Example #21
    def menu(self, d):
        """
        Build dropdown menu html. Incompatible with original actionsMenu() method.

        Menu can be customized by adding a config variable 'memodump_menuoverride'.
        The variable will override the default menu set.
        Additional menu definitions are given via config method 'memodump_menu_def(request)'.
        See the code below or project wiki for details.

        @param d: parameter dictionary
        @rtype: string
        @return: menu html
        """
        request = self.request
        _ = request.getText
        rev = request.rev
        page = d['page']

        page_recent_changes = wikiutil.getLocalizedPage(request, u'RecentChanges')
        page_find_page = wikiutil.getLocalizedPage(request, u'FindPage')
        page_help_contents = wikiutil.getLocalizedPage(request, u'HelpContents')
        page_help_formatting = wikiutil.getLocalizedPage(request, u'HelpOnFormatting')
        page_help_wikisyntax = wikiutil.getLocalizedPage(request, u'HelpOnMoinWikiSyntax')
        page_title_index = wikiutil.getLocalizedPage(request, u'TitleIndex')
        page_word_index = wikiutil.getLocalizedPage(request, u'WordIndex')
        page_front_page = wikiutil.getFrontPage(request)
        page_sidebar = Page(request, request.getPragma('sidebar', u'SideBar'))
        quicklink = self.menuQuickLink(page)
        subscribe = self.menuSubscribe(page)

        try:
            menu = request.cfg.memodump_menuoverride
        except AttributeError:
            # default list of items in dropdown menu.
            # menu items are assembled in this order.
            # see wiki for detailed info on customization.
            menu = [
                '===== Navigation =====',
                'RecentChanges',
                'FindPage',
                'LocalSiteMap',
                '__separator__',
                '===== Help =====',
                'HelpContents',
                'HelpOnMoinWikiSyntax',
                '__separator__',
                '===== Display =====',
                'AttachFile',
                'info',
                'raw',
                'print',
                '__separator__',
                '===== Edit =====',
                'RenamePage',
                'DeletePage',
                'revert',
                'CopyPage',
                'Load',
                'Save',
                'Despam',
                'editSideBar',
                '__separator__',
                '===== User =====',
                'quicklink',
                'subscribe',
            ]

        # menu element definitions
        menu_def = {
            'raw': {
                # Title for this menu entry
                'title': _('Raw Text'),
                # href and args are for normal entries ('special': False), otherwise ignored.
                # 'href': Nonexistent or empty for current page
                'href': '',
                # 'args': {'query1': 'value1', 'query2': 'value2', }
                # Optionally specify this for <a href="href?query1=value1&query2=value2">
                # If href and args are both nonexistent or empty, key is automatically interpreted to be an action name
                # and href and args are automatically set.
                'args': '',
                # 'special' can be:
                #   'disabled', 'removed', 'separator' or 'header' for whatever they say,
                #    False, None or nonexistent for normal menu display.
                # 'separator' and 'header' are automatically removed when there are no entries to show among them.
                'special': False,
            },
            'print': {'title': _('Print View'), },
            'refresh': {
                'title': _('Delete Cache'),
                'special': not (self.memodumpIsAvailableAction(page, 'refresh') and page.canUseCache()) and 'removed',
            },
            'SpellCheck': {'title': _('Check Spelling'), },
            'RenamePage': {'title': _('Rename Page'), },
            'CopyPage':   {'title': _('Copy Page'), },
            'DeletePage': {'title': _('Delete Page'), },
            'LikePages':  {'title': _('Like Pages'), },
            'LocalSiteMap': {'title': _('Local Site Map'), },
            'MyPages':    {'title': _('My Pages'), },
            'SubscribeUser': {
                'title': _('Subscribe User'),
                'special': not (self.memodumpIsAvailableAction(page, 'SubscribeUser')
                                and request.user.may.admin(page.page_name)) and 'removed',
            },
            'Despam': {
                'title': _('Remove Spam'),
                'special': not (self.memodumpIsAvailableAction(page, 'Despam') and request.user.isSuperUser()) and 'removed',
            },
            'revert': {
                'title': _('Revert to this revision'),
                'special': not (self.memodumpIsAvailableAction(page, 'revert')
                                and rev
                                and request.user.may.revert(page.page_name)) and 'removed',
            },
            'PackagePages': {'title': _('Package Pages'), },
            'RenderAsDocbook': {'title': _('Render as Docbook'), },
            'SyncPages': {'title': _('Sync Pages'), },
            'AttachFile': {'title': _('Attachments'), },
            'quicklink': {
                'title': quicklink[1], 'args': dict(action=quicklink[0], rev=rev),
                'special': not quicklink[0] and 'removed',
            },
            'subscribe': {
                'title': subscribe[1], 'args': dict(action=subscribe[0], rev=rev),
                'special': not subscribe[0] and 'removed',
            },
            'info': {'title': _('Info'), },
# menu items not in menu_def will be assumed to be action names,
# and receive appropriate title, href, and args automatically.
#           'Load': {'title': _('Load'), },
#           'Save': {'title': _('Save'), },
            # menu decorations
            '__separator__':   {'title': _('------------------------'), 'special': 'separator', },
            '----':            {'title': _('------------------------'), 'special': 'separator', },
            '-----':           {'title': _('------------------------'), 'special': 'separator', },
            '------':          {'title': _('------------------------'), 'special': 'separator', },
            '-------':         {'title': _('------------------------'), 'special': 'separator', },
            '--------':        {'title': _('------------------------'), 'special': 'separator', },
            '---------':       {'title': _('------------------------'), 'special': 'separator', },
            '----------':      {'title': _('------------------------'), 'special': 'separator', },
            # header example
            '__title_navigation__': {'title': _('Navigation'), 'special': 'header', },
            # useful pages
            'RecentChanges':   {'title': page_recent_changes.page_name, 'href': page_recent_changes.url(request)},
            'FindPage':        {'title': page_find_page.page_name, 'href': page_find_page.url(request)},
            'HelpContents':    {'title': page_help_contents.page_name, 'href': page_help_contents.url(request)},
            'HelpOnFormatting': {'title': page_help_formatting.page_name, 'href': page_help_formatting.url(request)},
            'HelpOnMoinWikiSyntax': {'title': page_help_wikisyntax.page_name, 'href': page_help_wikisyntax.url(request)},
            'TitleIndex':      {'title': page_title_index.page_name, 'href': page_title_index.url(request)},
            'WordIndex':       {'title': page_word_index.page_name, 'href': page_word_index.url(request)},
            'FrontPage':       {'title': page_front_page.page_name, 'href': page_front_page.url(request)},
            'SideBar':         {'title': page_sidebar.page_name, 'href': page_sidebar.url(request)},
            'editSideBar': {
                'title': _('Edit SideBar'), 'href': page_sidebar.url(request),
                'args': dict(action='edit'),
                'special': not self.memodumpIsEditablePage(page_sidebar) and 'removed'
            },
        }

        # register state determining functions on request for use in config
        request.memodumpIsAvailableAction = self.memodumpIsAvailableAction
        request.memodumpIsEditablePage = self.memodumpIsEditablePage

        try:
            menu_def.update(request.cfg.memodump_menu_def(request))
        except AttributeError:
            pass

        compiled = self.menuCompile(d, menu, menu_def)
        menubody = self.menuRender(compiled)

        if menubody:
            html = u'''
            <li class="dropdown">
              <!-- Menu button -->
              <a href="#" class="menu-nav-menu dropdown-toggle" data-toggle="dropdown">
                %s<span class="padding"></span><span class="caret"></span>
              </a>
              <!-- Dropdown contents -->
              <ul class="dropdown-menu">
%s
              </ul>
            </li> <!-- /dropdown -->
''' % (_('Menu'), menubody)
        else:
            html = u''

        return html
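
menu() works from an ordered list of entry keys (overridable via memodump_menuoverride) and a dict of entry definitions, where entries can be flagged 'removed', 'disabled', 'separator' or 'header'; the list is then compiled and rendered. menuCompile/menuRender themselves are not shown above, so the following is only a toy illustration of that data flow, not the theme's actual implementation:

def compile_menu(order, defs):
    """Resolve an ordered list of menu keys against their definitions,
    skipping entries flagged as removed."""
    compiled = []
    for key in order:
        entry = defs.get(key, {'title': key})  # unknown keys fall back to the key itself
        if entry.get('special') == 'removed':
            continue
        compiled.append((key, entry['title']))
    return compiled

defs = {'raw': {'title': 'Raw Text'},
        'Despam': {'title': 'Remove Spam', 'special': 'removed'}}
print(compile_menu(['raw', 'Despam', 'info'], defs))
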
Code Example #22
def send_title(request, text, **keywords):
    """
    Output the page header (and title).

    TODO: check all code that calls us and add page keyword for the
    current page being rendered.
    
    @param request: the request object
    @param text: the title text
    @keyword link: URL for the title
    @keyword msg: additional message (after saving)
    @keyword pagename: 'PageName'
    @keyword page: the page instance that called us.
    @keyword print_mode: 1 (or 0)
    @keyword media: css media type, defaults to 'screen'
    @keyword allow_doubleclick: 1 (or 0)
    @keyword html_head: additional <head> code
    @keyword body_attr: additional <body> attributes
    @keyword body_onload: additional "onload" JavaScript code
    """
    from MoinMoin.Page import Page
    _ = request.getText
    
    if keywords.has_key('page'):
        page = keywords['page']
        pagename = page.page_name
    else:
        pagename = keywords.get('pagename', '')
        page = Page(request, pagename)
    
    scriptname = request.getScriptname()
    pagename_quoted = quoteWikinameURL(pagename)

    # get name of system pages
    page_front_page = getFrontPage(request).page_name
    page_help_contents = getSysPage(request, 'HelpContents').page_name
    page_title_index = getSysPage(request, 'TitleIndex').page_name
    page_word_index = getSysPage(request, 'WordIndex').page_name
    page_user_prefs = getSysPage(request, 'UserPreferences').page_name
    page_help_formatting = getSysPage(request, 'HelpOnFormatting').page_name
    page_find_page = getSysPage(request, 'FindPage').page_name
    page_home_page = getattr(getHomePage(request), 'page_name', None)
    page_parent_page = getattr(page.getParentPage(), 'page_name', None)
    
    # Prepare the HTML <head> element
    user_head = [request.cfg.html_head]

    # include charset information - needed for moin_dump or any other case
    # when reading the html without a web server
    user_head.append('''<meta http-equiv="Content-Type" content="text/html;charset=%s">\n''' % config.charset)

    meta_keywords = request.getPragma('keywords')
    meta_desc = request.getPragma('description')
    if meta_keywords:
        user_head.append('<meta name="keywords" content="%s">\n' % escape(meta_keywords, 1))
    if meta_desc:
        user_head.append('<meta name="description" content="%s">\n' % escape(meta_desc, 1))

    # search engine precautions / optimization:
    # if it is an action or edit/search, send query headers (noindex,nofollow):
    if request.query_string:
        user_head.append(request.cfg.html_head_queries)
    elif request.request_method == 'POST':
        user_head.append(request.cfg.html_head_posts)
    # if it is a special page, index it and follow the links - we do it
    # for the original, English pages as well as for (the possibly
    # modified) frontpage:
    elif pagename in [page_front_page, request.cfg.page_front_page,
                      page_title_index, ]:
        user_head.append(request.cfg.html_head_index)
    # if it is a normal page, index it, but do not follow the links, because
    # there are a lot of illegal links (like actions) or duplicates:
    else:
        user_head.append(request.cfg.html_head_normal)

    if keywords.has_key('pi_refresh') and keywords['pi_refresh']:
        user_head.append('<meta http-equiv="refresh" content="%(delay)d;URL=%(url)s">' % keywords['pi_refresh'])
    
    # output buffering increases latency but increases throughput as well
    output = []
    # later: <html xmlns=\"http://www.w3.org/1999/xhtml\">
    output.append("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
%s
%s
%s
""" % (
        ''.join(user_head),
        keywords.get('html_head', ''),
        request.theme.html_head({
            'title': escape(text),
            'sitename': escape(request.cfg.html_pagetitle or request.cfg.sitename),
            'print_mode': keywords.get('print_mode', False),
            'media': keywords.get('media', 'screen'),
        })
    ))

    # Links
    output.append('<link rel="Start" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_front_page)))
    if pagename:
        output.append('<link rel="Alternate" title="%s" href="%s/%s?action=raw">\n' % (
            _('Wiki Markup'), scriptname, pagename_quoted,))
        output.append('<link rel="Alternate" media="print" title="%s" href="%s/%s?action=print">\n' % (
            _('Print View'), scriptname, pagename_quoted,))

        # !!! currently disabled due to Mozilla link prefetching, see
        # http://www.mozilla.org/projects/netlib/Link_Prefetching_FAQ.html
        #~ all_pages = request.getPageList()
        #~ if all_pages:
        #~     try:
        #~         pos = all_pages.index(pagename)
        #~     except ValueError:
        #~         # this should never happen in theory, but let's be sure
        #~         pass
        #~     else:
        #~         request.write('<link rel="First" href="%s/%s">\n' % (request.getScriptname(), quoteWikinameURL(all_pages[0]))
        #~         if pos > 0:
        #~             request.write('<link rel="Previous" href="%s/%s">\n' % (request.getScriptname(), quoteWikinameURL(all_pages[pos-1])))
        #~         if pos+1 < len(all_pages):
        #~             request.write('<link rel="Next" href="%s/%s">\n' % (request.getScriptname(), quoteWikinameURL(all_pages[pos+1])))
        #~         request.write('<link rel="Last" href="%s/%s">\n' % (request.getScriptname(), quoteWikinameURL(all_pages[-1])))

        if page_parent_page:
            output.append('<link rel="Up" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_parent_page)))

    # write buffer because we call AttachFile
    request.write(''.join(output))
    output = []

    if pagename:
        from MoinMoin.action import AttachFile
        AttachFile.send_link_rel(request, pagename)

    output.extend([
        '<link rel="Search" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_find_page)),
        '<link rel="Index" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_title_index)),
        '<link rel="Glossary" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_word_index)),
        '<link rel="Help" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_help_formatting)),
                  ])

    output.append("</head>\n")
    request.write(''.join(output))
    output = []
    request.flush()

    # start the <body>
    bodyattr = []
    if keywords.has_key('body_attr'):
        bodyattr.append(' ')
        bodyattr.append(keywords['body_attr'])

    # Add doubleclick edit action
    if (pagename and keywords.get('allow_doubleclick', 0) and
        not keywords.get('print_mode', 0) and
        request.user.edit_on_doubleclick):
        if request.user.may.write(pagename): # separating this gains speed
            querystr = escape(util.web.makeQueryString({'action': 'edit'}))
            # TODO: remove escape=0 in 1.4
            url = page.url(request, querystr, escape=0)
            bodyattr.append(''' ondblclick="location.href='%s'"''' % url)

    # Set body to the user interface language and direction
    bodyattr.append(' %s' % request.theme.ui_lang_attr())
    
    body_onload = keywords.get('body_onload', '')
    if body_onload:
        bodyattr.append(''' onload="%s"''' % body_onload)
    output.append('\n<body%s>\n' % ''.join(bodyattr))

    # Output -----------------------------------------------------------

    theme = request.theme
    
    # If in print mode, start page div and emit the title
    if keywords.get('print_mode', 0):
        d = {'title_text': text, 'title_link': None, 'page': page,}
        request.themedict = d
        output.append(theme.startPage())
        output.append(theme.title(d))      

    # In standard mode, emit theme.header
    else:
        # prepare dict for theme code:
        d = {
            'theme': theme.name,
            'script_name': scriptname,
            'title_text': text,
            'title_link': keywords.get('link', ''),
            'logo_string': request.cfg.logo_string,
            'site_name': request.cfg.sitename,
            'page': page,
            'pagesize': pagename and page.size() or 0,
            'last_edit_info': pagename and page.lastEditInfo() or '',
            'page_name': pagename or '',
            'page_find_page': page_find_page,
            'page_front_page': page_front_page,
            'page_home_page': page_home_page,
            'page_help_contents': page_help_contents,
            'page_help_formatting': page_help_formatting,
            'page_parent_page': page_parent_page,
            'page_title_index': page_title_index,
            'page_word_index': page_word_index,
            'page_user_prefs': page_user_prefs,
            'user_name': request.user.name,
            'user_valid': request.user.valid,
            'user_prefs': (page_user_prefs, request.user.name)[request.user.valid],
            'msg': keywords.get('msg', ''),
            'trail': keywords.get('trail', None),
            # Discontinued keys, keep for a while for 3rd party theme developers
            'titlesearch': 'use self.searchform(d)',
            'textsearch': 'use self.searchform(d)',
            'navibar': ['use self.navibar(d)'],
            'available_actions': ['use self.request.availableActions(page)'],
        }

        # add quoted versions of pagenames
        newdict = {}
        for key in d:
            if key.startswith('page_'):
                if not d[key] is None:
                    newdict['q_'+key] = quoteWikinameURL(d[key])
                else:
                    newdict['q_'+key] = None
        d.update(newdict)
        request.themedict = d

        # now call the theming code to do the rendering
        output.append(theme.header(d))
    
    # emit it
    request.write(''.join(output))
    output = []
    request.flush()
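
A minimal sketch of an action plugin driving the helper above, mirroring the calls that appear elsewhere in these examples; the action name and message are illustrative, and MoinMoin must of course be importable:

from MoinMoin import wikiutil

def execute(pagename, request):
    _ = request.getText
    # send HTTP headers, then the common page header/title
    request.http_headers()
    wikiutil.send_title(request, _('My hypothetical action'),
                        pagename=pagename, allow_doubleclick=1)
    # a real action would render its body through a Formatter here
    request.write(u'<p>%s</p>' % _('Nothing to report.'))
    wikiutil.send_footer(request, pagename, editable=0, showactions=0,
                         form=request.form)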
Code example #23
File: fullsearch.py Project: moinwiki/moin-legacy
def execute(pagename, request, fieldname='value', titlesearch=0):
    _ = request.getText
    titlesearch = isTitleSearch(request)

    # context is relevant only for full search
    if titlesearch:        
        context = 0      
    else:
        context = int(request.form.get('context', [0])[0])        
    
    # Get other form parameters
    needle = request.form.get(fieldname, [''])[0]
    case = int(request.form.get('case', [0])[0])
    regex = int(request.form.get('regex', [0])[0]) # no interface currently

    max_context = 1 # only show first `max_context` contexts XXX still unused

    # check for sensible search term
    stripped = needle.strip()
    if len(stripped) == 0:
        err = _('Please use a more selective search term instead '
                'of {{{"%s"}}}') % needle
        # send http headers
        request.http_headers()
        Page(request, pagename).send_page(request, msg=err) 
        return

    # search the pages
    from MoinMoin import search
    query = search.QueryParser(case=case, regex=regex,
                               titlesearch=titlesearch).parse_query(needle)
    results = search.searchPages(request, query)

    # directly show a single hit
    # XXX won't work with attachment search
    # improve if we have one...
    if len(results.hits) == 1:
        page = Page(request, results.hits[0].page_name)
        # TODO: remove escape=0 in 1.4
        url = page.url(request, querystr={'highlight': query.highlight_re()},
                       escape=0)
        request.http_redirect(url)
        raise MoinMoinNoFooter

    # send http headers
    request.http_headers()

    # This action generates data using the user language
    request.setContentLanguage(request.lang)

    # Setup for type of search
    if titlesearch:
        title = _('Title Search: "%s"')
        results.sortByPagename()
    else:
        title = _('Full Text Search: "%s"')
        results.sortByWeight() 

    wikiutil.send_title(request, title % needle, form=request.form,
                        pagename=pagename)
    
    # Start content (important for RTL support)
    formatter = Formatter(request)
    request.write(formatter.startContent("content"))

    # First search stats
    request.write(results.stats(request, formatter))

    # Then search results
    info = not titlesearch
    if context:
        output = results.pageListWithContext(request, formatter, info=info,
                                             context=context)
    else:
        output = results.pageList(request, formatter, info=info)        
    request.write(output)

    # End content and send footer
    request.write(formatter.endContent())
    wikiutil.send_footer(request, pagename, editable=0, showactions=0,
                         form=request.form)
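
A small sketch of building a request for the action above, using only the form parameters execute() actually reads (value, context, case, regex); the wiki root and search term are made up, and whether the request ends up as a title search is decided by isTitleSearch(), which is not shown here:

import urllib

params = {'action': 'fullsearch', 'value': 'needle',
          'context': 40, 'case': 1, 'regex': 0}
print '/mywiki/FrontPage?' + urllib.urlencode(params)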
Code example #24
File: h1ds.py Project: dpretty/h1ds
    def send_title(self, text, **keywords):
        """
        Output the page header (and title).

        @param text: the title text
        @keyword page: the page instance that called us - using this is more efficient than using pagename.
        @keyword pagename: 'PageName'
        @keyword print_mode: 1 (or 0)
        @keyword editor_mode: 1 (or 0)
        @keyword media: css media type, defaults to 'screen'
        @keyword allow_doubleclick: 1 (or 0)
        @keyword html_head: additional <head> code
        @keyword body_attr: additional <body> attributes
        @keyword body_onload: additional "onload" JavaScript code
        """
        request = self.request
        _ = request.getText
        rev = request.rev

        if keywords.has_key('page'):
            page = keywords['page']
            pagename = page.page_name
        else:
            pagename = keywords.get('pagename', '')
            page = Page(request, pagename)
        if keywords.get('msg', ''):
            raise DeprecationWarning("Using send_page(msg=) is deprecated! Use theme.add_msg() instead!")
        scriptname = request.script_root

        # get name of system pages
        page_front_page = wikiutil.getFrontPage(request).page_name
        page_help_contents = wikiutil.getLocalizedPage(request, 'HelpContents').page_name
        page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
        page_site_navigation = wikiutil.getLocalizedPage(request, 'SiteNavigation').page_name
        page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name
        page_help_formatting = wikiutil.getLocalizedPage(request, 'HelpOnFormatting').page_name
        page_find_page = wikiutil.getLocalizedPage(request, 'FindPage').page_name
        home_page = wikiutil.getInterwikiHomePage(request) # sorry theme API change!!! Either None or tuple (wikiname,pagename) now.
        page_parent_page = getattr(page.getParentPage(), 'page_name', None)

        # Prepare the HTML <head> element
        user_head = [request.cfg.html_head]

        # include charset information - needed for moin_dump or any other case
        # when reading the html without a web server
        user_head.append('''<meta http-equiv="Content-Type" content="%s;charset=%s">\n''' % (page.output_mimetype, page.output_charset))

        meta_keywords = request.getPragma('keywords')
        meta_desc = request.getPragma('description')
        if meta_keywords:
            user_head.append('<meta name="keywords" content="%s">\n' % wikiutil.escape(meta_keywords, 1))
        if meta_desc:
            user_head.append('<meta name="description" content="%s">\n' % wikiutil.escape(meta_desc, 1))

        # search engine precautions / optimization:
        # if it is an action or edit/search, send query headers (noindex,nofollow):
        if request.query_string:
            user_head.append(request.cfg.html_head_queries)
        elif request.method == 'POST':
            user_head.append(request.cfg.html_head_posts)
        # we don't want to have BadContent stuff indexed:
        elif pagename in ['BadContent', 'LocalBadContent', ]:
            user_head.append(request.cfg.html_head_posts)
        # if it is a special page, index it and follow the links - we do it
        # for the original, English pages as well as for (the possibly
        # modified) frontpage:
        elif pagename in [page_front_page, request.cfg.page_front_page,
                          page_title_index, 'TitleIndex',
                          page_find_page, 'FindPage',
                          page_site_navigation, 'SiteNavigation',
                          'RecentChanges', ]:
            user_head.append(request.cfg.html_head_index)
        # if it is a normal page, index it, but do not follow the links, because
        # there are a lot of illegal links (like actions) or duplicates:
        else:
            user_head.append(request.cfg.html_head_normal)

        if 'pi_refresh' in keywords and keywords['pi_refresh']:
            user_head.append('<meta http-equiv="refresh" content="%d;URL=%s">' % keywords['pi_refresh'])

        # output buffering increases latency but increases throughput as well
        output = []
        # later: <html xmlns=\"http://www.w3.org/1999/xhtml\">
        output.append("""\
<!doctype html>
<!--[if lt IE 7]> <html class="no-js ie6 oldie" lang="en"> <![endif]-->
<!--[if IE 7]>    <html class="no-js ie7 oldie" lang="en"> <![endif]-->
<!--[if IE 8]>    <html class="no-js ie8 oldie" lang="en"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
  <title>%(title)s</title>
  <meta name="description" content="">
  <meta name="author" content="">
  <meta name="viewport" content="width=device-width,initial-scale=1">
  <link rel="stylesheet" href="%(static_url)scss/style.css">
  <script src="%(static_url)sjs/libs/moin.common.js"></script>
<script type="text/javascript">
<!--
var search_hint = "Search";
//-->
</script>
  <script src="%(static_url)sjs/libs/modernizr-2.0.6.min.js"></script>
""" %{'title':text, 'static_url':django_settings.STATIC_URL})

        # Links
        output.append('<link rel="Start" href="%s">\n' % request.href(page_front_page))
        if pagename:
            output.append('<link rel="Alternate" title="%s" href="%s">\n' % (
                    _('Wiki Markup'), request.href(pagename, action='raw')))
            output.append('<link rel="Alternate" media="print" title="%s" href="%s">\n' % (
                    _('Print View'), request.href(pagename, action='print')))

            # !!! currently disabled due to Mozilla link prefetching, see
            # http://www.mozilla.org/projects/netlib/Link_Prefetching_FAQ.html
            #~ all_pages = request.getPageList()
            #~ if all_pages:
            #~     try:
            #~         pos = all_pages.index(pagename)
            #~     except ValueError:
            #~         # this should never happen in theory, but let's be sure
            #~         pass
            #~     else:
            #~         request.write('<link rel="First" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[0]))
            #~         if pos > 0:
            #~             request.write('<link rel="Previous" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[pos-1])))
            #~         if pos+1 < len(all_pages):
            #~             request.write('<link rel="Next" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[pos+1])))
            #~         request.write('<link rel="Last" href="%s/%s">\n' % (request.script_root, quoteWikinameURL(all_pages[-1])))

            if page_parent_page:
                output.append('<link rel="Up" href="%s">\n' % request.href(page_parent_page))

        # write buffer because we call AttachFile
        request.write(''.join(output))
        output = []

        # XXX maybe this should be removed completely. moin emits all attachments as <link rel="Appendix" ...>
        # and it is at least questionable if this fits into the original intent of rel="Appendix".
        if pagename and request.user.may.read(pagename):
            from MoinMoin.action import AttachFile
            AttachFile.send_link_rel(request, pagename)

        output.extend([
            '<link rel="Search" href="%s">\n' % request.href(page_find_page),
            '<link rel="Index" href="%s">\n' % request.href(page_title_index),
            '<link rel="Glossary" href="%s">\n' % request.href(page_word_index),
            '<link rel="Help" href="%s">\n' % request.href(page_help_formatting),
                      ])

        output.append("</head>\n")
        request.write(''.join(output))
        output = []

        # start the <body>
        bodyattr = []
        if keywords.has_key('body_attr'):
            bodyattr.append(' ')
            bodyattr.append(keywords['body_attr'])

        # Add doubleclick edit action
        if (pagename and keywords.get('allow_doubleclick', 0) and
            not keywords.get('print_mode', 0) and
            request.user.edit_on_doubleclick):
            if request.user.may.write(pagename): # separating this gains speed
                url = page.url(request, {'action': 'edit'})
                bodyattr.append(''' ondblclick="location.href='%s'" ''' % wikiutil.escape(url, True))

        # Set body to the user interface language and direction
        bodyattr.append(' %s' % self.ui_lang_attr())

        body_onload = keywords.get('body_onload', '')
        if body_onload:
            bodyattr.append(''' onload="%s"''' % body_onload)
        output.append('\n<body%s>\n' % ''.join(bodyattr))

        # Output -----------------------------------------------------------

        # If in print mode, start page div and emit the title
        if keywords.get('print_mode', 0):
            d = {
                'title_text': text,
                'page': page,
                'page_name': pagename or '',
                'rev': rev,
            }
            request.themedict = d
            output.append(self.startPage())
            output.append(self.interwiki(d))
            output.append(self.title(d))

        # In standard mode, emit theme.header
        else:
            exists = pagename and page.exists(includeDeleted=True)
            # prepare dict for theme code:
            d = {
                'theme': self.name,
                'script_name': scriptname,
                'title_text': text,
                'logo_string': request.cfg.logo_string,
                'site_name': request.cfg.sitename,
                'page': page,
                'rev': rev,
                'pagesize': pagename and page.size() or 0,
                # exists checked to avoid creation of empty edit-log for non-existing pages
                'last_edit_info': exists and page.lastEditInfo() or '',
                'page_name': pagename or '',
                'page_find_page': page_find_page,
                'page_front_page': page_front_page,
                'home_page': home_page,
                'page_help_contents': page_help_contents,
                'page_help_formatting': page_help_formatting,
                'page_parent_page': page_parent_page,
                'page_title_index': page_title_index,
                'page_word_index': page_word_index,
                'user_name': request.user.name,
                'user_valid': request.user.valid,
                'msg': self._status,
                'trail': keywords.get('trail', None),
                # Discontinued keys, keep for a while for 3rd party theme developers
                'titlesearch': 'use self.searchform(d)',
                'textsearch': 'use self.searchform(d)',
                'navibar': ['use self.navibar(d)'],
                'available_actions': ['use self.request.availableActions(page)'],
            }

            # add quoted versions of pagenames
            newdict = {}
            for key in d:
                if key.startswith('page_'):
                    if not d[key] is None:
                        newdict['q_'+key] = wikiutil.quoteWikinameURL(d[key])
                    else:
                        newdict['q_'+key] = None
            d.update(newdict)
            request.themedict = d

            # now call the theming code to do the rendering
            if keywords.get('editor_mode', 0):
                output.append(self.editorheader(d))
            else:
                output.append(self.header(d))

        # emit it
        request.write(''.join(output))
        output = []
        self._send_title_called = True
Code example #25
File: alicia.py Project: danirus/moin-theme-alicia
    def get_menu(self, dic, user_items=None):
        req = self.request
        rev = req.rev
        _ = req.getText
        page = dic['page']

        page_recent_changes = getLocalizedPage(req, u'RecentChanges')
        page_find_page = getLocalizedPage(req, u'FindPage')
        page_help_contents = getLocalizedPage(req, u'HelpContents')
        page_help_formatting = getLocalizedPage(req, u'HelpOnFormatting')
        page_help_wikisyntax = getLocalizedPage(req, u'HelpOnMoinWikiSyntax')
        page_title_index = getLocalizedPage(req, u'TitleIndex')
        page_word_index = getLocalizedPage(req, u'WordIndex')
        page_front_page = wikiutil.getFrontPage(req)
        page_sidebar = Page(req, req.getPragma('sidebar', u'SideBar'))
        quicklink = self._menu_quick_link(page)
        subscribe = self._menu_subscribe(page)

        menu_cfg = {
            'raw': {
                # Title for this menu entry
                'title': _('Raw Text'),
                # href and args are for normal entries ('special': False),
                # otherwise ignored.
                # 'href': Nonexistent or empty for current page
                'href': '',
                # 'args': {'query1': 'value1', 'query2': 'value2', }
                # Optionally specify this for:
                #  <a href="href?query1=value1&query2=value2">
                # If href and args are both nonexistent or empty, key is
                # automatically interpreted to be an action name and href
                # and args are automatically set.
                'args': '',
                # 'special' can be:
                #   'disabled', 'removed', 'separator' or 'header' for
                #   whatever they say, False, None or nonexistent for
                #   normal menu display.
                # 'separator' and 'header' are automatically removed when
                # there are no entries to show among them.
                'special': False,
                'icon': 'fa fa-file-o'
            },
            'print': {
                'title': _('Print View'),
                'icon': 'fa fa-print'
            },
            'refresh': {
                'title':
                _('Delete Cache'),
                'special':
                not (self._is_available_action(page, 'refresh')
                     and page.canUseCache()) and 'removed',
                'icon':
                'fa fa-refresh'
            },
            'SpellCheck': {
                'title': _('Check Spelling'),
                'icon': 'fa fa-check-square-o'
            },
            'RenamePage': {
                'title': _('Rename Page'),
                'icon': 'fa fa-repeat'
            },
            'CopyPage': {
                'title': _('Copy Page'),
                'icon': 'fa fa-clone'
            },
            'DeletePage': {
                'title': _('Delete Page'),
                'icon': 'fa fa-trash'
            },
            'LikePages': {
                'title': _('Like Pages'),
                'icon': 'fa fa-thumbs-o-up'
            },
            'LocalSiteMap': {
                'title': _('Local Site Map'),
                'icon': 'fa fa-sitemap'
            },
            'MyPages': {
                'title': _('My Pages'),
                'icon': 'fa fa-newspaper-o'
            },
            'SubscribeUser': {
                'title':
                _('Subscribe User'),
                'special':
                not (self._is_available_action(page, 'SubscribeUser')
                     and req.user.may.admin(page.page_name)) and 'removed',
                'icon':
                'fa fa-envelope-o'
            },
            'Despam': {
                'title':
                _('Remove Spam'),
                'special':
                not (self._is_available_action(page, 'Despam')
                     and req.user.isSuperUser()) and 'removed',
                'icon':
                'fa fa-fire'
            },
            'revert': {
                'title':
                _('Revert to this revision'),
                'special':
                not (self._is_available_action(page, 'revert') and rev
                     and req.user.may.revert(page.page_name)) and 'removed',
                'icon':
                'fa fa-undo'
            },
            'PackagePages': {
                'title': _('Package Pages'),
                'icon': 'fa fa-suitcase'
            },
            'RenderAsDocbook': {
                'title': _('Render as Docbook'),
                'icon': 'fa fa-book'
            },
            'SyncPages': {
                'title': _('Sync Pages'),
                'icon': 'fa fa-refresh'
            },
            'AttachFile': {
                'title': _('Attachments'),
                'icon': 'fa fa-paperclip'
            },
            'quicklink': {
                'title': quicklink[1] or _('Quick Link'),
                'args': dict(action=quicklink[0], rev=rev),
                'special': not quicklink[0] and 'removed',
                'icon': 'fa fa-share'
            },
            'subscribe': {
                'title': subscribe[1] or _('Subscribe'),
                'args': dict(action=subscribe[0], rev=rev),
                'special': not subscribe[0] and 'removed',
                'icon': 'fa fa-envelope'
            },
            'info': {
                'title': _('Info'),
                'icon': 'fa fa-info-circle'
            },
            'Load': {
                'title': _('Load'),
                'icon': 'fa fa-upload'
            },
            'Save': {
                'title': _('Save'),
                'icon': 'fa fa-download'
            },
            # useful pages
            'RecentChanges': {
                'title': page_recent_changes.page_name,
                'href': page_recent_changes.url(req),
                'icon': 'fa fa-clock-o'
            },
            'FindPage': {
                'title': page_find_page.page_name,
                'href': page_find_page.url(req),
                'icon': 'fa fa-search'
            },
            'HelpContents': {
                'title': page_help_contents.page_name,
                'href': page_help_contents.url(req),
                'icon': 'fa fa-question-circle'
            },
            'HelpOnFormatting': {
                'title': page_help_formatting.page_name,
                'href': page_help_formatting.url(req),
                'icon': 'fa fa-question-circle'
            },
            'HelpOnMoinWikiSyntax': {
                'title': page_help_wikisyntax.page_name,
                'href': page_help_wikisyntax.url(req),
                'icon': 'fa fa-question-circle'
            },
            'TitleIndex': {
                'title': page_title_index.page_name,
                'href': page_title_index.url(req),
                'icon': 'fa fa-list'
            },
            'WordIndex': {
                'title': page_word_index.page_name,
                'href': page_word_index.url(req),
                'icon': 'fa fa-list'
            },
            'FrontPage': {
                'title': page_front_page.page_name,
                'href': page_front_page.url(req),
                'icon': 'fa fa-home'
            },
            'SideBar': {
                'title': page_sidebar.page_name,
                'href': page_sidebar.url(req),
                'icon': 'fa fa-columns'
            },
            'editSideBar': {
                'title': _('Edit SideBar'),
                'href': page_sidebar.url(req),
                'args': dict(action='edit'),
                'special': (not self._is_editable_page(page_sidebar)
                            and 'removed'),
                'icon': 'fa fa-columns'
            },
        }

        output = []

        for group in self._menu:
            _group = {'label': _(group['label']), 'entries': []}
            for entry in group['entries']:
                data = menu_cfg.get(entry)
                if data:
                    if data.get('special'):
                        _group['entries'].append(data)
                    else:
                        if not (data.get('href') or data.get('args')):
                            # It's an Action.
                            if self._is_available_action(page, entry):
                                query = self._get_query_string({
                                    'action': entry,
                                    'rev': rev
                                })
                                _entry = {
                                    'title': data.get('title', _(entry)),
                                    'href': u'%s%s' % (page.url(req), query),
                                    'icon': data['icon'],
                                }
                                _group['entries'].append(_entry)
                            else:
                                continue
                        else:
                            # A normal menu entry.
                            if not data.get('href'):
                                data['href'] = page.url(req)
                            if data.get('args'):
                                data['href'] = u'%s%s' % (
                                    data['href'],
                                    self._get_query_string(data['args']))
                            _group['entries'].append({
                                'title':
                                data.get('title', _(entry)),
                                'href':
                                data['href'],
                                'icon':
                                data['icon'],
                            })
            if _group['label'] == 'User':
                for k in user_items.keys():
                    if user_items[k]:
                        _group['entries'].append({
                            'title':
                            _(user_items[k]['title']),
                            'href':
                            user_items[k]['url'],
                            'icon':
                            user_items[k]['icon']
                        })
            output.append(_group)
        return output
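
The comments inside the 'raw' entry above document the per-entry keys. A hedged sketch of one more entry following those conventions; the key, target page and icon are illustrative, and the entry would additionally have to be listed in self._menu to be rendered:

extra_entry = {
    'ProjectIndex': {
        'title': u'Project index',          # label shown in the menu
        'href': u'/ProjectIndex',           # empty would mean the current page
        'args': {'highlight': u'project'},  # appended as ?highlight=project
        'special': False,                   # normal entry, not disabled/removed
        'icon': 'fa fa-folder-open',
    },
}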
Code example #26
def execute(pagename, request):
    _ = request.getText

    def wr(fmt, *args):
        request.write(form_writer(fmt, *args))

    # This action generates data using the user language
    request.setContentLanguage(request.lang)
    form = values_to_form(request.values)

    if form.has_key('cancel'):
        request.reset()
        backto = form.get('backto', [None])[0]
        request.theme.add_msg(_('Edit was cancelled.'), "error")
        if backto:
            page = Page(request, backto)
            request.http_redirect(page.url(request))
            request.page = page

        request.page.send_page()
    elif form.has_key('save') or form.has_key('saveform'):
        if request.environ['REQUEST_METHOD'] != 'POST':
            request.page.send_page()
            return

        template = form.get('template', [None])[0]

        # MetaFormEdit is much closer to set_meta in function
        if form.has_key('saveform'):
            added, cleared = {pagename: dict()}, {pagename: list()}

            # Pre-create page if it does not exist, using the template specified
            if template:
                added[pagename]['gwikitemplate'] = template

            # Ignore form clutter
            ignore = set()
            files = dict()
            for key in request.files:
                ignore.add(key)
                _file = request.files.get(key)
                filename = _file.filename
                fileobj = _file.stream

#                    if type(fileobj) != file:
#                        continue

                banana = key.split(SEPARATOR)
                keys = files.setdefault(banana[0], dict())
                values = keys.setdefault(banana[1], list())
                values.append((filename, fileobj))

            keys = list()
            for key in form:
                if key not in ignore and SEPARATOR in key:
                    keys.append(key.split(SEPARATOR)[1])
#            keys = [x.split(SEPARATOR)[1] for x in form if SEPARATOR in x]

            for key in keys:
                cleared[pagename].append(key)
                vals = [x.strip() for x in form[pagename + SEPARATOR + key]
                            if x.strip()]
                if vals:
                    added[pagename].setdefault(key, list()).extend(vals)

            msgs = list()
            # Add attachments
            for pname in files:
                for key in files[pname]:
                    for value in files[pname][key]:
                        name = value[0]
                        try:
                            t, s = add_attachment(request, pname,
                                                  name, value[1])
                            added.setdefault(pname, dict()).setdefault(
                                key, list()).append("[[attachment:%s]]" % name)
                        except AttachmentAlreadyExists:
                            msgs.append("Attachment '%s' already exists." % name)

            _, msgss = set_metas(request, cleared, dict(), added)
            msgs.extend(msgss)

        else:
            # MetaEdit
            pages, msgs, files = parse_editform(request, form)

            if pages:
                saved_templates = False

                for page in pages:
                    # Save the template if needed
                    if not Page(request, page).exists() and template:
                        msgs.append(save_template(request, page, template))
                        saved_templates = True

                # If new pages were created from templates, we need to re-parse
                # the form to know what we really need to edit
                if saved_templates:
                    pages, newmsgs, files = parse_editform(request, form)

                for page, (oldMeta, newMeta) in pages.iteritems():
                    msgs.append('%s: ' % page +
                                edit_meta(request, page, oldMeta, newMeta))

                for page in files:
                    for key in files[page]:
                        name, content = files[page][key]
                        t, s = add_attachment(request, page, name, content)
            else:
                msgs.append(request.getText('No pages changed'))

        msg = ''
        for line in msgs:
            msg += line + request.formatter.linebreak(0)

        request.reset()
        backto = form.get('backto', [None])[0]
        if backto:
            page = Page(request, backto)
            request.http_redirect(page.url(request))
        else:
            page = Page(request, pagename)

        request.theme.add_msg(msg)
        request.page = page
        page.send_page()
    elif form.has_key('args'):
        enter_page(request, pagename, 'Metatable editor')
        formatter = request.page.formatter

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit metatable")))
        request.write(formatter.heading(0, 2))
        args = ', '.join(form['args'])
        show_editform(wr, request, pagename, args)

        exit_page(request, pagename)
    else:
        enter_page(request, pagename, 'Metatable editor')
        formatter = request.page.formatter

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit current page")))
        request.write(formatter.heading(0, 2))
        show_editform(wr, request, pagename, pagename)

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit metatable")))
        request.write(formatter.heading(0, 2))
        show_queryform(wr, request, pagename)

        exit_page(request, pagename)
    return
Code example #27
    def execute(self):
        ngowikiutil = NgoWikiUtil(self.request)
        ngowikiutil.open_database()
        try:
            offset = 0
            length = 10
            sortby = "lastmodified"
            order = "DESC"
            filterByTags = self.filterByTags
            favorite = None

            form = self.request.values
            if 'from' in form:
                offset = int(form['from'])
            if 'length' in form:
                length = int(form['length'])
            if 'sortby' in form:
                sortby = form['sortby']
                if sortby == 'title':
                    order = "ASC"
            if 'order' in form:
                order = form['order']
            if 'favorite' in form and form[
                    'favorite'] != "false" and self.user != None and self.user.valid:
                favorite = self.user.id
            if 'filterByTags' in form:
                filterByTags = form['filterByTags'].split(",")

            for tag in filterByTags:
                ngowikiutil.update_tag_hitcount(tag)

            results = ngowikiutil.select_pages_by_tag(filterByTags, favorite,
                                                      sortby, order, offset,
                                                      length)
            total = ngowikiutil.count_pages_by_tag(filterByTags, favorite)

            buffer = []
            buffer.append(
                '''
                <script language="javascript">window.__ListPagesByTag_filterByTag = %(filterByTags)s;window.__ListPagesByTag_filterByTag_default = %(filterByTagsDefault)s;</script>
                <div id="listpagesbytag_sorter"></div>
				<div id="listpagesbytag_favorite"></div>
                <div id="listpagesbytag_filter"></div>
            ''' % {
                    "filterByTagsDefault": json.dumps(self.filterByTags),
                    "filterByTags": json.dumps(",".join(filterByTags))
                })

            template = '''
                <table class="listitem_with_logosummary">
                    <tr>
                        <!--
                        <td class="logo">
                            %(logo)s
                        </td>
                        -->
                        <td>
                           <div class="title">
                              <a href="%(link)s">%(title)s</a>
                           </div>
                           <div class="meta">
                               <span>%(lastmodified)s</span>
                               <span>%(tags)s</span>
                               <span><span class="metaitem">%(likecount)s<span></span>
                               <span><span class="metaitem">%(commentcount)s<span></span>
                               <span><span class="metaitem">%(hitcount)s<span></span>
                           </div>
                           <div class="summary">%(summary)s</div>
                        </td>
                     </tr>
                 </table>
                '''
            for result in results:
                page = Page(self.request, result["path"])
                logo = '<div class="logo defaultLogo">&nbsp;</div>'
                if len(result["logo"]) > 0 and exists(
                        self.request, result["path"], result["logo"]):
                    logo = '<img class="logo" src="' + getAttachUrl(
                        result["path"], result["logo"], self.request) + '">'
                link = page.url(self.request)
                title = result["title"]
                lastmodified = page.mtime_printable(self.request)
                summary = result["summary"].replace("'''", "").replace(
                    u"【请在此插入图片】", "").replace(u"【请在此插入图片,最多可插入9张】", "")

                tags = (", ".join(
                    map(
                        lambda x: '<a href=\'javascript:add_filter_by_tag(' +
                        json.dumps(x["tag"]) + ')\' >' + x["tag"] + '</a>',
                        filter(
                            lambda x: x["type"] == 1 or x["type"] == 2,
                            ngowikiutil.select_page_tags_by_id(
                                result["id"])))))

                if len(tags) > 0:
                    tags = '<span class="metaitem">' + tags + '</span>'

                buffer.append(
                    template % {
                        "logo":
                        logo,
                        "title":
                        title,
                        "link":
                        link,
                        "lastmodified":
                        lastmodified,
                        "tags":
                        tags,
                        "summary":
                        summary,
                        "likecount":
                        u'\u559c\u6b22\uff1a' + str(result["likecount"]),
                        "commentcount":
                        u'\u8bc4\u8bba\u6570\uff1a' +
                        str(result["commentcount"]),
                        "hitcount":
                        u'\u8bbf\u95ee\u91cf\uff1a' + str(result["hitcount"])
                    })

            buffer.append("<script language='javascript'>render_pagingbar(" +
                          str(total) + ', ' + str(length) + ');</script>')
            ret = ''.join(buffer)
            return ret
        finally:
            ngowikiutil.close_database(True)
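
A sketch of the request parameters read by execute() above, with illustrative values; note that sortby == 'title' switches the default order to ascending, and 'favorite' only has an effect for a valid, logged-in user:

params = {
    'from': 0,                        # offset into the result set
    'length': 10,                     # number of pages to return
    'sortby': 'lastmodified',         # or 'title'
    'order': 'DESC',
    'favorite': 'true',               # anything but "false" enables the filter
    'filterByTags': 'NGO,education',  # comma-separated tags (illustrative)
}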
Code example #28
File: diff.py Project: aahlad/soar
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date)  # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date:  # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use "No log entries found." msg because we already have i18n
        # for that. Better would be "At least one log entry was not found.".
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ),
                             pagename=pagename,
                             allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"),
        enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {
        'button': other_diff_button_html
    }

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (
        title,
        page_url,
        "left",
        prev_oldrev,
        oldrev,
        _("Previous change"),
        enabled(oldrev > 1),
        revert_html,
        page_url,
        "right",
        newrev,
        next_newrev,
        _("Next change"),
        enabled(newrev < currentrev),
    )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes,
                     enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(
                request,
                on=1,
                querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                },
                css_class="diff-nav-link %s" % css_classes,
                title=enabled_title) + request.formatter.text(
                    caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
            }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
        'rev_header': _('Revision %(rev)d as of %(date)s'),
        'rev_size_caption': _('Size'),
        'rev_author_caption': _('Editor'),
        'rev_ts_caption': _('Date'),
        'rev_comment_caption': _('Comment'),
    }

    rev_info_old_html = rev_info_html % {
        'rev_first_link':
        rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4',
                     'diff-first-link diff-old-rev',
                     _('Diff with oldest revision in left pane'),
                     _("No older revision available for diff")),
        'rev_prev_link':
        rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190',
                     'diff-prev-link diff-old-rev',
                     _('Diff with older revision in left pane'),
                     _("No older revision available for diff")),
        'rev_next_link':
        rev_nav_link(
            (oldrev < currentrev) and (next_oldrev < newrev), next_oldrev,
            newrev, u'\u2192', 'diff-next-link diff-old-rev',
            _('Diff with newer revision in left pane'),
            _("Can't change to revision newer than in right pane")),
        'rev_last_link':
        '',
        'rev':
        oldrev,
        'rev_size':
        oldpage.size(),
        'rev_author':
        oldlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link':
        '',
        'rev_prev_link':
        rev_nav_link(
            (newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev,
            u'\u2190', 'diff-prev-link diff-new-rev',
            _('Diff with older revision in right pane'),
            _("Can't change to revision older than revision in left pane")),
        'rev_next_link':
        rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192',
                     'diff-next-link diff-new-rev',
                     _('Diff with newer revision in right pane'),
                     _("No newer revision available for diff")),
        'rev_last_link':
        rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5',
                     'diff-last-link diff-old-rev',
                     _('Diff with newest revision in right pane'),
                     _("No newer revision available for diff")),
        'rev':
        newrev,
        'rev_size':
        newpage.size(),
        'rev_author':
        newlog.getEditor(request) or _('N/A'),
        'date':
        request.user.getFormattedDateTime(
            wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment':
        wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(
            f.rawHTML(
                diff_html.diff(request,
                               oldpage.get_raw_body(),
                               newpage.get_raw_body(),
                               old_top=rev_info_old_html,
                               new_top=rev_info_new_html,
                               old_top_class="diff-info",
                               new_top_class="diff-info")))
        newpage.send_page(count_hit=0,
                          content_only=1,
                          content_id="content-below-diff")
    else:
        request.write(
            f.rawHTML(
                '<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>'
                % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(
                    _('The page was saved %(count)d times, though!') %
                    {'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')),
                              f.linebreak())
            else:
                qstr = {
                    'action': 'diff',
                    'ignorews': '1',
                }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(
                    f.paragraph(1),
                    Page(request, pagename).link_to(
                        request,
                        text=_('Ignore changes in the amount of whitespace'),
                        querystr=qstr,
                        rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0))  # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
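
A few illustrative ways of invoking the diff action above, using only the parameters execute() reads; the page name and revision numbers are made up:

# current revision against the one before it:
#   /SomePage?action=diff
# two explicit revisions, ignoring whitespace-only changes:
#   /SomePage?action=diff&rev1=3&rev2=7&ignorews=1
# as linked from RecentChanges, selecting the old revision by edit timestamp:
#   /SomePage?action=diff&date=1234567890000000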
Code example #29
File: diff.py Project: Glottotopia/aagd
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.values['date']
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.values.get('rev1', -1))
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.values.get('rev2', 0))
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.values.get('ignorews', 0))

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    oldlog = oldpage.editlog_entry()
    newlog = newpage.editlog_entry()

    if not oldlog or not newlog:
        # We use the "No log entries found." msg because we already have i18n
        # for that. Better would be "At least one log entry was not found.".
        request.theme.add_msg(_("No log entries found."), "error")
        currentpage.send_page()
        return

    edit_count = abs(newrev - oldrev)

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    page_url = wikiutil.escape(currentpage.url(request), True)

    def enabled(val):
        return not val and u' disabled="disabled"' or u''

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 """ % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev))

    other_diff_button_html = """
 <td style="border:0;">
  <form action="%s" method="get">
   <div style="text-align:%s">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
"""

    navigation_html = """
<span class="diff-header">%%s</span>
<table class="diff">
<tr>
 %(button)s
 <td style="border:0">
   %%s
 </td>
 %(button)s
</tr>
</table>
""" % {'button': other_diff_button_html}

    prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1
    next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev

    prev_newrev = (newrev > 1) and (newrev - 1) or 1
    next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev

    navigation_html = navigation_html % (title,
       page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1),
       revert_html,
       page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), )

    request.write(f.rawHTML(navigation_html))

    def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title):
        if enabled:
            return currentpage.link_to(request, on=1, querystr={
                    'action': 'diff',
                    'rev1': old_rev,
                    'rev2': new_rev,
                    }, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0)
        else:
            return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % {
                'css_classes': css_classes,
                'disabled_title': disabled_title,
                'caption': caption,
                }

    rev_info_html = """
  <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div>
  <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div>
  <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div>
  <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div>
""" % {
    'rev_header': _('Revision %(rev)d as of %(date)s'),
    'rev_size_caption': _('Size'),
    'rev_author_caption': _('Editor'),
    'rev_ts_caption': _('Date'),
    'rev_comment_caption': _('Comment'),
}

    rev_info_old_html = rev_info_html % {
        'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4', 'diff-first-link diff-old-rev', _('Diff with oldest revision in left pane'), _("No older revision available for diff")),
        'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190', 'diff-prev-link diff-old-rev', _('Diff with older revision in left pane'), _("No older revision available for diff")),
        'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192', 'diff-next-link diff-old-rev', _('Diff with newer revision in left pane'), _("Can't change to revision newer than in right pane")),
        'rev_last_link': '',
        'rev': oldrev,
        'rev_size': oldpage.size(),
        'rev_author': oldlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(oldlog.comment) or '',
    }

    rev_info_new_html = rev_info_html % {
        'rev_first_link': '',
        'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190', 'diff-prev-link diff-new-rev', _('Diff with older revision in right pane'), _("Can't change to revision older than revision in left pane")),
        'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192', 'diff-next-link diff-new-rev', _('Diff with newer revision in right pane'), _("No newer revision available for diff")),
        'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5', 'diff-last-link diff-old-rev', _('Diff with newest revision in right pane'), _("No newer revision available for diff")),
        'rev': newrev,
        'rev_size': newpage.size(),
        'rev_author': newlog.getEditor(request) or _('N/A'),
        'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'),
        'rev_comment': wikiutil.escape(newlog.comment) or '',
    }

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info")))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html)))

        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
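
The revision handling at the top of the action above normalizes the raw rev1/rev2 request values before any Page objects are built: explicit revisions are swapped into ascending order, -1 means "the previous revision" and 0 means "the current revision". A minimal standalone sketch of that normalization (the helper name is hypothetical, not part of MoinMoin):

def normalize_revs(rev1, rev2, currentrev):
    """Return (oldrev, newrev) for raw rev1/rev2 request values."""
    # put explicit revisions into ascending order
    if (rev1 > 0 and rev2 > 0 and rev1 > rev2) or (rev1 == 0 and rev2 > 0):
        rev1, rev2 = rev2, rev1
    if rev1 == -1:             # no rev1 given: diff against the previous revision
        oldrev = currentrev - 1
    elif rev1 == 0:            # rev1 == 0 means the current revision
        oldrev = currentrev
    else:
        oldrev = rev1
    newrev = currentrev if rev2 == 0 else rev2
    return oldrev, newrev

# e.g. normalize_revs(-1, 0, 7) -> (6, 7); normalize_revs(5, 3, 7) -> (3, 5)
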
Code example #30
File: MonthCalendar.py Project: steveyen/moingo
    # european / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    if calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    qpagenames = '*'.join([wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth, parmoffset)
    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request, querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request, querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', **attrs) + '&lt;' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', **attrs) + '&gt;' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', **attrs) + '&lt;&lt;' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', **attrs) + '&gt;&gt;' + formatter.url(0)

    if parmpagename != [thispage]:
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)
Code example #31
File: text_rst.py Project: IvanLogvinov/soar
    def visit_reference(self, node):
        """
            Pass links to MoinMoin to get the correct wiki space url. Extract
            the url and pass it on to the html4css1 writer to handle. Inline
            images are also handled by visit_image. Not sure what the "drawing:"
            link scheme is used for, so for now it is handled here.

            Also included here is a hack to allow MoinMoin macros. This routine
            checks for a link which starts with "<<". This link is passed to the
            MoinMoin formatter and the resulting markup is inserted into the
            document in the place of the original link reference.
        """
        if 'refuri' in node.attributes:
            refuri = node['refuri']
            prefix = ''
            link = refuri
            if ':' in refuri:
                prefix, link = refuri.lstrip().split(':', 1)

            # First see if MoinMoin should handle completely. Exits through add_wiki_markup.
            if refuri.startswith('<<') and refuri.endswith('>>'): # moin macro
                self.process_wiki_text(refuri)
                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                self.add_wiki_markup()

            if prefix == 'drawing':
                self.process_wiki_text("[[%s]]" % refuri)
                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                self.add_wiki_markup()

            # From here down, all links are handled by docutils (except
            # missing attachments), just fixup node['refuri'].
            if prefix == 'attachment':
                if not AttachFile.exists(self.request, self.request.page.page_name, link):
                    # Attachment doesn't exist, give to MoinMoin to insert upload text.
                    self.process_wiki_text("[[%s]]" % refuri)
                    self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                    self.add_wiki_markup()
                # Attachment exists, just get a link to it.
                node['refuri'] = AttachFile.getAttachUrl(self.request.page.page_name, link, self.request)
                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
                    node['classes'].append(prefix)
            elif prefix == 'wiki':
                wiki_name, page_name = wikiutil.split_interwiki(link)
                wikitag, wikiurl, wikitail, err = wikiutil.resolve_interwiki(self.request, wiki_name, page_name)
                wikiurl = wikiutil.mapURL(self.request, wikiurl)
                node['refuri'] = wikiutil.join_wiki(wikiurl, wikitail)
                # Only add additional class information if the reference does
                # not have a child image (don't want to add additional markup
                # for images with targets).
                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
                    node['classes'].append('interwiki')
            elif prefix != '':
                # Some link scheme (http, file, https, mailto, etc.), add class
                # information if the reference doesn't have a child image (don't
                # want additional markup for images with targets).
                # Don't touch the refuri.
                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
                    node['classes'].append(prefix)
            else:
                # Default case - make a link to a wiki page.
                pagename, anchor = wikiutil.split_anchor(refuri)
                page = Page(self.request, wikiutil.AbsPageName(self.formatter.page.page_name, pagename))
                node['refuri'] = page.url(self.request, anchor=anchor)
                if not page.exists():
                    node['classes'].append('nonexistent')
        html4css1.HTMLTranslator.visit_reference(self, node)
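
visit_reference above dispatches on the prefix of the reStructuredText reference URI: macros wrapped in <<...>>, drawing: links, and missing attachments are handed back to the MoinMoin parser, attachment:, wiki: and external schemes get their refuri fixed up, and everything else is treated as a plain wiki page name. A minimal sketch of just that classification step (the function name is hypothetical and omits the MoinMoin-specific URL rewriting):

def classify_refuri(refuri):
    """Classify a reST reference URI the way visit_reference above dispatches it."""
    prefix, link = '', refuri
    if ':' in refuri:
        prefix, link = refuri.lstrip().split(':', 1)
    if refuri.startswith('<<') and refuri.endswith('>>'):
        return 'macro', refuri
    if prefix in ('drawing', 'attachment', 'wiki'):
        return prefix, link
    if prefix:
        return 'external', link   # http, https, mailto, file, ...
    return 'wikipage', refuri     # default: a plain wiki page name

# e.g. classify_refuri('attachment:diagram.png') -> ('attachment', 'diagram.png')
#      classify_refuri('SomePage#anchor')        -> ('wikipage', 'SomePage#anchor')
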
Code example #32
File: MetaEdit.py Project: execgit/graphingwiki
def execute(pagename, request):
    _ = request.getText

    def wr(fmt, *args):
        request.write(form_writer(fmt, *args))

    # This action generates data using the user language
    request.setContentLanguage(request.lang)
    form = values_to_form(request.values)

    if form.has_key('cancel'):
        request.reset()
        backto = form.get('backto', [None])[0]
        request.theme.add_msg(_('Edit was cancelled.'), "error")
        if backto:
            page = Page(request, backto)
            request.http_redirect(page.url(request))
            request.page = page

        request.page.send_page()
    elif form.has_key('save') or form.has_key('saveform'):
        if request.environ['REQUEST_METHOD'] != 'POST':
            request.page.send_page()
            return

        template = form.get('template', [None])[0]

        # MetaFormEdit is much closer to set_meta in function
        if form.has_key('saveform'):
            added, cleared = {pagename: dict()}, {pagename: list()}

            # Pre-create page if it does not exist, using the template specified
            if template:
                added[pagename]['gwikitemplate'] = template

            # Ignore form clutter
            ignore = set()
            files = dict()
            for key in request.files:
                ignore.add(key)
                _file = request.files.get(key)
                filename = _file.filename
                fileobj = _file.stream

                #                    if type(fileobj) != file:
                #                        continue

                banana = key.split(SEPARATOR)
                keys = files.setdefault(banana[0], dict())
                values = keys.setdefault(banana[1], list())
                values.append((filename, fileobj))

            keys = list()
            for key in form:
                if key not in ignore and SEPARATOR in key:
                    keys.append(key.split(SEPARATOR)[1])


#            keys = [x.split(SEPARATOR)[1] for x in form if SEPARATOR in x]

            for key in keys:
                cleared[pagename].append(key)
                vals = [
                    x.strip() for x in form[pagename + SEPARATOR + key]
                    if x.strip()
                ]
                if vals:
                    added[pagename].setdefault(key, list()).extend(vals)

            msgs = list()
            # Add attachments
            for pname in files:
                for key in files[pname]:
                    for value in files[pname][key]:
                        name = value[0]
                        try:
                            t, s = add_attachment(request, pname, name,
                                                  value[1])
                            added.setdefault(pname, dict()).setdefault(
                                key, list()).append("[[attachment:%s]]" % name)
                        except AttachmentAlreadyExists:
                            msgs = ["Attachment '%s' already exists." % name]

            _, msgss = set_metas(request, cleared, dict(), added)
            msgs.extend(msgss)

        else:
            # MetaEdit
            pages, msgs, files = parse_editform(request, form)

            if pages:
                saved_templates = False

                for page, _ in pages.iteritems():
                    # Save the template if needed
                    if not Page(request, page).exists() and template:
                        msgs.append(save_template(request, page, template))
                        saved_templates = True

                # If new pages were changed we need to redo parsing
                # the form to know what we really need to edit
                if saved_templates:
                    pages, newmsgs, files = parse_editform(request, form)

                for page, (oldMeta, newMeta) in pages.iteritems():
                    msgs.append('%s: ' % page +
                                edit_meta(request, page, oldMeta, newMeta))

                for page in files:
                    for key in files[page]:
                        name, content = files[page][key]
                        t, s = add_attachment(request, page, name, content)
            else:
                msgs.append(request.getText('No pages changed'))

        msg = ''
        for line in msgs:
            msg += line + request.formatter.linebreak(0)

        request.reset()
        backto = form.get('backto', [None])[0]
        if backto:
            page = Page(request, backto)
            request.http_redirect(page.url(request))
        else:
            page = Page(request, pagename)

        request.theme.add_msg(msg)
        request.page = page
        page.send_page()
    elif form.has_key('args'):
        enter_page(request, pagename, 'Metatable editor')
        formatter = request.page.formatter

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit metatable")))
        request.write(formatter.heading(0, 2))
        args = ', '.join(form['args'])
        show_editform(wr, request, pagename, args)

        exit_page(request, pagename)
    else:
        enter_page(request, pagename, 'Metatable editor')
        formatter = request.page.formatter

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit current page")))
        request.write(formatter.heading(0, 2))
        show_editform(wr, request, pagename, pagename)

        request.write(formatter.heading(1, 2))
        request.write(formatter.text(_("Edit metatable")))
        request.write(formatter.heading(0, 2))
        show_queryform(wr, request, pagename)

        exit_page(request, pagename)
    return
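
The saveform branch above relies on form field names of the shape "<page><SEPARATOR><meta key>"; anything without the separator is treated as form clutter and ignored. A minimal sketch of that grouping step (the function name and the SEPARATOR value shown here are assumptions for illustration, not the actual graphingwiki constant):

SEPARATOR = '!!'   # assumed value, for illustration only

def group_form_keys(form_keys, separator=SEPARATOR):
    """Group "<page><separator><key>" form field names per page."""
    grouped = {}
    for field in form_keys:
        if separator not in field:
            continue                    # ignore form clutter
        page, meta_key = field.split(separator, 1)
        grouped.setdefault(page, []).append(meta_key)
    return grouped

# e.g. group_form_keys(['MyPage!!status', 'MyPage!!owner', 'action'])
#      -> {'MyPage': ['status', 'owner']}
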
Code example #33
def do_content(pagename, request):
    request.http_headers()
    page = Page(request, pagename)
    request.write('<!-- Transclusion of %s -->' % request.getQualifiedURL(page.url(request)))
    page.send_page(request, count_hit=0, content_only=1)
    raise MoinMoinNoFooter
Code example #34
File: diff.py Project: steveyen/moingo
def execute(pagename, request):
    """ Handle "action=diff"
        checking for either a "rev=formerrevision" parameter
        or rev1 and rev2 parameters
    """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    try:
        date = request.form['date'][0]
        try:
            date = long(date) # must be long for py 2.2.x
        except StandardError:
            date = 0
    except KeyError:
        date = 0

    try:
        rev1 = int(request.form.get('rev1', [-1])[0])
    except StandardError:
        rev1 = 0
    try:
        rev2 = int(request.form.get('rev2', [0])[0])
    except StandardError:
        rev2 = 0

    if rev1 == -1 and rev2 == 0:
        rev1 = request.rev
        if rev1 is None:
            rev1 = -1

    # spacing flag?
    ignorews = int(request.form.get('ignorews', [0])[0])

    _ = request.getText

    # get a list of old revisions, and back out if none are available
    currentpage = Page(request, pagename)
    currentrev = currentpage.current_rev()
    if currentrev < 2:
        request.theme.add_msg(_("No older revisions available!"), "error")
        currentpage.send_page()
        return

    if date: # this is how we get called from RecentChanges
        rev1 = 0
        log = editlog.EditLog(request, rootpagename=pagename)
        for line in log.reverse():
            if date >= line.ed_time_usecs and int(line.rev) != 99999999:
                rev1 = int(line.rev)
                break
        else:
            rev1 = 1
        rev2 = 0

    # Start output
    # This action generates content in the user language
    request.setContentLanguage(request.lang)

    request.emit_http_headers()
    request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)

    if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
        rev1, rev2 = rev2, rev1

    if rev1 == -1:
        oldrev = currentrev - 1
        oldpage = Page(request, pagename, rev=oldrev)
    elif rev1 == 0:
        oldrev = currentrev
        oldpage = currentpage
    else:
        oldrev = rev1
        oldpage = Page(request, pagename, rev=oldrev)

    if rev2 == 0:
        newrev = currentrev
        newpage = currentpage
    else:
        newrev = rev2
        newpage = Page(request, pagename, rev=newrev)

    edit_count = abs(newrev - oldrev)

    f = request.formatter
    request.write(f.div(1, id="content"))

    oldrev = oldpage.get_real_rev()
    newrev = newpage.get_real_rev()

    revlist = currentpage.getRevList()

    # The code below assumes that the page exists and has at least
    # one revision in the revlist; just bail out if not. Users
    # shouldn't really run into this anyway.
    if not revlist:
        request.write(f.div(0)) # end content div
        request.theme.send_footer(pagename)
        request.theme.send_closing_html()
        return

    title = _('Differences between revisions %d and %d') % (oldrev, newrev)
    if edit_count > 1:
        title += ' ' + _('(spanning %d versions)') % (edit_count, )
    title = f.text(title)

    # Revision list starts from 2...
    if oldrev == min(revlist):
        disable_prev = u' disabled="disabled"'
    else:
        disable_prev = u''

    if newrev == max(revlist):
        disable_next = u' disabled="disabled"'
    else:
        disable_next = u''

    page_url = wikiutil.escape(currentpage.url(request), True)

    revert_html = ""
    if request.user.may.revert(pagename):
        revert_html = """
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:center">
    <input name="action" value="revert" type="hidden">
    <input name="rev" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
 """ % (page_url, rev2, _("Revert to this revision"), disable_next)

    navigation_html = """
<span class="diff-header">%s</span>
<table class="diff">
<tr>
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:left">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
 %s
 <td style="border:0">
  <form action="%s" method="get">
   <div style="text-align:right">
    <input name="action" value="diff" type="hidden">
    <input name="rev1" value="%d" type="hidden">
    <input name="rev2" value="%d" type="hidden">
    <input value="%s" type="submit"%s>
   </div>
  </form>
 </td>
</tr>
</table>
""" % (title,
       page_url, oldrev - 1, oldrev, _("Previous change"), disable_prev,
       revert_html,
       page_url, newrev, newrev + 1, _("Next change"), disable_next, )

    request.write(f.rawHTML(navigation_html))

    if request.user.show_fancy_diff:
        from MoinMoin.util import diff_html
        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body())))
        newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff")
    else:
        from MoinMoin.util import diff_text
        lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
        if not lines:
            msg = f.text(" - " + _("No differences found!"))
            if edit_count > 1:
                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
                    'count': edit_count}) + f.paragraph(0)
            request.write(msg)
        else:
            if ignorews:
                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
            else:
                qstr = {'action': 'diff', 'ignorews': '1', }
                if rev1:
                    qstr['rev1'] = str(rev1)
                if rev2:
                    qstr['rev2'] = str(rev2)
                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                    text=_('Ignore changes in the amount of whitespace'),
                    querystr=qstr, rel='nofollow'), f.paragraph(0))

            request.write(f.preformatted(1))
            for line in lines:
                if line[0] == "@":
                    request.write(f.rule(1))
                request.write(f.text(line + '\n'))
            request.write(f.preformatted(0))

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
Code example #35
def execute(macro, text):
    request = macro.request
    formatter = macro.formatter
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(time.time())
    thispage = formatter.page.page_name
    # does the url have calendar params (= somebody has clicked on prev/next links in calendar) ?
    if 'calparms' in macro.request.args:
        has_calparms = 1 # yes!
        text2 = macro.request.args['calparms']
        cparmpagename, cparmyear, cparmmonth, cparmoffset, cparmoffset2, cparmheight6, cparmanniversary, cparmtemplate = \
            parseargs(request, text2, thispage, currentyear, currentmonth, 0, 0, False, False, u'')
        # Note: cparmheight6 and cparmanniversary are not used, they are just there
        # to have a consistent parameter string in calparms and macro args
    else:
        has_calparms = 0

    if text is None: # macro call without parameters
        text = u''

    # parse and check arguments
    parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, anniversary, parmtemplate = \
        parseargs(request, text, thispage, currentyear, currentmonth, 0, 0, False, False, u'')

    # does url have calendar params and is THIS the right calendar to modify (we can have multiple
    # calendars on the same page)?
    #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):

    # move all calendars when using the navigation:
    if has_calparms and cparmpagename == parmpagename:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset + cparmoffset2)
        parmoffset2 = cparmoffset2
        parmtemplate = cparmtemplate
    else:
        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)

    if request.isSpiderAgent and abs(currentyear - year) > 1:
        return '' # this is a bot and it didn't follow the rules (see below)
    if currentyear == year:
        attrs = {}
    else:
        attrs = {'rel': 'nofollow' } # otherwise even well-behaved bots will index forever

    # get the calendar
    monthcal = calendar.monthcalendar(year, month)

    # european / US differences
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
    # Set things up for Monday or Sunday as the first day of the week
    if calendar.firstweekday() == calendar.MONDAY:
        wkend = (5, 6)
        wkdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    if calendar.firstweekday() == calendar.SUNDAY:
        wkend = (0, 6)
        wkdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')

    colorstep = 85
    p = Page(request, thispage)
    qpagenames = '*'.join([wikiutil.quoteWikinameURL(pn) for pn in parmpagename])
    qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
    querystr = "calparms=%%s,%d,%d,%d,%%d,,,%%s" % (parmyear, parmmonth, parmoffset)
    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate))
    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate))
    prevylink = p.url(request, querystr % (qpagenames, parmoffset2 - 12, qtemplate))
    nextylink = p.url(request, querystr % (qpagenames, parmoffset2 + 12, qtemplate))

    prevmonth = formatter.url(1, prevlink, 'cal-link', **attrs) + '&lt;' + formatter.url(0)
    nextmonth = formatter.url(1, nextlink, 'cal-link', **attrs) + '&gt;' + formatter.url(0)
    prevyear = formatter.url(1, prevylink, 'cal-link', **attrs) + '&lt;&lt;' + formatter.url(0)
    nextyear = formatter.url(1, nextylink, 'cal-link', **attrs) + '&gt;&gt;' + formatter.url(0)

    if parmpagename != [thispage]:
        pagelinks = ''
        r, g, b = (255, 0, 0)
        l = len(parmpagename[0])
        steps = len(parmpagename)
        maxsteps = (255 / colorstep)
        if steps > maxsteps:
            steps = maxsteps
        chstep = int(l / steps)
        st = 0
        while st < l:
            ch = parmpagename[0][st:st+chstep]
            r, g, b = cliprgb(r, g, b)
            link = Page(request, parmpagename[0]).link_to(request, ch,
                        rel='nofollow',
                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + link
            r, g, b = (r, g+colorstep, b)
            st = st + chstep
        r, g, b = (255-colorstep, 255, 255-colorstep)
        for page in parmpagename[1:]:
            link = Page(request, page).link_to(request, page,
                        rel='nofollow',
                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
            pagelinks = pagelinks + '*' + link
        showpagename = '   %s<BR>\n' % pagelinks
    else:
        showpagename = ''
    if calendar.firstweekday() == calendar.SUNDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, months[month-1], str(year), nextmonth, nextyear)
    if calendar.firstweekday() == calendar.MONDAY:
        resth1 = '  <th colspan="7" class="cal-header">\n' \
                 '%s' \
                 '   %s&nbsp;%s&nbsp;<b>&nbsp;%s&nbsp;/&nbsp;%s</b>&nbsp;%s\n&nbsp;%s\n' \
                 '  </th>\n' % (showpagename, prevyear, prevmonth, str(year), month, nextmonth, nextyear)
    restr1 = ' <tr>\n%s </tr>\n' % resth1

    r7 = range(7)
    restd2 = []
    for wkday in r7:
        wday = _(wkdays[wkday])
        if wkday in wkend:
            cssday = "cal-weekend"
        else:
            cssday = "cal-workday"
        restd2.append('  <td class="%s">%s</td>\n' % (cssday, wday))
    restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)

    if parmheight6:
        while len(monthcal) < 6:
            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]

    maketip_js = []
    restrn = []
    for week in monthcal:
        restdn = []
        for wkday in r7:
            day = week[wkday]
            if not day:
                restdn.append('  <td class="cal-invalidday">&nbsp;</td>\n')
            else:
                page = parmpagename[0]
                if anniversary:
                    link = "%s/%02d-%02d" % (page, month, day)
                else:
                    link = "%s/%4d-%02d-%02d" % (page, year, month, day)
                daypage = Page(request, link)
                if daypage.exists() and request.user.may.read(link):
                    csslink = "cal-usedday"
                    query = {}
                    r, g, b, u = (255, 0, 0, 1)
                    daycontent = daypage.get_raw_body()
                    header1_re = re.compile(r'^\s*=\s(.*)\s=$', re.MULTILINE) # re.UNICODE
                    titletext = []
                    for match in header1_re.finditer(daycontent):
                        if match:
                            title = match.group(1)
                            title = wikiutil.escape(title).replace("'", "\\'")
                            titletext.append(title)
                    tipname_unescaped = link.replace("'", "\\'")
                    link = wikiutil.escape(link).replace("'", "\\'")
                    tipname = link
                    tiptitle = link
                    tiptext = '<br>'.join(titletext)
                    maketip_js.append("maketip('%s','%s','%s');" % (tipname, tiptitle, tiptext))
                    attrs = {'onMouseOver': "tip('%s')" % tipname_unescaped,
                             'onMouseOut': "untip()"}
                else:
                    csslink = "cal-emptyday"
                    if parmtemplate:
                        query = {'action': 'edit', 'template': parmtemplate}
                    else:
                        query = {}
                    r, g, b, u = (255, 255, 255, 0)
                    if wkday in wkend:
                        csslink = "cal-weekend"
                    attrs = {'rel': 'nofollow'}
                for otherpage in parmpagename[1:]:
                    otherlink = "%s/%4d-%02d-%02d" % (otherpage, year, month, day)
                    otherdaypage = Page(request, otherlink)
                    if otherdaypage.exists():
                        csslink = "cal-usedday"
                        if u == 0:
                            r, g, b = (r-colorstep, g, b-colorstep)
                        else:
                            r, g, b = (r, g+colorstep, b)
                r, g, b = cliprgb(r, g, b)
                style = 'background-color:#%02x%02x%02x' % (r, g, b)
                fmtlink = formatter.url(1, daypage.url(request, query), csslink, **attrs) + str(day) + formatter.url(0)
                if day == currentday and month == currentmonth and year == currentyear:
                    cssday = "cal-today"
                    fmtlink = "<b>%s</b>" % fmtlink # for browser with CSS probs
                else:
                    cssday = "cal-nottoday"
                restdn.append('  <td style="%s" class="%s">%s</td>\n' % (style, cssday, fmtlink))
        restrn.append(' <tr>\n%s </tr>\n' % "".join(restdn))

    restable = '<table border="2" cellspacing="2" cellpadding="2">\n<col width="14%%" span="7">%s%s%s</table>\n'
    restable = restable % (restr1, restr2, "".join(restrn))

    if maketip_js:
        tip_js = '''<script language="JavaScript" type="text/javascript">
<!--
%s
// -->
</script>
''' % '\n'.join(maketip_js)
    else:
        tip_js = ''

    result = """\
<script type="text/javascript" src="%s/common/js/infobox.js"></script>
<div id="%s" style="position:absolute; visibility:hidden; z-index:20; top:-999em; left:0px;"></div>
%s%s
""" % (request.cfg.url_prefix_static, formatter.make_id_unique('infodiv'), tip_js, restable)
    return formatter.rawHTML(result)
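
The prev/next navigation links in the calendar above shift the displayed month by parmoffset2 +/- 1 (and +/- 12 for years) through yearmonthplusoffset(), which is not included in this excerpt. A plausible minimal implementation of that month arithmetic (an assumption, not the actual MoinMoin helper):

def yearmonthplusoffset(year, month, offset):
    """Add a month offset to (year, month), wrapping months into 1..12."""
    month += offset
    year += (month - 1) // 12      # carry whole years
    month = (month - 1) % 12 + 1   # wrap the month back into 1..12
    return year, month

# e.g. yearmonthplusoffset(2023, 11, 3) -> (2024, 2)
#      yearmonthplusoffset(2023, 1, -1) -> (2022, 12)
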
Code example #36
    def execute(self):

        if FrontpageMacro.lastupdated == None or long(time.time()) - FrontpageMacro.lastupdated > 3600L:
            ngowikiutil = NgoWikiUtil(self.request)
            try:
                ngowikiutil.open_database()
                FrontpageMacro.totalcount_activities = ngowikiutil.count_pages_by_tag([u'服务产品类']) + ngowikiutil.count_pages_by_tag([u'视听产品类']) + ngowikiutil.count_pages_by_tag([u'实体产品类'])
                FrontpageMacro.totalcount_ngos = ngowikiutil.count_pages_by_tag([u'公益机构类'])
                FrontpageMacro.totalcount_enterprises = ngowikiutil.count_pages_by_tag([u'企业志愿组织类'])
                FrontpageMacro.featured_activities = ngowikiutil.select_pages_with_one_of_tags([u'服务产品类', u'视听产品类', u'实体产品类'], 'featured', 'DESC', 0, 20)
                for record in FrontpageMacro.featured_activities[0:2]:
                    record["summary"] = record["summary"].replace("'''", "")
                    record["summary"] = filter_summary(record["summary"], 50);
                    pagename = record["path"]
                    page = Page(self.request, pagename)
                    record["link"] = page.url(self.request)
                    if len(record["logo"]) > 0 and exists(self.request, record["path"], record["logo"]):
                        record["logo_link"] = getAttachUrl(record["path"], record["logo"], self.request)
                    else:
                        findLogo = False
                        text = page.getPageText()
                        match = re.search('\\{\\{attachment:([^\\|]+)\\|\\|.*}}', text)
                        if match != None:
                            logo = match.group(1)
                            if exists(self.request, record["path"], logo):
                                record["logo_link"] = getAttachUrl(record["path"], logo, self.request)
                                findLogo = True
                        if not findLogo:
                            record["logo_link"] = self.request.cfg.url_prefix_static + "/ngowiki/img/no-logo.png"
                FrontpageMacro.featured_ngos = ngowikiutil.select_pages_by_tag([u'公益机构类'], 'featured', 'DESC', 0, 2)
                for record in FrontpageMacro.featured_ngos:
                    pagename = record["path"]
                    record["summary"] = filter_summary(record["summary"], 50)
                    page = Page(self.request, pagename)
                    record["link"] = page.url(self.request)
                    if len(record["logo"]) > 0 and exists(self.request, record["path"], record["logo"]):
                        record["logo_link"] = getAttachUrl(record["path"], record["logo"], self.request)
                    else:
                        record["logo_link"] = self.request.cfg.url_prefix_static + "/ngowiki/img/no-logo.png"
                FrontpageMacro.recently_added = ngowikiutil.select_latest_created_pages([u'服务产品类', u'视听产品类', u'实体产品类', u'公益机构类', u'企业志愿组织类'], 0, 5)
                for record in FrontpageMacro.recently_added:
                    pagename = record["path"]
                    page = Page(self.request, pagename)
                    record["link"] = page.url(self.request)
                    if len(record["logo"]) > 0 and exists(self.request, record["path"], record["logo"]):
                        record["logo_link"] = getAttachUrl(record["path"], record["logo"], self.request)
                    else:
                        record["logo_link"] = self.request.cfg.url_prefix_static + "/ngowiki/img/no-logo.png"
                    if u'服务产品类' in ngowikiutil.parse_page(page)["categories"] or u'视听产品类' in ngowikiutil.parse_page(page)["categories"] or u'实体产品类' in ngowikiutil.parse_page(page)["categories"]:
                        record["recently_added_type"] = "activity"
                    elif u'公益机构类' in ngowikiutil.parse_page(page)["categories"]:
                        record["recently_added_type"] = "ngo"
                    else:
                        record["recently_added_type"] = "enterprise"
                FrontpageMacro.news_items = ngowikiutil.select_pages_by_tag([u'新闻动态类'], 'featured', 'DESC', 0, 100)
                for record in FrontpageMacro.news_items:
                    pagename = record["path"]
                    page = Page(self.request, pagename)
                    record["link"] = page.url(self.request)
                    if len(record["logo"]) > 0 and exists(self.request, record["path"], record["logo"]):
                        record["logo_link"] = getAttachUrl(record["path"], record["logo"], self.request)
                    else:
                        record["logo_link"] = self.request.cfg.url_prefix_static + "/ngowiki/img/no-logo.png"
            finally:
                ngowikiutil.close_database(True)
            FrontpageMacro.lastupdated = long(time.time())

        context = {
            'totalcount_activities': FrontpageMacro.totalcount_activities,
            'totalcount_ngos': FrontpageMacro.totalcount_ngos,
            'totalcount_enterprises': FrontpageMacro.totalcount_enterprises,
            'featured_activities': FrontpageMacro.featured_activities,
            'featured_ngos': FrontpageMacro.featured_ngos,
            'recently_added': FrontpageMacro.recently_added,
            'news_items': FrontpageMacro.news_items,
            'logo_url': self.request.cfg.url_prefix_static + "/ngowiki/img/sitelogo.png",
            'slogan_url': self.request.cfg.url_prefix_static + "/ngowiki/img/slogan2.png"
        }

        engine = tenjin.Engine(path=[os.path.dirname(__file__) + '/views'])
        html = engine.render('Frontpage.pyhtml', context)

        return self.formatter.rawHTML(html)
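
The macro above computes its front-page data at most once per hour and keeps the results in class attributes (lastupdated plus the various FrontpageMacro.* fields). A minimal sketch of that class-level caching pattern, with illustrative names rather than the actual FrontpageMacro interface:

import time

class CachedMacro(object):
    lastupdated = None
    data = None

    def execute(self):
        now = int(time.time())
        if CachedMacro.lastupdated is None or now - CachedMacro.lastupdated > 3600:
            CachedMacro.data = self.recompute()      # the expensive database work
            CachedMacro.lastupdated = now
        return CachedMacro.data

    def recompute(self):
        return {'totalcount_activities': 0}          # placeholder result
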
Code example #37
def execute(pagename, request, fieldname='value', titlesearch=0, statistic=0):
    _ = request.getText
    titlesearch = checkTitleSearch(request)
    if titlesearch < 0:
        check_surge_protect(request, kick=True) # get rid of spammer
        return

    if 'metasearch' in request.values: 
        form = MultiDict(request.values)
        form['action'] = 'MetaSearch'
        val = form.get('value', '')
        form['q'] = val
        request.values = CombinedMultiDict([MultiDict(form)])
        return ms_execute(pagename, request)

    advancedsearch = isAdvancedSearch(request)

    form = request.values

    # context is relevant only for full search
    if titlesearch:
        context = 0
    elif advancedsearch:
        context = 180 # XXX: hardcoded context count for advancedsearch
    else:
        context = int(form.get('context', 0))

    # Get other form parameters
    needle = form.get(fieldname, '')
    case = int(form.get('case', 0))
    regex = int(form.get('regex', 0)) # no interface currently
    hitsFrom = int(form.get('from', 0))
    highlight_titles = int(form.get('highlight_titles', 1))
    highlight_pages = int(form.get('highlight_pages', 1))
    mtime = None
    msg = ''
    historysearch = 0

    # if advanced search is enabled we construct our own search query
    if advancedsearch:
        and_terms = form.get('and_terms', '').strip()
        or_terms = form.get('or_terms', '').strip()
        not_terms = form.get('not_terms', '').strip()
        #xor_terms = form.get('xor_terms', '').strip()
        categories = form.getlist('categories') or ['']
        timeframe = form.get('time', '').strip()
        language = form.getlist('language') or ['']
        mimetype = form.getlist('mimetype') or [0]
        excludeunderlay = form.get('excludeunderlay', 0)
        nosystemitems = form.get('nosystemitems', 0)
        historysearch = form.get('historysearch', 0)

        mtime = form.get('mtime', '')
        if mtime:
            mtime_parsed = None

            # get mtime from known date/time formats
            for fmt in (request.user.datetime_fmt,
                    request.cfg.datetime_fmt, request.user.date_fmt,
                    request.cfg.date_fmt):
                try:
                    mtime_parsed = time.strptime(mtime, fmt)
                except ValueError:
                    continue
                else:
                    break

            if mtime_parsed:
                mtime = time.mktime(mtime_parsed)
            else:
                # didn't work, let's try parsedatetime
                cal = Calendar()
                mtime_parsed, parsed_what = cal.parse(mtime)
                # XXX it is unclear if usage of localtime here and in parsedatetime module is correct.
                # time.localtime is the SERVER's local time and of no relevance to the user (being
                # somewhere in the world)
                # mktime is reverse function for localtime, so this maybe fixes it again!?
                if parsed_what > 0 and mtime_parsed <= time.localtime():
                    mtime = time.mktime(mtime_parsed)
                else:
                    mtime_parsed = None # we don't use invalid stuff

            # show info
            if mtime_parsed:
                # XXX mtime_msg is not shown in some cases
                mtime_msg = _("(!) Only pages changed since '''%s''' are being displayed!",
                              wiki=True) % request.user.getFormattedDateTime(mtime)
            else:
                mtime_msg = _('/!\\ The modification date you entered was not '
                        'recognized and is therefore not considered for the '
                        'search results!', wiki=True)
        else:
            mtime_msg = None

        word_re = re.compile(r'(\"[\w\s]+"|\w+)', re.UNICODE)
        needle = ''
        if categories[0]:
            needle += 'category:%s ' % ','.join(categories)
        if language[0]:
            needle += 'language:%s ' % ','.join(language)
        if mimetype[0]:
            needle += 'mimetype:%s ' % ','.join(mimetype)
        if excludeunderlay:
            needle += '-domain:underlay '
        if nosystemitems:
            needle += '-domain:system '
        if and_terms:
            needle += '(%s) ' % and_terms
        if not_terms:
            needle += '(%s) ' % ' '.join(['-%s' % t for t in word_re.findall(not_terms)])
        if or_terms:
            needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))

    # check for sensible search term
    stripped = needle.strip()
    if len(stripped) == 0:
        request.theme.add_msg(_('Please use a more selective search term instead '
                'of {{{"%s"}}}', wiki=True) % wikiutil.escape(needle), "error")
        Page(request, pagename).send_page()
        return
    needle = stripped

    # Setup for type of search
    if titlesearch:
        title = _('Title Search: "%s"')
        sort = 'page_name'
    else:
        if advancedsearch:
            title = _('Advanced Search: "%s"')
        else:
            title = _('Full Text Search: "%s"')
        sort = 'weight'

    # search the pages
    from MoinMoin.search import searchPages, QueryParser, QueryError
    try:
        query = QueryParser(case=case, regex=regex,
                titlesearch=titlesearch).parse_query(needle)
    except QueryError: # catch errors in the search query
        request.theme.add_msg(_('Your search query {{{"%s"}}} is invalid. Please refer to '
                'HelpOnSearching for more information.', wiki=True, percent=True) % wikiutil.escape(needle), "error")
        Page(request, pagename).send_page()
        return

    results = searchPages(request, query, sort, mtime, historysearch)

    # directly show a single hit for title searches
    # this is the "quick jump" functionality if you don't remember
    # the pagename exactly, but just some parts of it
    if titlesearch and len(results.hits) == 1:
        page = results.hits[0]
        if not page.attachment: # we did not find an attachment
            page = Page(request, page.page_name)
            querydict = {}
            if highlight_pages:
                highlight = query.highlight_re()
                if highlight:
                    querydict.update({'highlight': highlight})
            url = page.url(request, querystr=querydict)
            request.http_redirect(url)
            return
    if not results.hits: # no hits?
        f = request.formatter
        querydict = wikiutil.parseQueryString(request.query_string).to_dict()
        querydict.update({'titlesearch': 0})

        request.theme.add_msg(_('Your search query {{{"%s"}}} didn\'t return any results. '
                'Please change some terms and refer to HelpOnSearching for '
                'more information.%s', wiki=True, percent=True) % (wikiutil.escape(needle),
                    titlesearch and ''.join([
                        '<br>',
                        _('(!) Consider performing a', wiki=True), ' ',
                        f.url(1, href=request.page.url(request, querydict, escape=0)),
                        _('full-text search with your search terms'),
                        f.url(0), '.',
                    ]) or ''), "error")
        Page(request, pagename).send_page()
        return

    # This action generates data using the user language
    request.setContentLanguage(request.lang)

    request.theme.send_title(title % needle, pagename=pagename)

    # Start content (important for RTL support)
    request.write(request.formatter.startContent("content"))

    # Hints
    f = request.formatter
    hints = []

    if titlesearch:
        querydict = wikiutil.parseQueryString(request.query_string).to_dict()
        querydict.update({'titlesearch': 0})

        hints.append(''.join([
            _("(!) You're performing a title search that might not include"
                ' all related results of your search query in this wiki. <<BR>>', wiki=True),
            ' ',
            f.url(1, href=request.page.url(request, querydict, escape=0)),
            f.text(_('Click here to perform a full-text search with your '
                'search terms!')),
            f.url(0),
        ]))

    if advancedsearch and mtime_msg:
        hints.append(mtime_msg)

    if hints:
        request.write(searchHints(f, hints))

    # Search stats
    request.write(results.stats(request, request.formatter, hitsFrom))

    # Then search results
    info = not titlesearch
    if context:
        output = results.pageListWithContext(request, request.formatter,
                info=info, context=context, hitsFrom=hitsFrom, hitsInfo=1,
                highlight_titles=highlight_titles,
                highlight_pages=highlight_pages)
    else:
        output = results.pageList(request, request.formatter, info=info,
                hitsFrom=hitsFrom, hitsInfo=1,
                highlight_titles=highlight_titles,
                highlight_pages=highlight_pages)

    request.write(output)

    request.write(request.formatter.endContent())
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
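
When advanced search is used, the action above assembles its own query string ("needle") from the category, language and mimetype filters plus the and/or/not term fields before handing it to QueryParser. A minimal sketch of that assembly for a few of the fields (the function name is hypothetical):

import re

def build_needle(categories=(), and_terms='', or_terms='', not_terms=''):
    """Assemble a search needle from a subset of the advanced-search fields."""
    word_re = re.compile(r'(\"[\w\s]+"|\w+)', re.UNICODE)
    needle = ''
    if categories:
        needle += 'category:%s ' % ','.join(categories)
    if and_terms:
        needle += '(%s) ' % and_terms
    if not_terms:
        needle += '(%s) ' % ' '.join('-%s' % t for t in word_re.findall(not_terms))
    if or_terms:
        needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
    return needle.strip()

# e.g. build_needle(categories=['CategoryHowTo'],
#                   and_terms='install linux', not_terms='windows')
#      -> 'category:CategoryHowTo (install linux) (-windows)'
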
Code example #38
File: alicia.py Project: danirus/moin-theme-alicia
    def send_title(self, text, **keywords):
        """
        An almost verbatim copy of MoinMoin.theme.__init__.ThemeBase.send_title
        that replaces the hard-coded HTML string template with Jinja2.
        """
        req = self.request
        _ = req.getText
        rev = req.rev

        if keywords.has_key('page'):
            page = keywords['page']
            pagename = page.page_name
        else:
            pagename = keywords.get('pagename', '')
            page = Page(req, pagename)
        if keywords.get('msg', ''):
            raise DeprecationWarning("Using send_page(msg=) is deprecated! "
                                     "Use theme.add_msg() instead!")
        scriptname = req.script_root

        # get name of system pages
        page_front_page = wikiutil.getFrontPage(req).page_name
        page_help_contents = getLocalizedPage(req, 'HelpContents').page_name
        page_title_index = getLocalizedPage(req, 'TitleIndex').page_name
        page_site_navigation = getLocalizedPage(req,
                                                'SiteNavigation').page_name
        page_word_index = getLocalizedPage(req, 'WordIndex').page_name
        page_help_formatting = getLocalizedPage(req,
                                                'HelpOnFormatting').page_name
        page_find_page = getLocalizedPage(req, 'FindPage').page_name
        home_page = wikiutil.getInterwikiHomePage(req)
        page_parent_page = getattr(page.getParentPage(), 'page_name', None)

        # set content_type, including charset, so web server doesn't touch it:
        req.content_type = "text/html; charset=%s" % config.charset

        meta_keywords = req.getPragma('keywords') or ""
        meta_description = req.getPragma('description') or ""

        rss_link = self.rsslink({'page': page})
        universal_edit_button = self.universal_edit_button({'page': page})
        stylesheets = self.html_stylesheets({
            'print_media':
            keywords.get('print_mode', False),
            'media':
            keywords.get('media', 'screen')
        })
        gui_edit_link = self.guiEditorScript({'page': page})

        context = {
            'title': Markup(wikiutil.escape(text)),
            'sitename': wikiutil.escape(req.cfg.html_pagetitle or req.cfg.sitename),
            'charset': page.output_charset,
            'meta_keywords': wikiutil.escape(meta_keywords, 1),
            'meta_description': wikiutil.escape(meta_description, 1),
            'robots': None,  # might be "index", "noindex", or None
            'refresh_seconds': None,
            'refresh_url': None,
            'static_base': "%s/%s/" % (self.cfg.url_prefix_static, self.name),
            'stylesheets': stylesheets,
            'rss_link_title': rss_link[0],
            'rss_link_href': rss_link[1],
            'universal_edit_button_title': universal_edit_button[0],
            'universal_edit_button_href': universal_edit_button[1],
            'common_js': '%s/common/js/%s.js' % (req.cfg.url_prefix_static, 'common'),
            'search_hint': req.getText('Search'),
            'gui_editor_link_href': gui_edit_link[0],
            'gui_editor_link_text': gui_edit_link[1],
            'extra_html_head': Markup(keywords.get('html_head', '')),
            'page_start_href': req.href(page_front_page),
            'page_alternate_title': None,
            'page_alternate_href': '',
            'print_alternate_title': None,
            'print_alternate_href': '',
            'page_up_href': None,
        }

        # search engine precautions / optimization:
        # if it is an action or edit/search, send query headers (noindex,nofollow):
        if req.query_string or req.method == 'POST':
            context['robots'] = "noindex"
        # we don't want to have BadContent stuff indexed:
        elif pagename in ['BadContent', 'LocalBadContent']:
            context['robots'] = "noindex"
        # if it is a special page, index it and follow the links - we do it
        # for the original, English pages as well as for (the possibly
        # modified) frontpage:
        elif pagename in [
                page_front_page,
                req.cfg.page_front_page,
                page_title_index,
                'TitleIndex',
                page_find_page,
                'FindPage',
                page_site_navigation,
                'SiteNavigation',
                'RecentChanges',
        ]:
            context['robots'] = "index"

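        # if a refresh processing instruction was passed in, set the refresh
        # interval and target URL for the rendered <head>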
        if 'pi_refresh' in keywords and keywords['pi_refresh']:
            context.update({
                'refresh_seconds': keywords['pi_refresh'][0],
                'refresh_url': keywords['pi_refresh'][1]
            })

        # Links
        if pagename:
            context.update({
                'page_alternate_title': _('Wiki Markup'),
                'page_alternate_href': page.url(req, querystr=dict(action='raw')),
            })
            context.update({
                'print_alternate_title': _('Print View'),
                'print_alternate_href': page.url(req, querystr=dict(action='print')),
            })
            if page_parent_page:
                context['page_up_href'] = req.href(page_parent_page)

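        # Temporarily swap req.write for a StringIO buffer so that the HTML emitted
        # by AttachFile.send_link_rel() is captured and handed to the template as
        # 'attached_links' instead of being written to the response directly.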
        output = StringIO()
        write_f_onhold = req.write
        req.write = lambda s: output.write(s.encode('utf-8'))

        if pagename and req.user.may.read(pagename):
            from MoinMoin.action import AttachFile
            AttachFile.send_link_rel(req, pagename)

        context['attached_links'] = Markup(output.getvalue())
        req.write = write_f_onhold

        context['extra_links'] = [
            {'rel': "Search", 'href': req.href(page_find_page)},
            {'rel': "Index", 'href': req.href(page_title_index)},
            {'rel': "Glossary", 'href': req.href(page_word_index)},
            {'rel': "Help", 'href': req.href(page_help_formatting)},
        ]

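        # render the <head> section through Jinja2 and send it to the client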
        template = self.j2env.get_template('bits/head.html')
        output = template.render(context)
        req.write(output)

        output = []

        # start the <body>
        bodyattr = []
        if 'body_attr' in keywords:
            bodyattr.append(' ')
            bodyattr.append(keywords['body_attr'])

        # Set body to the user interface language and direction
        bodyattr.append(' %s' % self.ui_lang_attr())

        body_onload = keywords.get('body_onload', '')
        if body_onload:
            bodyattr.append(''' onload="%s"''' % body_onload)
        output.append('\n<body%s>\n' % ''.join(bodyattr))

        # Output -----------------------------------------------------------

        # If in print mode, start page div and emit the title
        if keywords.get('print_mode', 0):
            d = {
                'title_text': text,
                'page': page,
                'page_name': pagename or '',
                'rev': rev,
            }
            req.themedict = d
            output.append(self.startPage())
            output.append(self.interwiki(d))
            output.append(self.title(d))

        # In standard mode, emit theme.header
        else:
            exists = pagename and page.exists(includeDeleted=False)
            # prepare dict for theme code:
            d = {
                'theme': self.name,
                'script_name': scriptname,
                'title_text': text,
                'logo_string': req.cfg.logo_string,
                'site_name': req.cfg.sitename,
                'page': page,
                'rev': rev,
                'pagesize': pagename and page.size() or 0,
                # exists checked to avoid creation of empty edit-log for non-existing pages
                'last_edit_info': exists and page.lastEditInfo() or '',
                'page_name': pagename or '',
                'page_find_page': page_find_page,
                'page_front_page': page_front_page,
                'home_page': home_page,
                'page_help_contents': page_help_contents,
                'page_help_formatting': page_help_formatting,
                'page_parent_page': page_parent_page,
                'page_title_index': page_title_index,
                'page_word_index': page_word_index,
                'user_name': req.user.name,
                'user_valid': req.user.valid,
                'msg': self._status,
                'trail': keywords.get('trail', None),
                # Discontinued keys, keep for a while for 3rd party theme developers
                'titlesearch': 'use self.searchform(d)',
                'textsearch': 'use self.searchform(d)',
                'navibar': ['use self.navibar(d)'],
                'available_actions': ['use self.request.availableActions(page)'],
            }

            # add quoted versions of pagenames
            newdict = {}
            for key in d:
                if key.startswith('page_'):
                    if d[key] is not None:
                        newdict['q_' + key] = wikiutil.quoteWikinameURL(d[key])
                    else:
                        newdict['q_' + key] = None
            d.update(newdict)
            req.themedict = d

            # now call the theming code to do the rendering
            if keywords.get('editor_mode', 0):
                output.append(self.editorheader(d))
            else:
                output.append(self.header(d))

        # emit it
        req.write(''.join(output))
        output = []
        self._send_title_called = True
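
Note: send_title above assumes the theme defines a Jinja2 environment in self.j2env and ships a bits/head.html template; neither is shown in this excerpt. A minimal, hypothetical sketch of how such an environment could be built follows (the template directory and the options passed are assumptions, not taken from the alicia project):

# Hypothetical sketch -- not part of the alicia theme source.
from jinja2 import Environment, FileSystemLoader

def make_theme_environment(template_dir):
    """Build a Jinja2 environment able to load templates such as 'bits/head.html'."""
    return Environment(
        loader=FileSystemLoader(template_dir),  # e.g. the theme's templates/ directory
        autoescape=True,       # escape by default; Markup() values pass through unescaped
        trim_blocks=True,
        lstrip_blocks=True,
    )

# usage sketch (e.g. in the theme's constructor):
#   self.j2env = make_theme_environment('/path/to/moin-theme-alicia/templates')
#   html = self.j2env.get_template('bits/head.html').render(context)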