Code example #1
File: Navigation.py Project: aahlad/soar
    def do_slideshow(self, focus=None):
        """ Slideshow master page links.

            If `focus` is set, it is the name of a slide page; these only
            get the mode toggle and edit links.
        """
        _ = self._
        curpage = focus or self.pagename
        result = []
        request = self.macro.request
        pg = Page(request, curpage)
        if self.print_mode:
            # projection mode
            label = _('Wiki')
            toggle = {}
            result.append(pg.link_to(request, text=_('Edit'), querystr={'action': 'edit'}))
            result.append('   ')
        else:
            # wiki mode
            label = _('Slideshow')
            toggle = self.PROJECTION

        # add mode toggle link
        result.append(pg.link_to(request, text=label, querystr=toggle))

        # leave out the following on slide pages
        if focus is None:
            children = _getPages(request, '^%s/' % re.escape(self.pagename))
            if children:
                # add link to first child if one exists
                result.append('   ')
                result.append(Page(request, children[0]).link_to(request, text=_('Start'), querystr=self.querystr))

        return ''.join(result)
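
All of the examples collected here follow the same basic pattern: construct a Page object for a page name and call its link_to method to get back an HTML link, optionally passing link text, a query string, and extra attributes such as rel. A minimal sketch of that pattern, assuming the MoinMoin 1.x Page API used above (the helper name and the query values are illustrative only):

from MoinMoin.Page import Page

def info_link(request, pagename):
    # Build an HTML link to the page's "info" action. If text= is omitted,
    # link_to falls back to the page name itself as the link text.
    page = Page(request, pagename)
    return page.link_to(request, text=pagename,
                        querystr={'action': 'info'}, rel='nofollow')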
Code example #2
File: LocalSiteMap.py Project: steveyen/moingo
 def visit(self, request, name, depth):
     """ Visit a page, i.e. create a link.
     """
     if not name:
         return
     pg = Page(request, name)
     action = __name__.split('.')[-1]
     self.append(' ' * (5*depth+1))
     self.append(pg.link_to(request, querystr={'action': action}))
     self.append("&nbsp;<small>[")
     self.append(pg.link_to(request, 'view'))
     self.append("</small>]<br>")
Code example #3
File: LocalSiteMap.py Project: aahlad/soar
 def visit(self, request, name, depth):
     """ Visit a page, i.e. create a link.
     """
     if not name:
         return
     _ = request.getText
     pg = Page(request, name)
     action = __name__.split('.')[-1]
     self.append('&nbsp;' * (5*depth+1))
     self.append(pg.link_to(request, querystr={'action': action}))
     self.append("&nbsp;<small>[")
     self.append(pg.link_to(request, _('view')))
     self.append("</small>]<br>")
Code example #4
def execute(pagename, request):
    enter_page(request, pagename, "Group Editor")
    macro = wikiutil.importPlugin(request.cfg, "macro", "GroupEditor", "do_macro")
    request.write(macro(request))
    page = Page(request, pagename)
    request.write(page.link_to(request, text=request.getText("back")))
    exit_page(request, pagename)
Code example #5
 def username(self, d):
     """ Assemble the username / userprefs link
     
     @param d: parameter dictionary
     @rtype: string
     @return: username html
     """
     from MoinMoin.Page import Page
     request = self.request
     _ = request.getText
     
     userlinks = []
     # Add username/homepage link for registered users. We don't care
     # if it exists, the user can create it.
     if request.user.valid:
         homepage = Page(request, request.user.name)
         title = homepage.split_title(request)
         homelink = homepage.link_to(request, text=title)
         userlinks.append(homelink)
     
         # Set pref page to localized Preferences page
         prefpage = wikiutil.getSysPage(request, 'UserPreferences')
         title = prefpage.split_title(request)
         userlinks.append(prefpage.link_to(request, text=title))
     else:
         # Add prefpage links with title: Login
         prefpage = wikiutil.getSysPage(request, 'UserPreferences')
         userlinks.append(prefpage.link_to(request, text=_("Login")))
         
     html = '<ul id="username"><li>%s</li></ul>' % '</li>\n<li>'.join(userlinks)
     return html
Code example #6
    def trail(self, d):
        """ Assemble page trail
        
        @param d: parameter dictionary
        @rtype: unicode
        @return: trail html
        """
        request = self.request
        user = request.user
        if user.valid and user.show_page_trail:
            trail = user.getTrail()
            html = ''  # ensure html is always defined, even when the trail is empty
            if trail:
                items = []
                # Show all items except the last one which is this page.
                for pagename in trail[:-1]:
                    page = Page(request, pagename)
                    title = page.split_title(request)
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append('<li>%s</li>' % link)
                html = '''
<ul id="pagetrail">
%s
</ul>''' % '\n'.join(items)
            return html
        return ''
Code example #7
 def pagelink(self, on, pagename='', page=None, **kw):
     FormatterBase.pagelink(self, on, pagename, page, **kw)
     if page is None:
         page = Page(self.request, pagename, formatter=self)
     link_text = page.link_to(self.request, on=on, **kw)
     self._curr.xml_append(tree.text(U(link_text)))
     return ''
Code example #8
File: wikiconfig_sample.py Project: happytk/moin
def dayone_title_replacer(cfg, pagename, request, **kw):
    mt = get_middleware_type(request, pagename)
    link_only = kw.get('link_only', False)
    if mt == 'do':
        p = Page(request, pagename)
        return p.link_to(request, text=p._get_adpt().text())
    return 
Code example #9
    def trail(self, d):
        """ Assemble page trail

        @param d: parameter dictionary
        @rtype: unicode
        @return: trail html
        """
        request = self.request
        user = request.user
        html = ''
        if not user.valid or user.show_page_trail:
            trail = user.getTrail()
            if trail:
                items = []
                for pagename in trail:
                    try:
                        interwiki, page = wikiutil.split_interwiki(pagename)
                        if interwiki != request.cfg.interwikiname and interwiki != 'Self':
                            link = (self.request.formatter.interwikilink(True, interwiki, page) +
                                    self.shortenPagename(page) +
                                    self.request.formatter.interwikilink(False, interwiki, page))
                            items.append(link)
                            continue
                        else:
                            pagename = page

                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    title = page.split_title()
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append(link)
                html = u'<div id="pagetrail">%s</div>' % u'<span class="sep"> &raquo; </span>'.join(items)
        return html
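
This trail code, and the nearly identical versions further down, rely on wikiutil.split_interwiki to decide whether a trail entry is a local page or an interwiki link. A hedged sketch of that behaviour, inferred from the 'Self' check above (the exact return values are assumptions about the 1.9-era API, not taken from these projects):

from MoinMoin import wikiutil

# An entry with a wiki prefix is split into (wikiname, pagename) ...
wikiname, pagename = wikiutil.split_interwiki(u'MoinMoin:RecentChanges')
# ... while a plain page name comes back with u'Self' as the wiki name,
# which is why the code above treats 'Self' and cfg.interwikiname as local.
wikiname, pagename = wikiutil.split_interwiki(u'FrontPage')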
Code example #10
File: bplus.py Project: jgoerzen/bplus
    def title(self, d):
        """ Assemble the title (now using breadcrumbs)

        @param d: parameter dictionary
        @rtype: string
        @return: title html
        """
        _ = self.request.getText
        content = []
        if d['title_text'] == d['page'].split_title(): # just showing a page, no action
            curpage = ''
            segments = d['page_name'].split('/') # was: title_text
            for s in segments[:-1]:
                curpage += s
                thisp = Page(self.request, curpage)
                content.append(thisp.link_to(self.request, thisp.split_title().split('/')[-1]))
                curpage += '/'
            link_text = d['page'].split_title().split('/')[-1]
            link_title = _('Click to do a full-text search for this title')
            link_query = {
                'action': 'fullsearch',
                'value': 'linkto:"%s"' % d['page_name'],
                'context': '180',
            }
            # we don't use d['title_link'] any more, but make it ourselves:
            link = d['page'].link_to(self.request, link_text, querystr=link_query, title=link_title, css_class='backlink', rel='nofollow')
            content.append(link)
        else:
            content.append(wikiutil.escape(d['title_text']))

        location_html = u'<span class="sep"> / </span>'.join(content)
        html = u'<div id="pagelocation">%s</div>' % location_html
        return html
Code example #11
File: application_xml.py Project: pombredanne/akara
 def pagelink(self, on, pagename="", page=None, **kw):
     FormatterBase.pagelink(self, on, pagename, page, **kw)
     if page is None:
         page = Page(self.request, pagename, formatter=self)
     link_text = page.link_to(self.request, on=on, **kw)
     self._curr.xml_append(tree.text(U(link_text)))
     return ""
Code example #12
    def trail(self, d):
        """ Assemble page trail

        @param d: parameter dictionary
        @rtype: unicode
        @return: trail html
        """
        request = self.request
        user = request.user
        html = ''
        if not user.valid or user.show_page_trail:
            trail = user.getTrail()
            if trail:
                items = []
                for pagename in trail:
                    try:
                        interwiki, page = wikiutil.split_interwiki(pagename)
                        if interwiki != request.cfg.interwikiname and interwiki != 'Self':
                            link = (self.request.formatter.interwikilink(True, interwiki, page) +
                                    self.shortenPagename(page) +
                                    self.request.formatter.interwikilink(False, interwiki, page))
                            items.append(link)
                            continue
                        else:
                            pagename = page

                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    title = page.split_title()
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append(link)
                html = u'<div id="pagetrail">%s</div>' % u'<span class="sep"> &raquo; </span>'.join(items)
        return html
Code example #13
File: opencollab.py Project: execgit/graphingwiki
    def breadcrumbs(self, d, is_footer=False):
        request = self.request
        _ = request.getText
        user = request.user

        if not user.valid:
            return ""

        items = []

        if user.show_page_trail:
            trail = user.getTrail()
            if trail:
                for pagename in trail[::-1]:
                    try:
                        interwiki, page = wikiutil.split_interwiki(pagename)
                        if interwiki != request.cfg.interwikiname and interwiki != 'Self':
                            link = (
                                self.request.formatter.interwikilink(
                                    True, interwiki, page) +
                                self.shortenPagename(page) +
                                self.request.formatter.interwikilink(
                                    False, interwiki, page))
                            items.append(link)
                            continue
                        else:
                            pagename = page
                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    title = page.split_title()
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append(link)

        val = """  <div class="navbar breadcrumb">
    <ul class="breadcrumb navbar-left">"""
        for i, item in enumerate(items):
            if i == 0:
                val += '\n      <li class="active">%s</li>' % item
            else:
                val += '\n      <li>%s</li>' % item

        val += '\n    </ul>\n    <ul class="breadcrumb navbar-right">'
        actions = getattr(request.cfg, 'bootstrap_actions',
                          self.BREADCRUMB_ACTIONS)
        for i, act in enumerate(actions):
            if act[0].isupper() and not act in self.available_actions:
                continue
            val += '\n      <li><a href="?action=%s%s">%s</a></li>' % (act, self.rev, self._actiontitle(act))

        val += u"""
      <li class="toggleCommentsButton" style="display:none;">
          <a href="#" class="nbcomment" onClick="toggleComments(); return false;">%s</a>
      </li>
        %s
    </ul>
  </div>""" % (_('Comments'), self.actionsmenu(d, is_footer))

        return val
Code example #14
    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1), f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1), f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        from MoinMoin.support.python_compatibility import hash_new
        digest = hash_new('sha1',
                          page.get_raw_body().encode(
                              config.charset)).hexdigest().upper()
        request.write(
            f.paragraph(1),
            f.rawHTML(
                '%(label)s <tt>%(value)s</tt>' % {
                    'label': _("SHA digest of this page's content is:"),
                    'value': digest,
                }), f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request,
                                          include_self=1,
                                          return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(
                f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(
                            f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(
                    f.rawHTML("%s%s " %
                              (Page(request, linkedpage).link_to(request),
                               ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))
Code example #15
def execute(pagename, request):
    enter_page(request, pagename, "Group Editor")
    macro = wikiutil.importPlugin(request.cfg, "macro", "GroupEditor",
                                  "do_macro")
    request.write(macro(request))
    page = Page(request, pagename)
    request.write(page.link_to(request, text=request.getText("back")))
    exit_page(request, pagename)
Code example #16
File: text_gedit.py Project: tanghejun/volunpedia
    def pagelink(self, on, pagename='', page=None, **kw):
        """ Link to a page.

            formatter.text_python will use an optimized call with a page!=None
            parameter. DO NOT USE THIS YOURSELF OR IT WILL BREAK.

            See wikiutil.link_tag() for possible keyword parameters.
        """
        FormatterBase.pagelink(self, on, pagename, page, **kw)
        if page is None:
            page = Page(self.request, pagename, formatter=self)
        return page.link_to(self.request, on=on, **kw)
Code example #17
File: text_gedit.py Project: IvanLogvinov/soar
    def pagelink(self, on, pagename='', page=None, **kw):
        """ Link to a page.

            formatter.text_python will use an optimized call with a page!=None
            parameter. DO NOT USE THIS YOURSELF OR IT WILL BREAK.

            See wikiutil.link_tag() for possible keyword parameters.
        """
        FormatterBase.pagelink(self, on, pagename, page, **kw)
        if page is None:
            page = Page(self.request, pagename, formatter=self)
        return page.link_to(self.request, on=on, **kw)
Code example #18
    def splitNavilink(self, text, localize=1):
        """ Split navibar links into pagename, link to page

        Admin or user might want to use shorter navibar items by using
        the [page title] or [url title] syntax. In this case, we don't
        use localization, and the links goes to page or to the url, not
        the localized version of page.

        @param text: the text used in config or user preferences
        @rtype: tuple
        @return: pagename or url, link to page or url
        """
        request = self.request
        
        # Handle [pagename title] or [url title] formats
        if text.startswith('[') and text.endswith(']'):
            try:
                pagename, title = text[1:-1].strip().split(' ', 1)
                title = title.strip()
            except (ValueError, TypeError):
                # Just use the text as is.
                pagename = title = text

            # Handle [url title] format
            from MoinMoin import config
            for scheme in self.linkSchemas:
                if pagename.startswith(scheme):
                    title = wikiutil.escape(title)
                    link = '<a href="%s">%s</a>' % (pagename, title)
                    break

            # Handle [pagename title] format
            else:
                # Normalize page names, replace '_' with ' '. Usually
                # all names use spaces internally, but for
                # [name_with_spaces label] we must save the underscores
                # until this point.
                pagename = request.normalizePagename(pagename)
                link = Page(request, pagename).link_to(request, title)

        # Handle regular pagename like "FrontPage"
        else:
            # Use localized pages for the current user
            if localize:
                page = wikiutil.getSysPage(request, text)
            else:
                page = Page(request, text)
            pagename = page.page_name
            title = page.split_title(request)
            title = self.shortenPagename(title)
            link = page.link_to(request, title)

        return pagename, link
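
As the docstring above notes, navibar entries can be plain page names or use the bracketed [target title] shorthand, where the target is either a page name or a URL. An illustrative wikiconfig fragment exercising all three forms (the entries themselves are made up, not taken from any project above):

# Inside a MoinMoin wikiconfig Config class; splitNavilink handles each entry.
navi_bar = [
    u'RecentChanges',                      # plain page name, localized per user
    u'[FrontPage Home]',                   # [pagename title]: link text "Home"
    u'[http://moinmo.in/ MoinMoin site]',  # [url title]: external link
]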
Code example #19
File: text_html.py Project: moinwiki/moin-legacy
    def pagelink(self, on, pagename='', page=None, **kw):
        """ Link to a page.

            formatter.text_python will use an optimized call with a page!=None
            parameter. DO NOT USE THIS YOURSELF OR IT WILL BREAK.

            See wikiutil.link_tag() for possible keyword parameters.
        """
        FormatterBase.pagelink(self, on, pagename, page, **kw)
        if page is None:
            page = Page(self.request, pagename, formatter=self)

        if self.request.user.show_nonexist_qm and on and not page.exists():
            self.pagelink_preclosed = True
            return (page.link_to(self.request, on=1, **kw) +
                    self.text("?") +
                    page.link_to(self.request, on=0, **kw))
        elif not on and self.pagelink_preclosed:
            self.pagelink_preclosed = False
            return ""
        else:
            return page.link_to(self.request, on=on, **kw)
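
This formatter uses the on keyword of link_to: with on=1 it emits only the opening <a ...> tag and with on=0 only the closing </a>, so arbitrary markup (here a "?" marker for non-existent pages) can be placed inside the link. A minimal sketch of the same open/close pattern (the helper name is illustrative):

from MoinMoin import wikiutil

def link_with_marker(request, page, marker, **kw):
    # on=1 returns just the opening tag, on=0 just the closing tag;
    # whatever is concatenated in between becomes the link body.
    return (page.link_to(request, on=1, **kw) +
            wikiutil.escape(marker) +
            page.link_to(request, on=0, **kw))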
Code example #20
File: info.py Project: steveyen/moingo
    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1),
                      f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1),
                      f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        from MoinMoin.support.python_compatibility import hash_new
        digest = hash_new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write(f.paragraph(1),
                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                          'label': _("SHA digest of this page's content is:"),
                          'value': digest, }),
                      f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request, include_self=1, return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))
Code example #21
    def trail(self, d):
        """ Assemble page trail

        @param d: parameter dictionary
        @rtype: unicode
        @return: trail html
        """
        _ = self.request.getText
        request = self.request
        user = request.user
        html = u''
        li = u'                <li>%s</li>'

        if not user.valid or user.show_page_trail:
            trail = user.getTrail()
            if trail:
                items = []
                for pagename in trail:
                    try:
                        interwiki, page = wikiutil.split_interwiki(pagename)
                        if interwiki != request.cfg.interwikiname and interwiki != 'Self':
                            link = (self.request.formatter.interwikilink(True, interwiki, page) +
                                    self.shortenPagename(page) +
                                    self.request.formatter.interwikilink(False, interwiki, page))
                            items.append(li % link)
                            continue
                        else:
                            pagename = page

                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    title = page.split_title()
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append(li % link)

                html = u'''
            <div id="pagetrail">
              <h4>%s</h4>
              <ul>
%s
              </ul>
            </div>
''' % (_('Trail'), u'\n'.join(items))

        return html
Code example #22
    def navibar(self, d):
        request = self.request
        found = {}  # pages we found. prevent duplicates
        items = []  # navibar items
        item = u'<li class="%s">%s</li>'
        current = d['page_name']

        pagenames = [
            u'志愿产品', u'服务产品', u'视听产品', u'实体产品', u'志愿组织', u'公益机构', u'企业志愿组织',
            u'学生志愿组织', u'志愿服务基地', u'应用工具'
        ]
        for pagename in pagenames:
            pagegroup = None

            if pagename == u'志愿产品':
                pagegroup = 'volunpedia_product_pages'
            elif pagename == u'志愿组织':
                pagegroup = 'volunpedia_organization_pages'

            if pagename != u'志愿产品' and pagename != u'志愿组织':
                page = Page(request, pagename)
                title = page.split_title()
                title = self.shortenPagename(title)
                link = page.link_to(request, title)
                cls = 'userlink'
                items.append(item % (cls, link))
                if pagename == u'实体产品' or pagename == u'学生志愿组织':
                    items.append(u'</ul></li>')
                found[pagename] = 1
            else:
                cls = 'userlink'
                link = u'<a href="javascript:;" onclick="var t = document.getElementById(\'' + pagegroup + u'_menu' + u'\'); if(t.style.display == \'none\') {t.style.display = \'\';} else {t.style.display = \'none\';}">' + pagename + u'</a>'
                items.append(item % (cls, link))
                items.append(
                    u'<li id="' + pagegroup + u'_menu' +
                    u'" class="userlink"><ul style="padding-left:2.5em">')

        # Assemble html
        items = u''.join(items)
        html = u'''
<ul id="navibar">
%s
</ul>
''' % items
        return html
Code example #23
File: explorer.py Project: 130s/roswiki
 def parent_categories_html(self, d):
     html = u''
     request = self.request
     # Get list of categories the page belongs to
     categories = d["page"].getCategories(request)
     if categories:
         items = [u'<ul id="parents">']
         for category in categories:
             page = Page(request, category)
             if self.is_moin_1_5:
                 title = page.split_title(request)  # Moin 1.5.x
             else:
                 title = page.split_title()  # Moin 1.6
             link = page.link_to(request, title)
             items.append('<li>%s</li>' % link)
         items.append(u'</ul>')
         html = '\n'.join(items)
     return html
Code example #24
File: explorer.py Project: kuj1pal/roswiki
 def parent_categories_html(self, d):
     html = u''
     request = self.request
     # Get list of categories the page belongs to
     categories = d["page"].getCategories(request)
     if categories:
         items = [u'<ul id="parents">']
         for category in categories:
             page = Page(request, category)
             if self.is_moin_1_5:
                 title = page.split_title(request)  # Moin 1.5.x
             else:
                 title = page.split_title()  # Moin 1.6
             link = page.link_to(request, title)
             items.append('<li>%s</li>' % link)
         items.append(u'</ul>')
         html = '\n'.join(items)
     return html
Code example #25
    def general(page, pagename, request):
        _ = request.getText

        request.write('<h2>%s</h2>\n' % _('General Information'))
        
        # show page size
        request.write(("<p>%s</p>" % _("Page size: %d")) % page.size())

        # show SHA digest fingerprint
        import sha
        digest = sha.new(page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write('<p>%(label)s <tt>%(value)s</tt></p>' % {
            'label': _("SHA digest of this page's content is:"),
            'value': digest,
            })

        # show attachments (if allowed)
        attachment_info = getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request,  include_self=1, return_users=1)
        if subscribers:
            request.write('<p>', _('The following users subscribed to this page:'))
            for lang in subscribers.keys():
                request.write('<br>[%s] ' % lang)
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(userhomepage.link_to(request) + ' ')
                    else:
                        request.write(user.name + ' ')
            request.write('</p>')

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write('<p>', _('This page links to the following pages:'), '<br>')
            for linkedpage in links:
                request.write("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]]))
            request.write("</p>")
Code example #26
File: simplemente.py Project: Glottotopia/aagd
    def trail(self, d):
        """ Assemble page trail
        
        Changed: name attrib added for accesskeys
        """
        request = self.request
        user = request.user
        html = ''
        if user.valid and user.show_page_trail:
            trail = user.getTrail()
            if trail:
                items = []
                i = -1
                for pagename in trail:
                    i = i + 1
                    trail_number = "trail%02d" % i
                    try:
                        interwiki, page = pagename.split(":", 1)
                        if request.cfg.interwikiname != interwiki:
                            link = (self.request.formatter.interwikilink(True, interwiki, page, name=trail_number) +
                                    self.shortenPagename(page) +
                                    self.request.formatter.interwikilink(False, interwiki, page))
                            items.append('<li>%s</li>' % link)
                            continue
                        else:
                            pagename = page

                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    # Try..except for backwards compatibility of Moin versions only
                    try:
                        title = page.split_title()
                    except:
                        title = page.split_title(request)
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title, name=trail_number)
                    items.append('<li>%s</li>' % link)
                html = '''
<ul id="pagetrail">
%s
</ul>''' % ''.join(items)
        return html
Code example #27
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.Page import Page
    _ = request.getText

    data = TupleDataset()
    data.columns = [
        #Column('id', label=('ID'), align='right'),
        Column('name', label=('Username')),
        Column('email', label=('Email')),
        #Column('action', label=_('Action')),
    ]

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = account.name

        data.addRow((
            #request.formatter.code(1) + uid + request.formatter.code(0),
            request.formatter.rawHTML(namelink),
            (request.formatter.url(1, 'mailto:' + account.email, 'external', pretty_url=1, unescaped=1) +
             request.formatter.text(account.email) +
             request.formatter.url(0)),
            #'',
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data)
        return browser.toHTML()

    # No data
    return ''
Code example #28
File: BriefRecentChanges.py Project: wada314/gswiki
def format_page_edits(macro, line):
    request = macro.request
    _ = request.getText
    pagename = line.pagename
    rev = int(line.rev)
    page = Page(request, pagename)

    html_link = u''
    if not page.exists():
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain='standard'):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, u'(削除) %s' % pagename,
                                         querystr={'action': 'diff'}, rel='nofollow')
        else:
            html_link = pagename
    else:
        html_link = page.link_to(request, pagename, rel='nofollow')

    return u'<li>%s</li>' % html_link
Code example #29
File: __init__.py Project: imosts/flume
    def trail(self, d):
        """ Assemble page trail
        
        @param d: parameter dictionary
        @rtype: unicode
        @return: trail html
        """
        request = self.request
        user = request.user
        html = ''
        if user.valid and user.show_page_trail:
            trail = user.getTrail()
            if trail:
                items = []
                for pagename in trail:
                    try:
                        interwiki, page = pagename.split(":", 1)
                        # Items in trail are saved as valid interwiki
                        # links, using _ for spaces.
                        page = page.replace('_', ' ')
                        if request.cfg.interwikiname != interwiki:
                            link = (self.request.formatter.interwikilink(True, interwiki, page) +
                                    self.shortenPagename(page) +
                                    self.request.formatter.interwikilink(False, interwiki, page))
                            items.append('<li>%s</li>' % link)
                            continue
                        else:
                            pagename = page
                            
                    except ValueError:
                        pass
                    page = Page(request, pagename)
                    title = page.split_title(request)
                    title = self.shortenPagename(title)
                    link = page.link_to(request, title)
                    items.append('<li>%s</li>' % link)
                html = '''
<ul id="pagetrail">
%s
</ul>''' % ''.join(items)
        return html
Code example #30
def execute(pagename, request):
    """ show misc. infos about a page """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1),
                      f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1),
                      f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        digest = hashlib.new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write(f.paragraph(1),
                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                          'label': _("SHA digest of this page's content is:"),
                          'value': digest, }),
                      f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request, include_self=1, return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))

    def history(page, pagename, request):
        # show history as default
        _ = request.getText
        default_count, limit_max_count = request.cfg.history_count[0:2]
        paging = request.cfg.history_paging

        try:
            max_count = int(request.values.get('max_count', default_count))
        except ValueError:
            max_count = default_count
        max_count = max(1, min(max_count, limit_max_count))

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)

        offset = 0
        paging_info_html = ""
        paging_nav_html = ""
        count_select_html = ""

        f = request.formatter

        if paging:
            log_size = log.lines()

            try:
                offset = int(request.values.get('offset', 0))
            except ValueError:
                offset = 0
            offset = max(min(offset, log_size - 1), 0)

            paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
                'start_offset': log_size - min(log_size, offset + max_count) + 1,
                'end_offset': log_size - offset,
                'total_count': log_size,
            } + f.paragraph(0)

            # generating offset navigating links
            if max_count < log_size or offset != 0:
                offset_links = []
                cur_offset = max_count
                near_count = 5 # request.cfg.pagination_size

                min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
                max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
                offset_added = False

                def add_offset_link(offset, caption=None):
                    offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(max_count),
                            }, css_class="info-offset-nav-link", rel="nofollow") + f.text(caption or str(log_size - offset)) + page.link_to(request, on=0) +
                        f.table_cell(0)
                    )

                # link to previous page - only if not at start
                if offset > 0:
                    add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

                # link to beginning of event log - if min_offset is not minimal
                if min_offset > 0:
                    add_offset_link(0)
                    # adding gap only if min_offset not explicitly following beginning
                    if min_offset > 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                # generating near pages links
                for cur_offset in range(min_offset, max_offset + 1):
                    # note that the current offset may not be a multiple of max_count,
                    # so we check whether we should add the current offset marker like this
                    if not offset_added and offset <= cur_offset * max_count:
                        # current info history view offset
                        offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                        offset_added = True

                    # add link, if not at this offset
                    if offset != cur_offset * max_count:
                        add_offset_link(cur_offset * max_count)

                # link to the last page of event log
                if max_offset < (log_size - 1) / max_count:
                    if max_offset < (log_size - 1) / max_count - 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))
                    add_offset_link(((log_size - 1) / max_count) * max_count)

                # special case - if offset is greater than max_offset * max_count
                if offset > max_offset * max_count:
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

                # link to next page
                if offset < (log_size - max_count):
                    add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

                # generating html
                paging_nav_html += "".join([
                    f.table(1, css_class="searchpages"),
                    f.table_row(1),
                    "".join(offset_links),
                    f.table_row(0),
                    f.table(0),
                ])

        # generating max_count switcher
        # we do it only in case history_count has additional values
        if len(request.cfg.history_count) > 2:
            max_count_possibilities = list(set(request.cfg.history_count))
            max_count_possibilities.sort()
            max_count_html = []
            cur_count_added = False


            for count in max_count_possibilities:
                # the max_count value may not be in the list of predefined values
                if max_count <= count and not cur_count_added:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item info-cur-count"),
                        f.text(str(max_count)),
                        f.span(0),
                    ]))
                    cur_count_added = True

                # checking for limit_max_count to prevent showing unavailable options
                if max_count != count and count <= limit_max_count:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item"),
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(count),
                            }, css_class="info-count-link", rel="nofollow"),
                        f.text(str(count)),
                        page.link_to(request, on=0),
                        f.span(0),
                    ]))

            count_select_html += "".join([
                f.span(1, css_class="info-count-selector"),
                    f.text(" ("),
                    f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
                    f.text(")"),
                f.span(0),
            ])

        # open log for this page
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size', label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list

        def render_action(text, query, **kw):
            kw.update(dict(rel='nofollow'))
            return page.link_to(request, text, querystr=query, **kw)

        def render_file_action(text, pagename, filename, request, do):
            url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
            if url:
                f = request.formatter
                link = f.url(1, url) + f.text(text) + f.url(0)
                return link

        may_write = request.user.may.write(pagename)
        may_delete = request.user.may.delete(pagename)

        count = 0
        pgactioncount = 0
        for line in log.reverse():
            count += 1

            if paging and count <= offset:
                continue

            rev = int(line.rev)
            actions = []
            if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
                size = page.size(rev=rev)
                actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
                if pgactioncount == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif pgactioncount == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
                if rev > 1:
                    diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev-1, 'rev2': rev})
                comment = line.comment
                if not comment:
                    if '/REVERT' in line.action:
                        comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                    elif '/RENAME' in line.action:
                        comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
                pgactioncount += 1
            else: # ATT*
                rev = '-'
                diff = '-'

                filename = wikiutil.url_unquote(line.extra)
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                    actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                    actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                    if may_delete:
                        actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                    if may_write:
                        actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
                else:
                    size = 0

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                "&nbsp;".join(a for a in actions if a),
            ))
            if (count >= max_count + offset) or (paging and count >= log_size):
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget

        request.write(unicode(html.H2().append(_('Revision History'))))

        if not count: # there was no entry in logfile
            request.write(_('No log entries found.'))
            return

        history_table = DataBrowserWidget(request)
        history_table.setData(history)

        div = html.DIV(id="page-history")
        div.append(html.INPUT(type="hidden", name="action", value="diff"))
        div.append(history_table.render(method="GET"))

        form = html.FORM(method="GET", action="")
        if paging:
            form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-top"),
                paging_nav_html,
                f.div(0),
            ]))
        form.append(div)
        if paging:
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
                paging_nav_html,
                f.div(0)
            ]))
        request.write(unicode(form))

    # main function
    _ = request.getText
    page = Page(request, pagename)
    title = page.split_title()

    request.setContentLanguage(request.lang)
    f = request.formatter

    request.theme.send_title(_('Info for "%s"') % (title, ), page=page)
    menu_items = [
        (_('Show "%(title)s"') % {'title': _('Revision History')},
         {'action': 'info'}),
        (_('Show "%(title)s"') % {'title': _('General Page Infos')},
         {'action': 'info', 'general': '1'}),
        (_('Show "%(title)s"') % {'title': _('Page hits and edits')},
         {'action': 'info', 'hitcounts': '1'}),
    ]
    request.write(f.div(1, id="content")) # start content div
    request.write(f.paragraph(1))
    for text, querystr in menu_items:
        request.write("[%s] " % page.link_to(request, text=text, querystr=querystr, rel='nofollow'))
    request.write(f.paragraph(0))

    show_hitcounts = int(request.values.get('hitcounts', 0)) != 0
    show_general = int(request.values.get('general', 0)) != 0

    if show_hitcounts:
        from MoinMoin.stats import hitcounts
        request.write(hitcounts.linkto(pagename, request, 'page=' + wikiutil.url_quote(pagename)))
    elif show_general:
        general(page, pagename, request)
    else:
        history(page, pagename, request)

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
Code example #31
File: simplemente.py Project: Glottotopia/aagd
    def splitNavilink(self, text, localize=1):
        """ Split navibar links into pagename, link to page

        Changed: name attrib added for accesskeys
        """
        request = self.request

        # Handle [pagename title] or [url title] formats
        if text.startswith('[') and text.endswith(']'):
            try:
                pagename, title = text[1:-1].strip().split(' ', 1)
                title = title.strip()
                localize = 0
            except (ValueError, TypeError):
                # Just use the text as is.
                pagename = title = text

        # Handle regular pagename like "FrontPage"
        else:
            # Use localized pages for the current user
            if localize:
                # Try..except for backwards compatibility of Moin versions only
                try:
                    page = wikiutil.getSysPage(request, text)
                except:
                    page = wikiutil.getLocalizedPage(request, text)
            else:
                page = Page(request, text)
            pagename = page.page_name
            # Try..except for backwards compatibility of Moin versions only
            try: 
                title = page.split_title()
            except:
                title = page.split_title(request)
            title = self.shortenPagename(title)
            link = page.link_to(request, title, name=title)


        from MoinMoin import config
        for scheme in config.url_schemas:
            if pagename.startswith(scheme):
                title = wikiutil.escape(title)
                link = self.request.formatter.url(1, pagename, name=title) + \
                       self.request.formatter.text(title) +\
                       self.request.formatter.url(0)
                return pagename, link

        # remove wiki: url prefix
        if pagename.startswith("wiki:"):
            pagename = pagename[5:]

        # try handling interwiki links
        try:
            interwiki, page = pagename.split(':', 1)
            thiswiki = request.cfg.interwikiname
            if interwiki == thiswiki:
                pagename = page
                title = page
            else:
                return (pagename,
                        self.request.formatter.interwikilink(True, interwiki, page, name=page) +
                        page +
                        self.request.formatter.interwikilink(False, interwiki, page)
                        )

        except ValueError:
            pass

        pagename = request.normalizePagename(pagename)
        link = Page(request, pagename).link_to(request, title, name=title)

        return pagename, link
Code example #32
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show groups that are defined in wikipages linking to it
        # after show groups from other backends.
        grouppage_links = ', '.join([Page(request, group_name).link_to(request) for group_name in wiki_groups] +
                                    other_groups)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text=_('Disable user')
        if account.disabled:
            text=_('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(unicode(ret))

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link =  unicode(unicode(ret))

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data, sort_columns=[0])
        return browser.render()

    # No data
    return ''
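
A minimal rendering sketch, assuming a data object like the TupleDataset populated above (function name and argument are ours, not part of the example); it isolates the widget calls used at the end of this browser:

def render_user_table_sketch(request, data):
    # Render the dataset with the same widget and sort column as above:
    # column 0 carries (html, sort_key) pairs, so sorting uses the raw name.
    from MoinMoin.widget.browser import DataBrowserWidget
    browser = DataBrowserWidget(request)
    browser.setData(data, sort_columns=[0])
    return browser.render()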
Code example #33
0
File: Include.py Project: mikejamesthompson/orgsites
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M), called_by_toc=0):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text,))

    # prepare including page
    result = []
    print_mode = macro.form.has_key('action') and macro.form['action'][0] == "print"
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(request, this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if this_page._macroInclude_pagelist.has_key(inc_name):
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name,))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error, e:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            newbody = []
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if called_by_toc:
            result.append(inc_page.get_raw_body())
            continue

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name
        
        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title(request)
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                import sha
                from MoinMoin import config
                # this heading id might produce duplicate ids,
                # if the same page is included multiple times
                # Encode stuff we feed into the sha module.
                pntt = (inc_name + heading).encode(config.charset)
                hid = "head-" + sha.new(pntt).hexdigest()
                request._page_headings.setdefault(pntt, 0)
                request._page_headings[pntt] += 1
                if request._page_headings[pntt] > 1:
                    hid += '-%d'%(request._page_headings[pntt],)
                result.append(
                    #macro.formatter.heading(1, level, hid,
                    #    icons=edit_icon.replace('<img ', '<img align="right" ')) +
                    macro.formatter.heading(1, level, hid) +
                    inc_page.link_to(request, heading, css_class="include-heading-link") +
                    macro.formatter.heading(0, level)
                )

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            cid = request.makeUniqueID("Include_%s" % wikiutil.quoteWikinameFS(inc_page.page_name))
            inc_page.send_page(request, content_only=1, content_id=cid)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                '<div class="include-link">',
                inc_page.link_to(request, '[%s]' % (inc_name,), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'),), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                '</div>',
            ])

    # return include text
    return ''.join(result)
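
As a hedged illustration (helper name and arguments are ours), the include-edit-link appended above can be produced on its own with Page.link_to and a querystr, exactly as the macro does:

from MoinMoin.Page import Page

def include_edit_link_sketch(request, inc_name, backto):
    # Build an "[edit]" link for the included page that returns to the
    # including page after the edit, mirroring the block above.
    inc_page = Page(request, inc_name)
    return inc_page.link_to(request, text='[edit]',
                            css_class="include-edit-link",
                            querystr={'action': 'edit', 'backto': backto})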
Code example #34
0
def checkSpelling(page, request, own_form=1):
    """ Do spell checking, return a tuple with the result.
    """
    _ = request.getText

    # first check to see if we were called with a "newwords" parameter
    if 'button_newwords' in request.form:
        _addLocalWords(request)

    # load words
    wordsdict = _loadDict(request)

    localwords = {}
    lsw_page = Page(request, request.cfg.page_local_spelling_words)
    if lsw_page.exists():
        _loadWordsPage(request, localwords, lsw_page)

    # init status vars & load page
    request.clock.start('spellcheck')
    badwords = {}
    text = page.get_raw_body()

    # checker regex and matching substitute function
    word_re = re.compile(r'([%s]?[%s]+)' % (
        config.chars_upper, config.chars_lower), re.UNICODE)

    def checkword(match, wordsdict=wordsdict, badwords=badwords,
            localwords=localwords, num_re=re.compile(r'^\d+$', re.UNICODE)):
        word = match.group(1)
        if len(word) == 1:
            return ""
        w_enc = word.encode(config.charset)
        wl_enc = word.lower().encode(config.charset)
        if not (w_enc in wordsdict or wl_enc in wordsdict or
                w_enc in localwords or wl_enc in localwords):
            if not num_re.match(word):
                badwords[word] = 1
        return ""

    # do the checking
    for line in text.split('\n'):
        if line == '' or line[0] == '#':
            continue
        word_re.sub(checkword, line)

    if badwords:
        badwords = badwords.keys()
        badwords.sort(lambda x, y: cmp(x.lower(), y.lower()))

        # build regex recognizing the bad words
        badwords_re = r'(^|(?<!\w))(%s)(?!\w)'
        badwords_re = badwords_re % ("|".join([re.escape(bw) for bw in badwords]), )
        badwords_re = re.compile(badwords_re, re.UNICODE)

        lsw_msg = ''
        if localwords:
            lsw_msg = ' ' + _('(including %(localwords)d %(pagelink)s)') % {
                'localwords': len(localwords), 'pagelink': lsw_page.link_to(request)}
        msg = _('The following %(badwords)d words could not be found in the dictionary of '
                '%(totalwords)d words%(localwords)s and are highlighted below:') % {
            'badwords': len(badwords),
            'totalwords': len(wordsdict)+len(localwords),
            'localwords': lsw_msg} + "<br>"

        # figure out what this action is called
        action_name = os.path.splitext(os.path.basename(__file__))[0]

        # add a form containing the bad words
        if own_form:
            msg = msg + ('<form method="post" action="%s">\n'
                         '<input type="hidden" name="action" value="%s">\n') % (request.href(page.page_name), action_name)

        checkbox = '<input type="checkbox" name="newwords" value="%(word)s">%(word)s&nbsp;&nbsp;'
        msg = msg + (
            " ".join([checkbox % {'word': wikiutil.escape(w, True), } for w in badwords]) +
            '<p><input type="submit" name="button_newwords" value="%s"></p>' %
                _('Add checked words to dictionary')
        )
        if own_form:
            msg = msg + '</form>'
    else:
        badwords_re = None
        msg = _("No spelling errors found!")

    request.clock.stop('spellcheck')

    return badwords, badwords_re, msg
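
A hedged caller sketch (the strong-tag markup is an arbitrary illustration, not what the real spellcheck action emits) showing how the returned tuple can be used:

def highlight_badwords_sketch(page, request):
    # Run the checker above and wrap every misspelled word in the raw body.
    badwords, badwords_re, msg = checkSpelling(page, request)
    body = page.get_raw_body()
    if badwords_re is None:
        return body, msg
    marked = badwords_re.sub(lambda m: '<strong>%s</strong>' % m.group(2), body)
    return marked, msg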
Code example #35
0
def construct_table(macro,
                    pagelist,
                    metakeys,
                    legend='',
                    checkAccess=True,
                    styles=dict(),
                    addpagename=False):
    request = macro.request
    request.page.formatter = request.formatter
    out = str()

    row = 0

    entryfmt = {'class': 'metamatrix_entry'}

    # Start table
    out += macro.formatter.linebreak() + u'<div class="metamatrix">' + \
        macro.formatter.table(1)

    # Give a class to headers to make it customisable
    out += macro.formatter.table_row(1, {'rowclass': 'meta_head'})
    # Upper left cell is empty or has the desired legend
    out += t_cell(macro, [legend])

    x_key, y_key = metakeys[:2]

    x_values, y_values = set(), set()

    page_vals = dict()

    for page in pagelist:
        page_vals[page] = get_metas(request,
                                    page,
                                    metakeys,
                                    checkAccess=False,
                                    formatLinks=True)

        x_val = page_vals[page].get(x_key, set())
        y_val = page_vals[page].get(y_key, set())
        x_values.update([(page, x) for x in x_val])
        y_values.update([(page, y) for y in y_val])

    metakeys = metakeys[2:]

    header_cells = list()
    # Make header row
    for oval, value, page in sorted(
        (ordervalue(y), y, page) for page, y in y_values):

        style = styles.get(y_key, dict())

        # Styles can modify key naming
        name = style.get('gwikiname', '').strip('"')

        # We don't want stuff like bullet lists in our header
        headerstyle = dict()
        for st in style:
            if not st.startswith('gwiki'):
                headerstyle[st] = style[st]

        if name:
            if not (value, name) in header_cells:
                header_cells.append((value, name))
        else:
            showvalue = metas_to_abs_links(request, page, [value])
            if not (value, showvalue[0]) in header_cells:
                header_cells.append((value, showvalue[0]))

    for value, showvalue in header_cells:
        out += t_cell(macro, [showvalue], style=headerstyle)

    out += macro.formatter.table_row(0)

    tmp_page = request.page

    f = macro.formatter

    # Table
    row_cells = list()
    row_values = list()
    for oval, x_value, page in sorted(
        (ordervalue(x), x, page) for page, x in x_values):
        if not (oval, x_value) in row_values:
            row_cells.append((oval, x_value, page))
            row_values.append((oval, x_value))

    for oval, x_value, page in row_cells:
        row = row + 1

        if row % 2:
            out += f.table_row(1, {'rowclass': 'metamatrix-odd-row'})
        else:
            out += f.table_row(1, {'rowclass': 'metamatrix-even-row'})
        value = metas_to_abs_links(request, page, [x_value])
        out += t_cell(macro, value)

        for y_value, showvalue in header_cells:
            style = styles.get(y_value, dict())

            if not 'class' in style:
                style['class'] = 'meta_cell'

            out += f.table_cell(1, attrs=style)

            for page in pagelist:
                pageobj = Page(request, page)

                if (x_value in page_vals[page].get(x_key, set())
                        and y_value in page_vals[page].get(y_key, set())):

                    result = ''

                    args = {'class': 'metamatrix_link'}

                    for key in metakeys:
                        for val in page_vals[page].get(key, list()):

                            # Strip ugly brackets from bracketed links
                            val = val.lstrip('[').strip(']')

                            result += f.listitem(1, **entryfmt)

                            result += pageobj.link_to(request,
                                                      text=val,
                                                      **args)
                            result += f.listitem(0)

                    if addpagename:
                        result += f.listitem(1, **entryfmt)
                        result += pageobj.link_to(request, **args)
                        result += f.listitem(0)

                    out += result

        out += macro.formatter.table_row(0)

    request.page = tmp_page
    request.formatter.page = tmp_page

    out += macro.formatter.table(0)
    out += u'</div>'

    return out
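
A hedged usage sketch; the page names and meta keys below are invented for illustration, a real caller passes whatever its macro arguments resolved to:

def metamatrix_sketch(macro):
    # Cross-tabulate the first two keys over the pages and list the rest
    # (plus the page name) inside each matching cell.
    pagelist = ['ProjectX/TaskA', 'ProjectX/TaskB']   # assumed pages
    metakeys = ['priority', 'status', 'owner']        # assumed keys
    return construct_table(macro, pagelist, metakeys,
                           legend='priority / status',
                           addpagename=True)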
Code example #36
0
File: text_xml.py Project: aahlad/soar
 def pagelink(self, on, pagename='', page=None, **kw):
     FormatterBase.pagelink(self, on, pagename, page, **kw)
     if page is None:
         page = Page(self.request, pagename, formatter=self)
     return page.link_to(self.request, on=on, **kw)
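
A minimal driving sketch, assuming formatter is an instance of the XML formatter above:

def render_pagelink_sketch(formatter, pagename, label):
    # Open the page link, emit the visible label, then close the link.
    return (formatter.pagelink(1, pagename) +
            formatter.text(label) +
            formatter.pagelink(0, pagename))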
Code example #37
0
def checkSpelling(page, request, own_form=1):
    """ Do spell checking, return a tuple with the result.
    """
    _ = request.getText

    # first check to see if we were called with a "newwords" parameter
    if 'button_newwords' in request.form:
        _addLocalWords(request)

    # load words
    wordsdict = _loadDict(request)

    localwords = {}
    lsw_page = Page(request, request.cfg.page_local_spelling_words)
    if lsw_page.exists():
        _loadWordsPage(request, localwords, lsw_page)

    # init status vars & load page
    request.clock.start('spellcheck')
    badwords = {}
    text = page.get_raw_body()

    # checker regex and matching substitute function
    word_re = re.compile(
        r'([%s]?[%s]+)' % (config.chars_upper, config.chars_lower), re.UNICODE)

    def checkword(match,
                  wordsdict=wordsdict,
                  badwords=badwords,
                  localwords=localwords,
                  num_re=re.compile(r'^\d+$', re.UNICODE)):
        word = match.group(1)
        if len(word) == 1:
            return ""
        w_enc = word.encode(config.charset)
        wl_enc = word.lower().encode(config.charset)
        if not (w_enc in wordsdict or wl_enc in wordsdict
                or w_enc in localwords or wl_enc in localwords):
            if not num_re.match(word):
                badwords[word] = 1
        return ""

    # do the checking
    for line in text.split('\n'):
        if line == '' or line[0] == '#':
            continue
        word_re.sub(checkword, line)

    if badwords:
        badwords = badwords.keys()
        badwords.sort(lambda x, y: cmp(x.lower(), y.lower()))

        # build regex recognizing the bad words
        badwords_re = r'(^|(?<!\w))(%s)(?!\w)'
        badwords_re = badwords_re % ("|".join(
            [re.escape(bw) for bw in badwords]), )
        badwords_re = re.compile(badwords_re, re.UNICODE)

        lsw_msg = ''
        if localwords:
            lsw_msg = ' ' + _('(including %(localwords)d %(pagelink)s)') % {
                'localwords': len(localwords),
                'pagelink': lsw_page.link_to(request)
            }
        msg = _(
            'The following %(badwords)d words could not be found in the dictionary of '
            '%(totalwords)d words%(localwords)s and are highlighted below:') % {
                'badwords': len(badwords),
                'totalwords': len(wordsdict) + len(localwords),
                'localwords': lsw_msg
            } + "<br>"

        # figure out what this action is called
        action_name = os.path.splitext(os.path.basename(__file__))[0]

        # add a form containing the bad words
        if own_form:
            msg = msg + ('<form method="post" action="%s">\n'
                         '<input type="hidden" name="action" value="%s">\n'
                         ) % (request.href(page.page_name), action_name)

        checkbox = '<input type="checkbox" name="newwords" value="%(word)s">%(word)s&nbsp;&nbsp;'
        msg = msg + (" ".join([
            checkbox % {
                'word': wikiutil.escape(w, True),
            } for w in badwords
        ]) + '<p><input type="submit" name="button_newwords" value="%s"></p>' %
                     _('Add checked words to dictionary'))
        if own_form:
            msg = msg + '</form>'
    else:
        badwords_re = None
        msg = _("No spelling errors found!")

    request.clock.stop('spellcheck')

    return badwords, badwords_re, msg
Code example #38
0
File: Include.py Project: aahlad/soar
def execute(macro, text, args_re=re.compile(_args_re_pattern), title_re=re.compile(_title_re, re.M)):
    request = macro.request
    _ = request.getText

    # return immediately if getting links for the current page
    if request.mode_getpagelinks:
        return ''

    # parse and check arguments
    args = text and args_re.match(text)
    if not args:
        return (_sysmsg % ('error', _('Invalid include arguments "%s"!')) % (text, ))

    # prepare including page
    result = []
    print_mode = request.action in ("print", "format")
    this_page = macro.formatter.page
    if not hasattr(this_page, '_macroInclude_pagelist'):
        this_page._macroInclude_pagelist = {}

    # get list of pages to include
    inc_name = wikiutil.AbsPageName(this_page.page_name, args.group('name'))
    pagelist = [inc_name]
    if inc_name.startswith("^"):
        try:
            inc_match = re.compile(inc_name)
        except re.error:
            pass # treat as plain page name
        else:
            # Get user filtered readable page list
            pagelist = request.rootpage.getPageList(filter=inc_match.match)

    # sort and limit page list
    pagelist.sort()
    sort_dir = args.group('sort')
    if sort_dir == 'descending':
        pagelist.reverse()
    max_items = args.group('items')
    if max_items:
        pagelist = pagelist[:int(max_items)]

    skipitems = 0
    if args.group("skipitems"):
        skipitems = int(args.group("skipitems"))
    titlesonly = args.group('titlesonly')
    editlink = args.group('editlink')

    # iterate over pages
    for inc_name in pagelist:
        if not request.user.may.read(inc_name):
            continue
        if inc_name in this_page._macroInclude_pagelist:
            result.append(u'<p><strong class="error">Recursive include of "%s" forbidden</strong></p>' % (inc_name, ))
            continue
        if skipitems:
            skipitems -= 1
            continue
        fmt = macro.formatter.__class__(request, is_included=True)
        fmt._base_depth = macro.formatter._base_depth
        inc_page = Page(request, inc_name, formatter=fmt)
        if not inc_page.exists():
            continue
        inc_page._macroInclude_pagelist = this_page._macroInclude_pagelist

        # check for "from" and "to" arguments (allowing partial includes)
        body = inc_page.get_raw_body() + '\n'
        from_pos = 0
        to_pos = -1
        from_re = args.group('from')
        if from_re:
            try:
                from_match = re.compile(from_re, re.M).search(body)
            except re.error:
                ##result.append("*** fe=%s ***" % e)
                from_match = re.compile(re.escape(from_re), re.M).search(body)
            if from_match:
                from_pos = from_match.end()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % from_re)
        to_re = args.group('to')
        if to_re:
            try:
                to_match = re.compile(to_re, re.M).search(body, from_pos)
            except re.error:
                to_match = re.compile(re.escape(to_re), re.M).search(body, from_pos)
            if to_match:
                to_pos = to_match.start()
            else:
                result.append(_sysmsg % ('warning', 'Include: ' + _('Nothing found for "%s"!')) % to_re)

        if titlesonly:
            levelstack = []
            for title, level in extract_titles(body[from_pos:to_pos], title_re):
                if levelstack:
                    if level > levelstack[-1]:
                        result.append(macro.formatter.bullet_list(1))
                        levelstack.append(level)
                    else:
                        while levelstack and level < levelstack[-1]:
                            result.append(macro.formatter.bullet_list(0))
                            levelstack.pop()
                        if not levelstack or level != levelstack[-1]:
                            result.append(macro.formatter.bullet_list(1))
                            levelstack.append(level)
                else:
                    result.append(macro.formatter.bullet_list(1))
                    levelstack.append(level)
                result.append(macro.formatter.listitem(1))
                result.append(inc_page.link_to(request, title))
                result.append(macro.formatter.listitem(0))
            while levelstack:
                result.append(macro.formatter.bullet_list(0))
                levelstack.pop()
            continue

        if from_pos or to_pos != -1:
            inc_page.set_raw_body(body[from_pos:to_pos], modified=True)
        ##result.append("*** f=%s t=%s ***" % (from_re, to_re))
        ##result.append("*** f=%d t=%d ***" % (from_pos, to_pos))

        if not hasattr(request, "_Include_backto"):
            request._Include_backto = this_page.page_name

        # do headings
        level = None
        if args.group('heading') and args.group('hquote'):
            heading = args.group('htext') or inc_page.split_title()
            level = 1
            if args.group('level'):
                level = int(args.group('level'))
            if print_mode:
                result.append(macro.formatter.heading(1, level) +
                              macro.formatter.text(heading) +
                              macro.formatter.heading(0, level))
            else:
                url = inc_page.url(request)
                result.extend([
                    macro.formatter.heading(1, level, id=heading),
                    macro.formatter.url(1, url, css="include-heading-link"),
                    macro.formatter.text(heading),
                    macro.formatter.url(0),
                    macro.formatter.heading(0, level),
                ])

        # set or increment include marker
        this_page._macroInclude_pagelist[inc_name] = \
            this_page._macroInclude_pagelist.get(inc_name, 0) + 1

        # output the included page
        strfile = StringIO.StringIO()
        request.redirect(strfile)
        try:
            inc_page.send_page(content_only=True,
                               omit_footnotes=True,
                               count_hit=False)
            result.append(strfile.getvalue())
        finally:
            request.redirect()

        # decrement or remove include marker
        if this_page._macroInclude_pagelist[inc_name] > 1:
            this_page._macroInclude_pagelist[inc_name] = \
                this_page._macroInclude_pagelist[inc_name] - 1
        else:
            del this_page._macroInclude_pagelist[inc_name]

        # if no heading and not in print mode, then output a helper link
        if editlink and not (level or print_mode):
            result.extend([
                macro.formatter.div(1, css_class="include-link"),
                inc_page.link_to(request, '[%s]' % (inc_name, ), css_class="include-page-link"),
                inc_page.link_to(request, '[%s]' % (_('edit'), ), css_class="include-edit-link", querystr={'action': 'edit', 'backto': request._Include_backto}),
                macro.formatter.div(0),
            ])
        # XXX page.link_to is wrong now, it escapes the edit_icon html as it escapes normal text

    # return include text
    return ''.join(result)
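
The from/to slicing can be read in isolation; a hedged sketch of that logic as a plain function (names ours, default end position simplified to the end of the body):

import re

def partial_body_sketch(body, from_re=None, to_re=None):
    # Start after the first match of from_re and stop before the first
    # match of to_re found after that point, as the macro above does.
    from_pos, to_pos = 0, len(body)
    if from_re:
        match = re.compile(from_re, re.M).search(body)
        if match:
            from_pos = match.end()
    if to_re:
        match = re.compile(to_re, re.M).search(body, from_pos)
        if match:
            to_pos = match.start()
    return body[from_pos:to_pos]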
Code example #39
0
def format_page_edits(macro, lines, bookmark_usecs):
    request = macro.request
    _ = request.getText
    d = {} # dict for passing stuff to theme
    line = lines[0]
    pagename = line.pagename
    rev = int(line.rev)
    tnow = time.time()
    is_new = lines[-1].action == 'SAVENEW'
    is_renamed = lines[-1].action == 'SAVE/RENAME'
    # check whether this page is newer than the user's bookmark
    hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
    page = Page(request, pagename)

    html_link = ''
    if not page.exists():
        img = request.theme.make_icon('deleted')
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain='standard'):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, img, querystr={'action': 'diff'}, rel='nofollow')
        else:
            # just indicate page was deleted
            html_link = img
    elif page.isConflict():
        img = request.theme.make_icon('conflict')
        html_link = page.link_to_raw(request, img, querystr={'action': 'edit'}, rel='nofollow')
    elif hilite:
        # show special icons if change was after the user's bookmark
        if is_new:
            img = 'new'
        elif is_renamed:
            img = 'renamed'
        else:
            img = 'updated'
        img = request.theme.make_icon(img)
        html_link = page.link_to_raw(request, img, querystr={'action': 'diff', 'date': '%d' % bookmark_usecs}, rel='nofollow')
    else:
        # show "DIFF" icon else
        img = request.theme.make_icon('diffrc')
        html_link = page.link_to_raw(request, img, querystr={'action': 'diff'}, rel='nofollow')

    # print name of page, with a link to it
    force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH

    d['icon_html'] = html_link
    d['pagelink_html'] = page.link_to(request, text=page.split_title(force=force_split))

    # print time of change
    d['time_html'] = None
    if request.cfg.changed_time_fmt:
        tdiff = long(tnow - wikiutil.version2timestamp(long(line.ed_time_usecs))) / 60 # has to be long for py 2.2.x
        if tdiff < 100:
            d['time_html'] = _("%(mins)dm ago") % {
                'mins': tdiff}
        else:
            d['time_html'] = time.strftime(request.cfg.changed_time_fmt, line.time_tuple)

    # print editor name or IP
    d['editors'] = None
    if request.cfg.show_names:
        if len(lines) > 1:
            counters = {}
            for idx in range(len(lines)):
                name = lines[idx].getEditor(request)
                if not name in counters:
                    counters[name] = []
                counters[name].append(idx+1)
            poslist = [(v, k) for k, v in counters.items()]
            poslist.sort()
            d['editors'] = []
            for positions, name in poslist:
                d['editors'].append("%s&nbsp;[%s]" % (
                    name, util.rangelist(positions)))
        else:
            d['editors'] = [line.getEditor(request)]

    comments = []
    for idx in range(len(lines)):
        comment = format_comment(request, lines[idx])
        if comment:
            comments.append((idx+1, wikiutil.escape(comment)))

    d['changecount'] = len(lines)
    d['comments'] = comments

    img = request.theme.make_icon('info')
    d['info_html'] = page.link_to_raw(request, img, querystr={'action': 'info'}, rel='nofollow')

    return request.theme.recentchanges_entry(d)
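
A hedged sketch of the calling side; the log-line objects are only assumed to carry a pagename attribute, as they do above, and each group is expected newest-first since lines[-1] is tested for SAVENEW:

def group_lines_by_page(loglines):
    # Collect all edit-log lines of one page so they can be passed to
    # format_page_edits() as a single group.
    per_page = {}
    for line in loglines:
        per_page.setdefault(line.pagename, []).append(line)
    return per_page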
Code example #40
0
File: admin.py Project: steveyen/moingo
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    isgroup = request.cfg.cache.page_group_regexact.search
    groupnames = request.rootpage.getPageList(user='', filter=isgroup)

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        grouppage_links = ', '.join([Page(request, groupname).link_to(request)
                                     for groupname in groupnames
                                     if request.dicts.has_member(groupname, account.name)])

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text=_('Disable user')
        if account.disabled:
            text=_('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(unicode(ret))

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link =  unicode(unicode(ret))

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            request.formatter.rawHTML(namelink),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data)
        return browser.render()

    # No data
    return ''
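
A hedged embedding sketch; the superuser guard is an assumption about the caller's policy, not something this example enforces:

def macro_UserBrowser_sketch(macro):
    # Wrap the browser output for inclusion in a page through the formatter.
    request = macro.request
    if not request.user.isSuperUser():   # assumed guard
        return ''
    return macro.formatter.rawHTML(do_user_browser(request))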
Code example #41
0
File: text_xml.py Project: imosts/flume
 def pagelink(self, on, pagename='', page=None, **kw):
     apply(FormatterBase.pagelink, (self, on, pagename, page), kw)
     if page is None:
         page = Page(self.request, pagename, formatter=self)
     return page.link_to(self.request, on=on, **kw)
Code example #42
0
def format_page_edits(macro, lines, bookmark_usecs):
    request = macro.request
    _ = request.getText
    d = {}  # dict for passing stuff to theme
    line = lines[0]
    pagename = line.pagename
    rev = int(line.rev)
    tnow = time.time()
    is_new = lines[-1].action == "SAVENEW"
    is_renamed = lines[-1].action == "SAVE/RENAME"
    # check whether this page is newer than the user's bookmark
    hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
    page = Page(request, pagename)

    html_link = ""
    if not page.exists():
        img = request.theme.make_icon("deleted")
        revbefore = rev - 1
        if revbefore and page.exists(rev=revbefore, domain="standard"):
            # indicate page was deleted and show diff to last existing revision of it
            html_link = page.link_to_raw(request, img, querystr={"action": "diff"}, rel="nofollow")
        else:
            # just indicate page was deleted
            html_link = img
    elif page.isConflict():
        img = request.theme.make_icon("conflict")
        html_link = page.link_to_raw(request, img, querystr={"action": "edit"}, rel="nofollow")
    elif hilite:
        # show special icons if change was after the user's bookmark
        if is_new:
            img = "new"
        elif is_renamed:
            img = "renamed"
        else:
            img = "updated"
        img = request.theme.make_icon(img)
        html_link = page.link_to_raw(
            request, img, querystr={"action": "diff", "date": "%d" % bookmark_usecs}, rel="nofollow"
        )
    else:
        # show "DIFF" icon else
        img = request.theme.make_icon("diffrc")
        html_link = page.link_to_raw(request, img, querystr={"action": "diff"}, rel="nofollow")

    # print name of page, with a link to it
    force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH

    d["icon_html"] = html_link
    d["pagelink_html"] = page.link_to(request, text=page.split_title(force=force_split))

    # print time of change
    d["time_html"] = None
    if request.cfg.changed_time_fmt:
        tdiff = long(tnow - wikiutil.version2timestamp(long(line.ed_time_usecs))) / 60  # has to be long for py 2.2.x
        if tdiff < 100:
            d["time_html"] = _("%(mins)dm ago") % {"mins": tdiff}
        else:
            d["time_html"] = time.strftime(request.cfg.changed_time_fmt, line.time_tuple)

    # print editor name or IP
    d["editors"] = None
    if request.cfg.show_names:
        if len(lines) > 1:
            counters = {}
            editorcache = {}
            for idx in range(len(lines)):
                editorkey = lines[idx].addr, lines[idx].hostname, lines[idx].userid
                if editorkey not in editorcache:
                    editorcache[editorkey] = lines[idx].getEditor(request)
                name = editorcache[editorkey]
                if not name in counters:
                    counters[name] = []
                counters[name].append(idx + 1)
            poslist = [(v, k) for k, v in counters.items()]
            poslist.sort()
            d["editors"] = []
            for positions, name in poslist:
                d["editors"].append("%s&nbsp;[%s]" % (name, util.rangelist(positions)))
        else:
            d["editors"] = [line.getEditor(request)]

    comments = []
    for idx in range(len(lines)):
        comment = format_comment(request, lines[idx])
        if comment:
            comments.append((idx + 1, wikiutil.escape(comment)))

    d["changecount"] = len(lines)
    d["comments"] = comments

    img = request.theme.make_icon("info")
    d["info_html"] = page.link_to_raw(request, img, querystr={"action": "info"}, rel="nofollow")

    return request.theme.recentchanges_entry(d)
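
The highlight test near the top of this function is easy to misread; restated as a hedged helper (timestamps are edit-log microseconds):

def is_newer_than_bookmark(ed_time_usecs, bookmark_usecs):
    # With no bookmark set, the value is compared against itself, so the
    # result is False and the change is not highlighted.
    return ed_time_usecs > (bookmark_usecs or ed_time_usecs)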
Code example #43
0
File: admin.py Project: execgit/gwiki-with-moin
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    class UserAccount(object):
        # namedtuple is >= 2.6 :-(
        def __init__(self, **kw):
            for k, v in kw.items():
                setattr(self, k, v)
        def __repr__(self):
            return "<UserAccount %r>" % self.__dict__

    accounts = []
    for uid in user.getUserList(request):
        # be careful and just create a list of what we really need,
        # not sure if we can keep lots of User objects instantiated
        # in parallel (open files? too big?)
        u = user.User(request, uid)
        accounts.append(UserAccount(name=u.name, email=u.email, jid=u.jid, disabled=u.disabled))

    def sortkey(account):
        # enabled accounts at top, sorted by name
        return (account.disabled, account.name)

    # Iterate over user accounts
    for account in sorted(accounts, key=sortkey):
        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show the groups that are defined in wiki pages, linking to them;
        # then show the groups that come from other backends.
        grouppage_links = ', '.join([Page(request, group_name).link_to(request) for group_name in wiki_groups] +
                                    other_groups)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text=_('Disable user')
        if account.disabled:
            text=_('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(unicode(ret))

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link =  unicode(unicode(ret))

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data)
        return browser.render()

    # No data
    return ''
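
On the receiving side, the ticket added above would normally be verified before the profile change is applied; a hedged sketch, assuming wikiutil.checkTicket is the matching verifier for wikiutil.createTicket:

def userprofile_ticket_ok_sketch(request, form):
    # Reject the POST unless it carries a valid 'userprofile' ticket.
    ticket = form.get('ticket', '')
    return wikiutil.checkTicket(request, ticket)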