Example #1
    def wiki_tree_html(self, node_name, path=[]):
        """ Return wiki sub tree html code with the node as root

        The path contains the path of nodes from the root to the current node.
        This is used to avoid recursion in the wiki tree.
        
        @param node_name: root of the sub tree
        @param path: list of nodes up to this one
        @rtype: unicode
        @return: wiki tree html
        """
        items = []
        node = self.wiki_tree[node_name]
        html = node['html']
        if node_name == self.page_name:
            html = html.replace('class="node"', 'class="node_selected"')
        sub_nodes = node['categories'] + node['pages'] + node['attachments']
        if not sub_nodes:
            items = [u'<li class="no_toggle">' + html]  # Display the node
        else:
            node_id = wikiutil.url_quote(u''.join(path), '')
            display_subtree = (not path) or (node_id in self.cookies)
            if path:  # If it's not the root node (which is not displayed)
                toggle_icon_url = [self.expand_icon_url, self.collapse_icon_url][display_subtree]
                toggle_icon_alt = ["[+]", "[-]"][display_subtree]
                items = [u'<li><img class="toggle" alt="%s" title="%s" src="%s" onclick="toggle_display_element(this, \'%s\');">%s' % (toggle_icon_alt, self.ui_text["toggle_title"], toggle_icon_url, node_id, html)]
            if display_subtree:
                items.append(u'<ul class="wiki_tree" id="%s">' % node_id)
                for sub_node_name in sub_nodes:
                    items.append(self.wiki_tree_html(sub_node_name, path+[sub_node_name]))
                items.append(u'</ul>')
        return u'\n'.join(items)
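
For orientation, a minimal standalone sketch of the same recursive pattern, with a
plain nested dict standing in for self.wiki_tree (the names tree_html and tree are
illustrative, not part of the theme's API):

    def tree_html(tree, name, path=None):
        # Render `name` and its children as nested <ul>/<li> markup.
        path = path or []
        items = [u'<li>%s' % name]
        children = tree.get(name, [])
        if children:
            items.append(u'<ul>')
            for child in children:
                items.append(tree_html(tree, child, path + [child]))
            items.append(u'</ul>')
        return u'\n'.join(items)

    tree = {u'Root': [u'PageA', u'PageB'], u'PageA': [u'PageA/Sub']}
    print tree_html(tree, u'Root')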
Example #2
    def init_wiki_tree(self):
        """ Init the wiki tree structure and wiki tree info (or loads it from cache)

        See build_wiki_tree for the wiki_tree and wiki_tree_info data structure.
        """
        request = self.request
        self.wiki_tree = {}
        # Init wiki tree cache
        cache = caching.CacheEntry(
            request, 'wiki_tree',
            "%s@%s" % (wikiutil.url_quote(self.root), request.user.id))
        if self.is_moin_1_5:
            refresh = request.form.get('action', ['show'])[0] == 'refresh'
        else:
            refresh = request.action == 'refresh'
        # Check if there's a cached wiki tree and no refresh action invoked
        if cache.exists() and not refresh:
            version, data = cPickle.loads(cache.content())
            if version == self.release:
                # Only use cached data if it corresponds to the theme version.
                # This avoids errors when the data structure has changed.
                self.wiki_tree, self.wiki_info = data

        if not self.wiki_tree:
            self.build_wiki_tree()
            # Cache the wiki tree
            cache.update(
                cPickle.dumps([self.release, [self.wiki_tree,
                                              self.wiki_info]]))
        return
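
The version-stamped cache pattern above, in isolation: pickled data is trusted only
if the same release wrote it. A sketch with cPickle and a plain file standing in for
caching.CacheEntry (RELEASE and both helper names are illustrative):

    import cPickle

    RELEASE = '2.0'  # stands in for self.release

    def save_cache(path, data):
        open(path, 'wb').write(cPickle.dumps([RELEASE, data]))

    def load_cache(path):
        # Trust cached data only if this release wrote it; else None.
        try:
            version, data = cPickle.loads(open(path, 'rb').read())
        except (IOError, cPickle.UnpicklingError):
            return None
        if version == RELEASE:
            return data
        return None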
Example #3
    def init_wiki_tree(self):
        """ Init the wiki tree structure and wiki tree info (or loads it from cache)

        See build_wiki_tree for the wiki_tree and wiki_tree_info data structure.
        """
        request = self.request
        self.wiki_tree = {}
        # Init wiki tree cache
        cache = caching.CacheEntry(request, 'wiki_tree', "%s@%s" % (wikiutil.url_quote(self.root), request.user.id))
        if self.is_moin_1_5:
            refresh = request.form.get('action', ['show'])[0] == 'refresh'
        else:
            refresh = request.action == 'refresh'
        # Check if there's a cached wiki tree and no refresh action invoked
        if cache.exists() and not refresh:
            version, data = cPickle.loads(cache.content())
            if version == self.release:
                # Only use cached data if it corresponds to the theme version.
                # This avoids errors when the data structure has changed.
                self.wiki_tree, self.wiki_info = data

        if not self.wiki_tree:
            self.build_wiki_tree()
            # Cache the wiki tree
            cache.update(cPickle.dumps([self.release, [self.wiki_tree, self.wiki_info]]))
        return
Example #4
def getAttachUrl(pagename, filename, request, addts=0, escaped=0, do='get'):
    """ Get URL that points to attachment `filename` of page `pagename`.

        If 'addts' is true, a timestamp with the file's modification time
        is added, so that browsers reload a changed file.
    """
    if htdocs_access(request):
        # direct file access via webserver
        timestamp = ''
        if addts:
            try:
                timestamp = '?ts=%s' % os.path.getmtime(
                    getFilename(request, pagename, filename))
            except (IOError, OSError):
                pass

        url = "%s/%s/attachments/%s%s" % (
            request.cfg.attachments['url'], wikiutil.quoteWikinameFS(pagename),
            wikiutil.url_quote(filename), timestamp)
    else:
        # send file via CGI
        if do not in ['get', 'view']:
            do = 'get'

        url = "%s/%s?action=%s&do=%s&target=%s" % (
            request.getScriptname(), wikiutil.quoteWikinameURL(pagename),
            action_name, do, wikiutil.url_quote_plus(filename))
    if escaped:
        url = wikiutil.escape(url)
    return url
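
The cache-busting idea from the htdocs branch, in isolation: append the file's
modification time as a query parameter so a changed file gets a new URL and
browsers refetch it (a sketch; the helper name is made up):

    import os

    def add_timestamp(url, filepath):
        # A changed file changes its mtime, hence its URL.
        try:
            return '%s?ts=%s' % (url, os.path.getmtime(filepath))
        except OSError:
            return url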
Example #5
    def _outstr(self, text):
        """ Convert string from internal representation to xmlrpc

        @param text: unicode or string in config.charset
        @rtype: str
        @return: text encoded in utf-8 and quoted
        """
        return wikiutil.url_quote(text) # config.charset must be utf-8
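
For comparison, a standalone sketch of the same conversion using only the standard
library (not the MoinMoin implementation; it assumes, as the comment requires, that
the charset is utf-8):

    import urllib

    def outstr(text):
        # Encode unicode to utf-8 bytes, then percent-quote them.
        if isinstance(text, unicode):
            text = text.encode('utf-8')
        return urllib.quote(text)

    print outstr(u'Caf\xe9 page')  # Caf%C3%A9%20page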
Example #6
    def _outstr(self, text):
        """
        Convert string from internal representation to xmlrpc

        @param text: unicode or string in config.charset
        @rtype: str
        @return: text encoded in utf-8 and quoted
        """
        return wikiutil.url_quote(text)  # config.charset must be utf-8
Example #7
def _addLogEntry(request, action, pagename, filename):
    """ Add an entry to the edit log on uploads and deletes.

        `action` should be "ATTNEW" or "ATTDEL"
    """
    from MoinMoin.logfile import editlog
    t = wikiutil.timestamp2version(time.time())
    fname = wikiutil.url_quote(filename)

    # Write to global log
    log = editlog.EditLog(request)
    log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)

    # Write to local log
    log = editlog.EditLog(request, rootpagename=pagename)
    log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)
Example #8
def _addLogEntry(request, action, pagename, filename):
    """ Add an entry to the edit log on uploads and deletes.

        `action` should be "ATTNEW" or "ATTDEL"
    """
    from MoinMoin.logfile import editlog
    t = wikiutil.timestamp2version(time.time())
    fname = wikiutil.url_quote(filename, want_unicode=True)

    # TODO: for now we simply write 2 logs; some multilog mechanism would be better
    # Write to global log
    log = editlog.EditLog(request)
    log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)

    # Write to local log
    log = editlog.EditLog(request, rootpagename=pagename)
    log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)
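
A minimal sketch of the multilog mechanism the TODO mentions: a wrapper that fans a
single add() call out to several logs (MultiLog is hypothetical, not a MoinMoin
class):

    class MultiLog(object):
        """ Fan one add() call out to several EditLog instances. """
        def __init__(self, *logs):
            self.logs = logs

        def add(self, *args, **kw):
            for log in self.logs:
                log.add(*args, **kw)

With that, the two log.add(...) calls above would collapse into a single call on
MultiLog(editlog.EditLog(request), editlog.EditLog(request, rootpagename=pagename)).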
Example #9
def show_pages(request, pagename, editor, timestamp):
    _ = request.getText

    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}
    #  mimic macro object for use of RecentChanges subfunctions
    macro = tmp()
    macro.request = request
    macro.formatter = request.html_formatter

    request.write("<table>")
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break

        if not request.user.may.read(line.pagename):
            continue

        if line.pagename not in pages:
            pages[line.pagename] = 1
            if repr(line.getInterwikiEditorData(request)) == editor:
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                request.write(RecentChanges.format_page_edits(macro, [line], timestamp))

    request.write(
        """
</table>
<p>
<form method="post" action="%(url)s">
<input type="hidden" name="action" value="Despam">
<input type="hidden" name="ticket" value="%(ticket)s">
<input type="hidden" name="editor" value="%(editor)s">
<input type="submit" name="ok" value="%(label)s">
</form>
</p>
"""
        % dict(
            url=request.href(pagename),
            ticket=wikiutil.createTicket(request),
            editor=wikiutil.url_quote(editor),
            label=_("Revert all!"),
        )
    )
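
The newest-edit-per-page filter used in the loop above, in isolation (a standalone
sketch; lines are expected newest-first, as log.reverse() yields them):

    def latest_per_page(lines):
        # Keep only the first (newest) entry seen for each page name.
        seen = {}
        for line in lines:
            if line.pagename not in seen:
                seen[line.pagename] = 1
                yield line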
Example #10
def show_pages(request, pagename, editor, timestamp):
    _ = request.getText

    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}
    #  mimic macro object for use of RecentChanges subfunctions
    macro = tmp()
    macro.request = request
    macro.formatter = request.html_formatter

    request.write("<table>")
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break

        if not request.user.may.read(line.pagename):
            continue

        if line.pagename not in pages:
            pages[line.pagename] = 1
            if repr(line.getInterwikiEditorData(request)) == editor:
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                request.write(RecentChanges.format_page_edits(macro, [line], timestamp))

    request.write('''
</table>
<p>
<form method="post" action="%(url)s">
<input type="hidden" name="action" value="Despam">
<input type="hidden" name="ticket" value="%(ticket)s">
<input type="hidden" name="editor" value="%(editor)s">
<input type="submit" name="ok" value="%(label)s">
</form>
</p>
''' % dict(
        url=request.href(pagename),
        ticket=wikiutil.createTicket(request),
        editor=wikiutil.url_quote(editor),
        label=_("Revert all!"),
    ))
Example #11
def _attachment(request, pagename, filename, outputdir, **kw):
    filename = filename.encode(config.charset)
    source_dir = AttachFile.getAttachDir(request, pagename)
    source_file = os.path.join(source_dir, filename)
    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
    dest_file = os.path.join(dest_dir, filename)
    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), wikiutil.url_quote(filename))
    if os.access(source_file, os.R_OK):
        if not os.access(dest_dir, os.F_OK):
            try:
                os.makedirs(dest_dir)
            except OSError:
                script.fatal("Cannot create attachment directory '%s'" % dest_dir)
        elif not os.path.isdir(dest_dir):
            script.fatal("'%s' is not a directory" % dest_dir)

        shutil.copyfile(source_file, dest_file)
        script.log('Writing "%s"...' % dest_url)
        return dest_url
    else:
        return ""
Example #12
def _attachment(request, pagename, filename, outputdir, **kw):
	filename = filename.encode(config.charset)
	source_dir = AttachFile.getAttachDir(request, pagename)
	source_file = os.path.join(source_dir, filename)
	dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
	dest_file = os.path.join(dest_dir, filename)
	dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), wikiutil.url_quote(filename))
	if os.access(source_file, os.R_OK):
		if not os.access(dest_dir, os.F_OK):
			try:
				os.makedirs(dest_dir)
			except OSError:
				script.fatal("Cannot create attachment directory '%s'" % dest_dir)
		elif not os.path.isdir(dest_dir):
			script.fatal("'%s' is not a directory" % dest_dir)

		shutil.copyfile(source_file, dest_file)
		script.log('Writing "%s"...' % dest_url)
		return dest_url
	else:
		return ""
Example #13
    def wiki_tree_html(self, node_name, path=[]):
        """ Return wiki sub tree html code with the node as root

        The path contains the path of nodes from the root to the current node.
        This is used to avoid recursion in the wiki tree.
        
        @param node_name: root of the sub tree
        @param path: list of nodes up to this one
        @rtype: unicode
        @return: wiki tree html
        """
        items = []
        node = self.wiki_tree[node_name]
        html = node['html']
        if node_name == self.page_name:
            html = html.replace('class="node"', 'class="node_selected"')
        sub_nodes = node['categories'] + node['pages'] + node['attachments']
        if not sub_nodes:
            items = [u'<li class="no_toggle">' + html]  # Display the node
        else:
            node_id = wikiutil.url_quote(u''.join(path), '')
            display_subtree = (not path) or (node_id in self.cookies)
            if path:  # If it's not the root node (which is not displayed)
                toggle_icon_url = [
                    self.expand_icon_url, self.collapse_icon_url
                ][display_subtree]
                toggle_icon_alt = ["[+]", "[-]"][display_subtree]
                items = [
                    u'<li><img class="toggle" alt="%s" title="%s" src="%s" onclick="toggle_display_element(this, \'%s\');">%s'
                    % (toggle_icon_alt, self.ui_text["toggle_title"],
                       toggle_icon_url, node_id, html)
                ]
            if display_subtree:
                items.append(u'<ul class="wiki_tree" id="%s">' % node_id)
                for sub_node_name in sub_nodes:
                    items.append(
                        self.wiki_tree_html(sub_node_name,
                                            path + [sub_node_name]))
                items.append(u'</ul>')
        return u'\n'.join(items)
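
The expand/collapse persistence in isolation: a subtree's id is the percent-quoted
concatenation of its path, and the subtree is open when that id was remembered in a
cookie. A sketch with urllib.quote standing in for wikiutil.url_quote:

    import urllib

    def subtree_open(path, cookie_ids):
        # The root (empty path) is always open; others only if remembered.
        node_id = urllib.quote(u''.join(path).encode('utf-8'), '')
        return (not path) or (node_id in cookie_ids)

    print subtree_open([u'WikiA', u'SubPage'], ['WikiASubPage'])  # True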
Example #14
def execute(pagename, request):
    """ show misc. infos about a page """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1),
                      f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1),
                      f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        from MoinMoin.support.python_compatibility import hash_new
        digest = hash_new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write(f.paragraph(1),
                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                          'label': _("SHA digest of this page's content is:"),
                          'value': digest, }),
                      f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request, include_self=1, return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))

    def history(page, pagename, request):
        # show history as default
        _ = request.getText
        default_count, limit_max_count = request.cfg.history_count[0:2]
        paging = request.cfg.history_paging

        try:
            max_count = int(request.values.get('max_count', default_count))
        except ValueError:
            max_count = default_count
        max_count = max(1, min(max_count, limit_max_count))

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)

        offset = 0
        paging_info_html = ""
        paging_nav_html = ""
        count_select_html = ""

        f = request.formatter

        if paging:
            log_size = log.lines()

            try:
                offset = int(request.values.get('offset', 0))
            except ValueError:
                offset = 0
            offset = max(min(offset, log_size - 1), 0)

            paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
                'start_offset': log_size - min(log_size, offset + max_count) + 1,
                'end_offset': log_size - offset,
                'total_count': log_size,
            } + f.paragraph(0)

            # generating offset navigating links
            if max_count < log_size or offset != 0:
                offset_links = []
                cur_offset = max_count
                near_count = 5 # request.cfg.pagination_size

                min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
                max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
                offset_added = False

                def add_offset_link(offset, caption=None):
                    offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(max_count),
                            }, css_class="info-offset-nav-link", rel="nofollow") + f.text(caption or str(log_size - offset)) + page.link_to(request, on=0) +
                        f.table_cell(0)
                    )

                # link to previous page - only if not at start
                if offset > 0:
                    add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

                # link to the beginning of the event log - if min_offset is not minimal
                if min_offset > 0:
                    add_offset_link(0)
                    # add a gap only if min_offset does not immediately follow the beginning
                    if min_offset > 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                # generating near pages links
                for cur_offset in range(min_offset, max_offset + 1):
                    # note that the current offset may not be a multiple of max_count,
                    # so we check whether we should add the current offset marker like this
                    if not offset_added and offset <= cur_offset * max_count:
                        # current info history view offset
                        offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                        offset_added = True

                    # add link, if not at this offset
                    if offset != cur_offset * max_count:
                        add_offset_link(cur_offset * max_count)

                # link to the last page of the event log
                if max_offset < (log_size - 1) / max_count:
                    if max_offset < (log_size - 1) / max_count - 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))
                    add_offset_link(((log_size - 1) / max_count) * max_count)

                # special case - if offset is greater than max_offset * max_count
                if offset > max_offset * max_count:
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

                # link to next page
                if offset < (log_size - max_count):
                    add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

                # generating html
                paging_nav_html += "".join([
                    f.table(1, css_class="searchpages"),
                    f.table_row(1),
                    "".join(offset_links),
                    f.table_row(0),
                    f.table(0),
                ])

        # generating max_count switcher
        # we do it only if history_count has additional values
        if len(request.cfg.history_count) > 2:
            max_count_possibilities = list(set(request.cfg.history_count))
            max_count_possibilities.sort()
            max_count_html = []
            cur_count_added = False


            for count in max_count_possibilities:
                # the max_count value may not be in the list of predefined values
                if max_count <= count and not cur_count_added:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item info-cur-count"),
                        f.text(str(max_count)),
                        f.span(0),
                    ]))
                    cur_count_added = True

                # checking for limit_max_count to prevent showing unavailable options
                if max_count != count and count <= limit_max_count:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item"),
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(count),
                            }, css_class="info-count-link", rel="nofollow"),
                        f.text(str(count)),
                        page.link_to(request, on=0),
                        f.span(0),
                    ]))

            count_select_html += "".join([
                f.span(1, css_class="info-count-selector"),
                    f.text(" ("),
                    f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
                    f.text(")"),
                f.span(0),
            ])

        # open log for this page
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size', label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list

        def render_action(text, query, **kw):
            kw.update(dict(rel='nofollow'))
            return page.link_to(request, text, querystr=query, **kw)

        def render_file_action(text, pagename, filename, request, do):
            url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
            if url:
                f = request.formatter
                link = f.url(1, url) + f.text(text) + f.url(0)
                return link

        may_write = request.user.may.write(pagename)
        may_delete = request.user.may.delete(pagename)

        count = 0
        pgactioncount = 0
        for line in log.reverse():
            count += 1

            if paging and count <= offset:
                continue

            rev = int(line.rev)
            actions = []
            if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
                size = page.size(rev=rev)
                actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
                if pgactioncount == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif pgactioncount == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
                if rev > 1:
                    diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev-1, 'rev2': rev})
                comment = line.comment
                if not comment:
                    if '/REVERT' in line.action:
                        comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                    elif '/RENAME' in line.action:
                        comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
                pgactioncount += 1
            else: # ATT*
                rev = '-'
                diff = '-'

                filename = wikiutil.url_unquote(line.extra)
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                    actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                    actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                    if may_delete:
                        actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                    if may_write:
                        actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
                else:
                    size = 0

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                "&nbsp;".join(a for a in actions if a),
            ))
            if (count >= max_count + offset) or (paging and count >= log_size):
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget

        request.write(unicode(html.H2().append(_('Revision History'))))

        if not count: # there was no entry in logfile
            request.write(_('No log entries found.'))
            return

        history_table = DataBrowserWidget(request)
        history_table.setData(history)

        div = html.DIV(id="page-history")
        div.append(html.INPUT(type="hidden", name="action", value="diff"))
        div.append(history_table.render(method="GET"))

        form = html.FORM(method="GET", action="")
        if paging:
            form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-top"),
                paging_nav_html,
                f.div(0),
            ]))
        form.append(div)
        if paging:
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
                paging_nav_html,
                f.div(0)
            ]))
        request.write(unicode(form))

    # main function
    _ = request.getText
    page = Page(request, pagename)
    title = page.split_title()

    request.setContentLanguage(request.lang)
    f = request.formatter

    request.theme.send_title(_('Info for "%s"') % (title, ), page=page)
    menu_items = [
        (_('Show "%(title)s"') % {'title': _('Revision History')},
         {'action': 'info'}),
        (_('Show "%(title)s"') % {'title': _('General Page Infos')},
         {'action': 'info', 'general': '1'}),
        (_('Show "%(title)s"') % {'title': _('Page hits and edits')},
         {'action': 'info', 'hitcounts': '1'}),
    ]
    request.write(f.div(1, id="content")) # start content div
    request.write(f.paragraph(1))
    for text, querystr in menu_items:
        request.write("[%s] " % page.link_to(request, text=text, querystr=querystr, rel='nofollow'))
    request.write(f.paragraph(0))

    show_hitcounts = int(request.values.get('hitcounts', 0)) != 0
    show_general = int(request.values.get('general', 0)) != 0

    if show_hitcounts:
        from MoinMoin.stats import hitcounts
        request.write(hitcounts.linkto(pagename, request, 'page=' + wikiutil.url_quote(pagename)))
    elif show_general:
        general(page, pagename, request)
    else:
        history(page, pagename, request)

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
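
The paging-window arithmetic from history() in isolation: given the log size, the
current offset and the page size, compute which nearby offsets get navigation links
(a standalone sketch; near_count as in the code above):

    def paging_window(log_size, offset, max_count, near_count=5):
        # Offsets are multiples of max_count, clamped to the log bounds.
        min_offset = max(0, (offset + max_count - 1) // max_count - near_count)
        max_offset = min((log_size - 1) // max_count,
                         offset // max_count + near_count)
        return [o * max_count for o in range(min_offset, max_offset + 1)]

    print paging_window(log_size=100, offset=20, max_count=10)
    # [0, 10, 20, 30, 40, 50, 60, 70]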
Example #15
def execute(pagename, request):
    """ show misc. infos about a page """
    if not request.user.may.read(pagename):
        Page(request, pagename).send_page()
        return

    def general(page, pagename, request):
        _ = request.getText
        f = request.formatter

        request.write(f.heading(1, 1),
                      f.text(_('General Information')),
                      f.heading(0, 1))

        request.write(f.paragraph(1),
                      f.text(_("Page size: %d") % page.size()),
                      f.paragraph(0))

        digest = hashlib.new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
        request.write(f.paragraph(1),
                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                          'label': _("SHA digest of this page's content is:"),
                          'value': digest, }),
                      f.paragraph(0))

        # show attachments (if allowed)
        attachment_info = action.getHandler(request, 'AttachFile', 'info')
        if attachment_info:
            request.write(attachment_info(pagename, request))

        # show subscribers
        subscribers = page.getSubscribers(request, include_self=1, return_users=1)
        if subscribers:
            request.write(f.paragraph(1))
            request.write(f.text(_('The following users subscribed to this page:')))
            for lang in subscribers:
                request.write(f.linebreak(), f.text('[%s] ' % lang))
                for user in subscribers[lang]:
                    # do NOT disclose email addr, only WikiName
                    userhomepage = Page(request, user.name)
                    if userhomepage.exists():
                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                    else:
                        request.write(f.text(user.name + ' '))
            request.write(f.paragraph(0))

        # show links
        links = page.getPageLinks(request)
        if links:
            request.write(f.paragraph(1))
            request.write(f.text(_('This page links to the following pages:')))
            request.write(f.linebreak())
            for linkedpage in links:
                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
            request.write(f.paragraph(0))

    def history(page, pagename, request):
        # show history as default
        _ = request.getText
        default_count, limit_max_count = request.cfg.history_count[0:2]
        paging = request.cfg.history_paging

        try:
            max_count = int(request.values.get('max_count', default_count))
        except ValueError:
            max_count = default_count
        max_count = max(1, min(max_count, limit_max_count))

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)

        offset = 0
        paging_info_html = ""
        paging_nav_html = ""
        count_select_html = ""

        f = request.formatter

        if paging:
            log_size = log.lines()

            try:
                offset = int(request.values.get('offset', 0))
            except ValueError:
                offset = 0
            offset = max(min(offset, log_size - 1), 0)

            paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
                'start_offset': log_size - min(log_size, offset + max_count) + 1,
                'end_offset': log_size - offset,
                'total_count': log_size,
            } + f.paragraph(0)

            # generating offset navigating links
            if max_count < log_size or offset != 0:
                offset_links = []
                cur_offset = max_count
                near_count = 5 # request.cfg.pagination_size

                min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
                max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
                offset_added = False

                def add_offset_link(offset, caption=None):
                    offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(max_count),
                            }, css_class="info-offset-nav-link", rel="nofollow") + f.text(caption or str(log_size - offset)) + page.link_to(request, on=0) +
                        f.table_cell(0)
                    )

                # link to previous page - only if not at start
                if offset > 0:
                    add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

                # link to the beginning of the event log - if min_offset is not minimal
                if min_offset > 0:
                    add_offset_link(0)
                    # add a gap only if min_offset does not immediately follow the beginning
                    if min_offset > 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                # generating near pages links
                for cur_offset in range(min_offset, max_offset + 1):
                    # note that the current offset may not be a multiple of max_count,
                    # so we check whether we should add the current offset marker like this
                    if not offset_added and offset <= cur_offset * max_count:
                        # current info history view offset
                        offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                        offset_added = True

                    # add link, if not at this offset
                    if offset != cur_offset * max_count:
                        add_offset_link(cur_offset * max_count)

                # link to the last page of the event log
                if max_offset < (log_size - 1) / max_count:
                    if max_offset < (log_size - 1) / max_count - 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))
                    add_offset_link(((log_size - 1) / max_count) * max_count)

                # special case - if offset is greater than max_offset * max_count
                if offset > max_offset * max_count:
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

                # link to next page
                if offset < (log_size - max_count):
                    add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

                # generating html
                paging_nav_html += "".join([
                    f.table(1, css_class="searchpages"),
                    f.table_row(1),
                    "".join(offset_links),
                    f.table_row(0),
                    f.table(0),
                ])

        # generating max_count switcher
        # we do it only if history_count has additional values
        if len(request.cfg.history_count) > 2:
            max_count_possibilities = list(set(request.cfg.history_count))
            max_count_possibilities.sort()
            max_count_html = []
            cur_count_added = False


            for count in max_count_possibilities:
                # the max_count value may not be in the list of predefined values
                if max_count <= count and not cur_count_added:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item info-cur-count"),
                        f.text(str(max_count)),
                        f.span(0),
                    ]))
                    cur_count_added = True

                # checking for limit_max_count to prevent showing unavailable options
                if max_count != count and count <= limit_max_count:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item"),
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(count),
                            }, css_class="info-count-link", rel="nofollow"),
                        f.text(str(count)),
                        page.link_to(request, on=0),
                        f.span(0),
                    ]))

            count_select_html += "".join([
                f.span(1, css_class="info-count-selector"),
                    f.text(" ("),
                    f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
                    f.text(")"),
                f.span(0),
            ])

        # open log for this page
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size', label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list

        def render_action(text, query, **kw):
            kw.update(dict(rel='nofollow'))
            return page.link_to(request, text, querystr=query, **kw)

        def render_file_action(text, pagename, filename, request, do):
            url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
            if url:
                f = request.formatter
                link = f.url(1, url) + f.text(text) + f.url(0)
                return link

        may_write = request.user.may.write(pagename)
        may_delete = request.user.may.delete(pagename)

        count = 0
        pgactioncount = 0
        for line in log.reverse():
            count += 1

            if paging and count <= offset:
                continue

            rev = int(line.rev)
            actions = []
            if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
                size = page.size(rev=rev)
                actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
                if pgactioncount == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif pgactioncount == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
                if rev > 1:
                    diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev-1, 'rev2': rev})
                comment = line.comment
                if not comment:
                    if '/REVERT' in line.action:
                        comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                    elif '/RENAME' in line.action:
                        comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
                pgactioncount += 1
            else: # ATT*
                rev = '-'
                diff = '-'

                filename = wikiutil.url_unquote(line.extra)
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                    actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                    actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                    if may_delete:
                        actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                    if may_write:
                        actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
                else:
                    size = 0

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                "&nbsp;".join(a for a in actions if a),
            ))
            if (count >= max_count + offset) or (paging and count >= log_size):
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget

        request.write(unicode(html.H2().append(_('Revision History'))))

        if not count: # there was no entry in logfile
            request.write(_('No log entries found.'))
            return

        history_table = DataBrowserWidget(request)
        history_table.setData(history)

        div = html.DIV(id="page-history")
        div.append(html.INPUT(type="hidden", name="action", value="diff"))
        div.append(history_table.render(method="GET"))

        form = html.FORM(method="GET", action="")
        if paging:
            form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-top"),
                paging_nav_html,
                f.div(0),
            ]))
        form.append(div)
        if paging:
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
                paging_nav_html,
                f.div(0)
            ]))
        request.write(unicode(form))

    # main function
    _ = request.getText
    page = Page(request, pagename)
    title = page.split_title()

    request.setContentLanguage(request.lang)
    f = request.formatter

    request.theme.send_title(_('Info for "%s"') % (title, ), page=page)
    menu_items = [
        (_('Show "%(title)s"') % {'title': _('Revision History')},
         {'action': 'info'}),
        (_('Show "%(title)s"') % {'title': _('General Page Infos')},
         {'action': 'info', 'general': '1'}),
        (_('Show "%(title)s"') % {'title': _('Page hits and edits')},
         {'action': 'info', 'hitcounts': '1'}),
    ]
    request.write(f.div(1, id="content")) # start content div
    request.write(f.paragraph(1))
    for text, querystr in menu_items:
        request.write("[%s] " % page.link_to(request, text=text, querystr=querystr, rel='nofollow'))
    request.write(f.paragraph(0))

    show_hitcounts = int(request.values.get('hitcounts', 0)) != 0
    show_general = int(request.values.get('general', 0)) != 0

    if show_hitcounts:
        from MoinMoin.stats import hitcounts
        request.write(hitcounts.linkto(pagename, request, 'page=' + wikiutil.url_quote(pagename)))
    elif show_general:
        general(page, pagename, request)
    else:
        history(page, pagename, request)

    request.write(f.div(0)) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
Example #16
    def render(self, formatter):
        """ renders formular  """

        _ = self._

        # checks if initializing of all attributes in __init__ was done
        if not self.init_settings:
            return

        text = """%(mathtran_server)s?;D=%(scale_factor)s;tex=%(formular)s""" % {
                                                                                 "mathtran_server": self.mathtran_server,
                                                                                 "formular": wikiutil.url_quote(self.raw),
                                                                                 "scale_factor": self.scale_factor,
                                                                                }
        key = cache.key(self.request, itemname=self.pagename, content=text)
        if not cache.exists(self.request, key) and urllib:
            # ToDo add a timeout
            image = urllib.urlopen(text)
            cache.put(self.request, key, image.read(),
                      content_type="image/png")
        if cache.exists(self.request, key):
            credits = """<a href="http://www.mathtran.org/" title="MoinMoin uses the Mathtran Online translation of mathematical content software.">MathTran Powered</a>"""
            if credits not in self.request.cfg.page_credits:
                self.request.cfg.page_credits.append(credits)
                # ToDo show the credits only on pages using the parser extension
        return formatter.image(src=cache.url(self.request, key), alt=self.raw)
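
A sketch of the timeout the ToDo above asks for, using urllib2 instead of urllib
(urllib2.urlopen accepts a timeout from Python 2.6 on; the helper name is made up):

    import urllib2

    def fetch_image(url, timeout=10):
        # Give up after `timeout` seconds instead of blocking forever.
        return urllib2.urlopen(url, timeout=timeout).read()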
Example #17
    def format(self, formatter, **kw):
        """ Send the text. """
        _ = self._

        if not self.request.cfg.allow_xslt:
            # use plain text parser if XSLT is not allowed
            # can be activated in wikiconfig.py
            from MoinMoin.parser.text import Parser as TextParser
            self.request.write(formatter.sysmsg(1) +
                               formatter.rawHTML(_('XSLT option disabled, please look at HelpOnConfiguration.', wiki=True)) +
                               formatter.sysmsg(0))
            TextParser(self.raw, self.request).format(formatter)
            return

        try:
            # try importing Ft from 4suite
            # if import fails or its version is not 1.x, error msg
            from Ft.Xml import __version__ as ft_version
            assert ft_version.startswith('1.')
        except (ImportError, AssertionError):
            self.request.write(self.request.formatter.sysmsg(1) +
                               self.request.formatter.text(_('XSLT processing is not available, please install 4suite 1.x.')) +
                               self.request.formatter.sysmsg(0))
        else:
            from Ft.Lib import Uri
            from Ft.Xml import InputSource
            from Ft.Xml.Xslt.Processor import Processor
            from Ft import FtException

            msg = None

            try:
                # the location of SchemeRegistryResolver has changed since 1.0a4
                if ft_version >= "1.0a4" or ft_version == "1.0" or ("1.0.1" <= ft_version <= "1.0.9"):
                    # such version numbers suck!
                    import Ft.Lib.Resolvers # Do not remove! it looks unused, but breaks when removed!!!

                class MoinResolver(Uri.SchemeRegistryResolver):
                    """ supports resolving self.base_uri for actual pages in MoinMoin """
                    def __init__(self, handlers, base_scheme):
                        Uri.SchemeRegistryResolver.__init__(self, handlers)
                        self.supportedSchemes.append(base_scheme)

                # setting up vars for xslt Processor
                out_file = StringIO.StringIO()
                wiki_resolver = MoinResolver(
                                    handlers={self.base_scheme: self._resolve_page, },
                                    base_scheme=self.base_scheme)
                input_factory = InputSource.InputSourceFactory(resolver=wiki_resolver)

                page_uri = self.base_uri + wikiutil.url_quote(formatter.page.page_name)
                # 4Suite needs a utf-8 encoded byte string instead of a unicode object
                raw = self.raw.strip().encode('utf-8')
                self.processor = Processor()
                self.append_stylesheet() # hook, for extending this parser
                self.processor.run(
                    input_factory.fromString(raw, uri=page_uri),
                    outputStream=out_file)
                # Convert utf-8 encoded byte string into unicode
                result = out_file.getvalue().decode('utf-8')
                result = self.parse_result(result) # hook, for extending this parser

            except FtException, msg:
                etype = "XSLT"
            except Uri.UriException, msg:
                etype = "XSLT"
Example #18
    def format(self, formatter):
        """ Send the text. """
        _ = self._

        if not self.request.cfg.allow_xslt:
            # use plain text parser if XSLT is not allowed
            # can be activated in wikiconfig.py
            from MoinMoin.parser.text import Parser as TextParser
            self.request.write(
                formatter.sysmsg(1) + formatter.rawHTML(
                    _('XSLT option disabled, please look at HelpOnConfiguration.',
                      wiki=True)) + formatter.sysmsg(0))
            TextParser(self.raw, self.request).format(formatter)
            return

        try:
            # try importing Ft from 4suite
            # if import fails or its version is not 1.x, error msg
            from Ft.Xml import __version__ as ft_version
            assert ft_version.startswith('1.')
        except (ImportError, AssertionError):
            self.request.write(
                self.request.formatter.sysmsg(1) + self.request.formatter.text(
                    _('XSLT processing is not available, please install 4suite 1.x.'
                      )) + self.request.formatter.sysmsg(0))
        else:
            from Ft.Lib import Uri
            from Ft.Xml import InputSource
            from Ft.Xml.Xslt.Processor import Processor
            from Ft import FtException

            msg = None

            try:
                # the location of SchemeRegistryResolver has changed since 1.0a4
                if ft_version >= "1.0a4" or ft_version == "1.0" or (
                        "1.0.1" <= ft_version <= "1.0.9"):
                    # such version numbers suck!
                    import Ft.Lib.Resolvers  # Do not remove! it looks unused, but breaks when removed!!!

                class MoinResolver(Uri.SchemeRegistryResolver):
                    """ supports resolving self.base_uri for actual pages in MoinMoin """
                    def __init__(self, handlers, base_scheme):
                        Uri.SchemeRegistryResolver.__init__(self, handlers)
                        self.supportedSchemes.append(base_scheme)

                # setting up vars for xslt Processor
                out_file = StringIO.StringIO()
                wiki_resolver = MoinResolver(handlers={
                    self.base_scheme:
                    self._resolve_page,
                },
                                             base_scheme=self.base_scheme)
                input_factory = InputSource.InputSourceFactory(
                    resolver=wiki_resolver)

                page_uri = self.base_uri + wikiutil.url_quote(
                    formatter.page.page_name)
                # 4Suite needs a utf-8 encoded byte string instead of a unicode object
                raw = self.raw.strip().encode('utf-8')
                self.processor = Processor()
                self.append_stylesheet()  # hook, for extending this parser
                self.processor.run(input_factory.fromString(raw, uri=page_uri),
                                   outputStream=out_file)
                # Convert utf-8 encoded byte string into unicode
                result = out_file.getvalue().decode('utf-8')
                result = self.parse_result(
                    result)  # hook, for extending this parser

            except FtException, msg:
                etype = "XSLT"
            except Uri.UriException, msg:
                etype = "XSLT"