Example #1
def text(pagename, request):
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget

    _ = request.getText

    data = get_data(request)

    total = 0.0
    for cnt, ua in data:
        total += cnt

    agents = TupleDataset()
    agents.columns = [Column('agent', label=_("User agent"), align='left'),
                      Column('value', label='%', align='right')]

    cnt_printed = 0
    data = data[:10]

    if total:
        for cnt, ua in data:
            try:
                ua = unicode(ua)
                agents.addRow((ua, "%.2f" % (100.0 * cnt / total)))
                cnt_printed += cnt
            except UnicodeError:
                pass
        if total > cnt_printed:
            agents.addRow((_('Others'), "%.2f" % (100 * (total - cnt_printed) / total)))

    table = DataBrowserWidget(request)
    table.setData(agents)
    return table.render(method="GET")
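
The pattern above (per-item percentage rows plus an aggregated "Others" row) is easy to exercise outside MoinMoin. A minimal sketch of the same computation, with invented counts and no widget dependencies:

def share_rows(data, top=10):
    # data: (count, label) pairs, sorted by count descending
    total = float(sum(cnt for cnt, label in data))
    rows, printed = [], 0
    if total:
        for cnt, label in data[:top]:
            rows.append((label, "%.2f" % (100.0 * cnt / total)))
            printed += cnt
        if total > printed:
            rows.append(("Others", "%.2f" % (100.0 * (total - printed) / total)))
    return rows

print(share_rows([(50, "Mozilla"), (30, "Opera"), (20, "wget")], top=2))
# [('Mozilla', '50.00'), ('Opera', '30.00'), ('Others', '20.00')]
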
Example #2
    def setup_class(self):
        # check if state of example changes during tests
        example = [
            [
                'L1', (u'c', u'c'), (u'1', u'1'), (u'2', u'2'), (u'a', u'a'),
                (u'4', u'4'), (u'5', u'5')
            ],
            [
                'L2', (u'b', u'b'), (u'10', u'10'), (u'21', u'21'),
                (u'B', u'B'), (u'40', u'40'), (u'10', u'10')
            ],
            [
                'L3', (u'b', u'b'), (u'2', u'2'), (u'3.14', u'3.14'),
                (u'c', u'c'), (u'54', u'54'), (u'50', u'50')
            ],
            [
                'L4', (u'b', u'b'), (u'90', u'90'), (u'-2.240', u'-2.240'),
                (u'D', u'D'), (u'40', u'40'), (u'5', u'5')
            ],
            [
                'L5', (u'a', u'a'), (u'95', u'95'), (u'20', u'20'),
                (u'e', u'e'), (u'40', u'40'), (u'10', u'10')
            ],
        ]
        self.example = example
        data = TupleDataset()
        data.columns = []
        data.columns.extend([Column('TEST', label='TEST')])

        for line in self.example:
            data.addRow([line[0]] + line[1:])
            data.columns.extend([Column(line[0], label=line[0])])

        self.data = data
        self.table = DataBrowserWidget(self.request)
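
For reference, the fixture above only relies on TupleDataset holding a columns list and accepting addRow(). A stand-in with that surface (an assumption for illustration, not MoinMoin's real implementation) is enough to run such setups in isolation:

class Column(object):
    # assumed minimal shape of MoinMoin's Column
    def __init__(self, name, label=None, align='left'):
        self.name, self.label, self.align = name, label or name, align

class TupleDataset(object):
    # assumed minimal shape of MoinMoin's TupleDataset
    def __init__(self, name=None):
        self.name, self.columns, self.data = name, [], []
    def addRow(self, row):
        self.data.append(tuple(row))
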
Example #3
def used_languages(request):
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget

    _ = request.getText

    data = get_data(request)

    total = 0.0
    for cnt, lang in data:
        total += cnt

    languages = TupleDataset()
    languages.columns = [
        Column('language', label=_("Language"), align='left'),
        Column('value', label='%', align='right')
    ]

    cnt_printed = 0
    data = data[:10]

    # Preparing "<Browser setting>"
    browserlang = _('<Browser setting>')
    browserlang = browserlang[1:-1].capitalize()
    if total:
        for cnt, lang in data:
            try:
                if lang == u'browser':
                    languages.addRow(
                        (browserlang, "%(percent).2f%% (%(count)d)" % {
                            'percent': 100.0 * cnt / total,
                            'count': cnt
                        }))
                else:
                    lang = i18n.wikiLanguages()[lang]['x-language-in-english']
                    languages.addRow((lang, "%(percent).2f%% (%(count)d)" % {
                        'percent': 100.0 * cnt / total,
                        'count': cnt
                    }))
                cnt_printed += cnt
            except UnicodeError:
                pass
        if total > cnt_printed:
            languages.addRow((_('Others'), "%(percent).2f%% (%(count)d)" % {
                'percent': 100.0 * (total - cnt_printed) / total,
                'count': total - cnt_printed
            }))

    else:
        # With no users, assume the only real user is the visitor
        # (who is normally ignored, though), using the "Browser setting" language.
        languages.addRow((browserlang, "100% (1)"))

    table = DataBrowserWidget(request)
    table.setData(languages)
    return table.render(method="GET")
Example #4
def text(pagename, request, params=''):
    from MoinMoin.util.dataset import TupleDataset, Column
    from MoinMoin.widget.browser import DataBrowserWidget
    _ = request.getText

    # check params
    filterpage = None
    if params.startswith('page='):
        filterpage = wikiutil.url_unquote(params[len('page='):])

    if request and request.values and 'page' in request.values:
        filterpage = request.values['page']

    days, views, edits = get_data(pagename, request, filterpage)

    hits = TupleDataset()
    hits.columns = [Column('day', label=_("Date"), align='left'),
                    Column('views', label=_("Views/day"), align='right'),
                    Column('edits', label=_("Edits/day"), align='right'),
                    ]

    maxentries = 30

    if maxentries < len(days):
        step = float(len(days)) / maxentries
    else:
        step = 1

    sv = 0.0
    se = 0.0
    sd = 0.0
    cnt = 0

    for i in xrange(len(days)-1, -1, -1):
        d, v, e = days[i], views[i], edits[i]
        # sum up views and edits to step days
        sd += 1
        cnt += 1
        sv += v
        se += e
        if cnt >= step:
            cnt -= step
            hits.addRow((d, "%.1f" % (sv/sd), "%.1f" % (se/sd)))
            sv = 0.0
            se = 0.0
            sd = 0.0

    table = DataBrowserWidget(request)
    table.setData(hits)
    return table.render(method="GET")
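
The loop above compresses the per-day series to at most maxentries rows by averaging runs of roughly step consecutive days. The same downsampling isolated as a small function (names are illustrative):

def downsample(values, maxentries=30):
    # average runs of ~step consecutive values, emitting at most
    # maxentries averages
    if maxentries < len(values):
        step = float(len(values)) / maxentries
    else:
        step = 1
    out = []
    total, n, cnt = 0.0, 0, 0.0
    for v in values:
        total += v
        n += 1
        cnt += 1
        if cnt >= step:
            cnt -= step
            out.append(total / n)
            total, n = 0.0, 0
    return out

print(downsample([1, 2, 3, 4], maxentries=2))  # [1.5, 3.5]
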
Example #5
def show_editors(request, pagename, timestamp):
    _ = request.getText

    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    editors = {}
    pages = {}
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            break

        if not request.user.may.read(line.pagename):
            continue

        editor = line.getInterwikiEditorData(request)
        if line.pagename not in pages:
            pages[line.pagename] = 1
            editors[editor] = editors.get(editor, 0) + 1

    editors = [(nr, editor) for editor, nr in editors.iteritems()]
    editors.sort()
    editors.reverse()

    pg = Page.Page(request, pagename)

    dataset = TupleDataset()
    dataset.columns = [Column('editor', label=_("Editor"), align='left'),
                       Column('pages', label=_("Pages"), align='right'),
                       Column('link', label='', align='left')]
    for nr, editor in editors:
        dataset.addRow((render(editor), unicode(nr),
            pg.link_to(request, text=_("Select Author"),
                querystr={
                    'action': 'Despam',
                    'editor': repr(editor),
                })))

    table = DataBrowserWidget(request)
    table.setData(dataset)
    return table.render(method="GET")
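
Stripped of the edit-log plumbing, the counting above tallies each page at most once (for its most recent editor) and sorts editors by that count. The bare pattern, with invented (pagename, editor) pairs:

def count_editors(entries):
    # entries: newest-first (pagename, editor) pairs; only the most
    # recent edit of each page is counted
    editors, seen = {}, set()
    for pagename, editor in entries:
        if pagename not in seen:
            seen.add(pagename)
            editors[editor] = editors.get(editor, 0) + 1
    return sorted(((n, e) for e, n in editors.items()), reverse=True)

print(count_editors([("A", "alice"), ("B", "bob"), ("A", "bob")]))
# [(1, 'bob'), (1, 'alice')]
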
Example #6
def do_admin_browser(request):
    """ Browser for SystemAdmin macro. """
    from MoinMoin.util.dataset import TupleDataset, Column
    _ = request.getText

    data = TupleDataset()
    data.columns = [
        Column('page', label=_('Page')),
        Column('file', label=_('Filename')),
        Column('size', label=_('Size'), align='right'),
    ]

    # iterate over pages that might have attachments
    pages = request.rootpage.getPageList()
    for pagename in pages:
        # check for attachments directory
        page_dir = getAttachDir(request, pagename)
        if os.path.isdir(page_dir):
            # iterate over files of the page
            files = os.listdir(page_dir)
            for filename in files:
                filepath = os.path.join(page_dir, filename)
                data.addRow((
                    (Page(request,
                          pagename).link_to(request,
                                            querystr="action=AttachFile"),
                     wikiutil.escape(pagename, 1)),
                    wikiutil.escape(filename.decode(config.charset)),
                    os.path.getsize(filepath),
                ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data, sort_columns=[0, 1])
        return browser.render(method="GET")

    return ''
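
Outside MoinMoin, the attachment walk reduces to listing a directory and collecting (filename, size) rows; a self-contained version of just that part (the path argument is illustrative):

import os

def attachment_rows(page_dir):
    # one (filename, size-in-bytes) row per regular file in page_dir
    rows = []
    if os.path.isdir(page_dir):
        for filename in sorted(os.listdir(page_dir)):
            filepath = os.path.join(page_dir, filename)
            if os.path.isfile(filepath):
                rows.append((filename, os.path.getsize(filepath)))
    return rows

print(attachment_rows("/tmp"))
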
Example #7
def macro_ShowSmileys(macro):
    _ = macro.request.getText
    fmt = macro.formatter

    # create data description
    data = TupleDataset()
    data.columns = []
    for dummy in range(COLUMNS):
        data.columns.extend([
            Column('markup', label=_('Markup')),
            Column('image', label=_('Display'), align='center'),
            Column('', label=''),
        ])
    data.columns[-1].hidden = 1

    # iterate over smileys, in groups of size COLUMNS
    smileys = config.smileys
    for idx in range(0, len(smileys), COLUMNS):
        row = []
        for off in range(COLUMNS):
            if idx + off < len(smileys):
                markup = smileys[idx + off]
                row.extend([
                    fmt.code(1) + fmt.text(markup) + fmt.code(0),
                    fmt.smiley(markup),
                    '',
                ])
            else:
                row.extend(['&nbsp;'] * 3)
        data.addRow(tuple(row))

    # display table
    if data:
        browser = DataBrowserWidget(macro.request)
        browser.setData(data)
        return browser.render(method="GET")

    return ''
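
The grid layout (rows of COLUMNS cells, padded at the end) is independent of the smiley markup. The chunking in isolation:

def grid_rows(items, columns, pad=""):
    # split items into rows of `columns` cells, padding the last row
    rows = []
    for idx in range(0, len(items), columns):
        row = items[idx:idx + columns]
        row.extend([pad] * (columns - len(row)))
        rows.append(tuple(row))
    return rows

print(grid_rows(list("abcde"), 3, pad="&nbsp;"))
# [('a', 'b', 'c'), ('d', 'e', '&nbsp;')]
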
Example #8
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    # Iterate over users
    for uid in user.getUserList(request):
        account = user.User(request, uid)

        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show groups defined in wiki pages (rendered as links),
        # then groups from other backends.
        grouppage_links = ', '.join([Page(request, group_name).link_to(request) for group_name in wiki_groups] +
                                    other_groups)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text = _('Disable user')
        if account.disabled:
            text = _('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(ret)

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link = unicode(ret)

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data, sort_columns=[0])
        return browser.render()

    # No data
    return ''
Example #9
def execute(pagename, request):
    _ = request.getText
    if not request.user or not request.user.isSuperUser():
        msg = _('Only superuser is allowed to use this action.')
        request.theme.add_msg(msg, "error")
        request.page.send_page()
        return ''
    fmt = request.html_formatter
    language_setup_page = 'LanguageSetup'
    not_translated_system_pages = 'not_translated_system_pages.zip'
    files = AttachFile._get_files(request, language_setup_page)
    if not files:
        msg = _('No page packages found.')
        request.theme.add_msg(msg, "error")
        request.page.send_page()
        return ''
    wiki_languages = list(
        set([lang_file.split('--')[0]
             for lang_file in files]) - set(['00_needs_fixing.zip']))
    wiki_languages.sort()

    lang = request.values.get('language') or 'English'
    target = request.values.get('target') or ''
    msg = ''
    # if a target is given, try to install the package
    if target:
        dummy_pagename, dummy_target, targetpath = AttachFile._access_file(
            language_setup_page, request)
        package = packages.ZipPackage(request, targetpath)
        if package.isPackage():
            if package.installPackage():
                msg = _("Attachment '%(filename)s' installed.") % {
                    'filename': target
                }
            else:
                msg = _("Installation of '%(filename)s' failed.") % {
                    'filename': target
                }
        else:
            msg = _('The file %s is not a MoinMoin package file.') % target

    data = TupleDataset()
    data.columns = [
        Column('page package', label=_('page package')),
        Column('action', label=_('install')),
    ]

    label_install = _("install")
    for pageset_name in i18n.strings.pagesets:
        attachment = "%s--%s.zip" % (lang, pageset_name)
        # not_translated_system_pages are in English
        if attachment.endswith(not_translated_system_pages):
            attachment = 'English_not_translated_system_pages.zip'
        install_link = ''
        querystr = {
            'action': 'language_setup',
            'target': attachment,
            'language': lang
        }
        if AttachFile.exists(request, language_setup_page, attachment):
            install_link = request.page.link_to(request,
                                                label_install,
                                                querystr=querystr)
        data.addRow((pageset_name, install_link))

    table = DataBrowserWidget(request)
    table.setData(data)
    page_table = ''.join(table.format(method='GET'))

    fmt = request.formatter
    lang_links = [
        request.page.link_to_raw(request,
                                 _lang,
                                 querystr={
                                     'action': 'language_setup',
                                     'language': _lang,
                                     'pageset': pageset_name,
                                 }) for _lang in wiki_languages
    ]

    lang_selector = u''.join([
        fmt.paragraph(1),
        _("Choose:"), ' ', ' '.join(lang_links),
        fmt.paragraph(0)
    ])

    title = _("Install language packs for '%s'") % wikiutil.escape(lang)
    request.theme.add_msg(msg, "info")
    request.theme.send_title(title, page=request.page, pagename=pagename)
    request.write(request.formatter.startContent("content"))
    request.write(lang_selector)
    request.write(page_table)
    request.write(request.formatter.endContent())
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
Example #10
def do_user_browser(request):
    """ Browser for SystemAdmin macro. """
    _ = request.getText
    groups = request.groups

    data = TupleDataset()
    data.columns = [
        Column('name', label=_('Username')),
        Column('groups', label=_('Member of Groups')),
        Column('email', label=_('Email')),
        Column('jabber', label=_('Jabber')),
        Column('action', label=_('Action')),
    ]

    class UserAccount(object):
        # namedtuple is >= 2.6 :-(
        def __init__(self, **kw):
            for k, v in kw.items():
                setattr(self, k, v)
        def __repr__(self):
            return "<UserAccount %r>" % self.__dict__

    accounts = []
    for uid in user.getUserList(request):
        # be careful and just create a list of what we really need,
        # not sure if we can keep lots of User objects instantiated
        # in parallel (open files? too big?)
        u = user.User(request, uid)
        accounts.append(UserAccount(name=u.name, email=u.email, jid=u.jid, disabled=u.disabled))

    def sortkey(account):
        # enabled accounts at top, sorted by name
        return (account.disabled, account.name)

    # Iterate over user accounts
    for account in sorted(accounts, key=sortkey):
        account_groups = set(groups.groups_with_member(account.name))
        wiki_groups = set([group for group in account_groups if isinstance(groups[group], WikiGroup)])
        other_groups = list(account_groups - wiki_groups)

        # First show groups defined in wiki pages (rendered as links),
        # then groups from other backends.
        grouppage_links = ', '.join([Page(request, group_name).link_to(request) for group_name in wiki_groups] +
                                    other_groups)

        userhomepage = Page(request, account.name)
        if userhomepage.exists():
            namelink = userhomepage.link_to(request)
        else:
            namelink = wikiutil.escape(account.name)

        # creates the POST data for account disable/enable
        val = "1"
        text = _('Disable user')
        if account.disabled:
            text = _('Enable user')
            val = "0"
            namelink += " (%s)" % _("disabled")

        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='userprofile'))
        ticket = wikiutil.createTicket(request, action='userprofile')
        ret.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
        ret.append(html.INPUT(type='hidden', name='name', value=account.name))
        ret.append(html.INPUT(type='hidden', name='key', value="disabled"))
        ret.append(html.INPUT(type='hidden', name='val', value=val))
        ret.append(html.INPUT(type='submit', name='userprofile', value=text))
        enable_disable_link = unicode(ret)

        # creates the POST data for recoverpass
        url = request.page.url(request)
        ret = html.FORM(action=url)
        ret.append(html.INPUT(type='hidden', name='action', value='recoverpass'))
        ret.append(html.INPUT(type='hidden', name='email', value=account.email))
        ret.append(html.INPUT(type='hidden', name='account_sendmail', value="1"))
        ret.append(html.INPUT(type='hidden', name='sysadm', value="users"))
        ret.append(html.INPUT(type='submit', name='recoverpass', value=_('Mail account data')))
        recoverpass_link = unicode(ret)

        if account.email:
            email_link = (request.formatter.url(1, 'mailto:' + account.email, css='mailto') +
                          request.formatter.text(account.email) +
                          request.formatter.url(0))
        else:
            email_link = ''

        if account.jid:
            jabber_link = (request.formatter.url(1, 'xmpp:' + account.jid, css='mailto') +
                           request.formatter.text(account.jid) +
                           request.formatter.url(0))
        else:
            jabber_link = ''

        data.addRow((
            (request.formatter.rawHTML(namelink), account.name),
            request.formatter.rawHTML(grouppage_links),
            email_link,
            jabber_link,
            recoverpass_link + enable_disable_link
        ))

    if data:
        from MoinMoin.widget.browser import DataBrowserWidget

        browser = DataBrowserWidget(request)
        browser.setData(data)
        return browser.render()

    # No data
    return ''
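
The only functional difference from Example #8 is the pre-sorting of accounts: enabled accounts first, each group alphabetical by name. The key is just a (disabled, name) tuple, since False sorts before True:

accounts = [("zoe", False), ("al", True), ("bob", False)]
for name, disabled in sorted(accounts, key=lambda a: (a[1], a[0])):
    print(name)  # bob, zoe, al
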
Example #11
    def __init__(self, raw, request, **kw):
        self.request = request
        self._first_row = None
        formatter = request.formatter

        # workaround csv.reader deficiency by encoding to utf-8
        # removes empty lines in front of the csv table
        data = raw.encode('utf-8').lstrip('\n').split('\n')

        delimiter = ';'
        # Previous versions of this parser used only ";" as the delimiter (by default).
        # This version tries to sniff the delimiter from the list preferred_delimiters.
        # The Python csv sniffer changed quite a bit from py 2.3 to 2.5.1, so we try to
        # avoid problems in case it does not find a delimiter in the given data: newer
        # versions of the sniffer raise a _csv.Error, while older versions return a
        # whitespace delimiter.
        if data[0]:
            try:
                preferred_delimiters = [',', '\t', ';', ' ', ':']
                delimiter = Sniffer().sniff(
                    data[0], preferred_delimiters).delimiter or ';'
            except Error:
                pass

        visible = None
        hiddenindexes = []
        hiddencols = []
        autofiltercols = []
        staticcols = []
        staticvals = []
        linkcols = []
        quotechar = '\x00'  # can't be entered
        quoting = QUOTE_NONE
        name = None
        hdr = reader([kw.get('format_args', '').strip().encode('utf-8')],
                     delimiter=" ")
        args = hdr.next()

        for arg in args:
            arg = arg.decode('utf-8')
            try:
                key, val = arg.split('=', 1)
            except ValueError:
                # handle compatibility with original 'csv' parser
                if arg.startswith('-'):
                    try:
                        hiddenindexes.append(int(arg[1:]) - 1)
                    except ValueError:
                        pass
                else:
                    delimiter = arg.encode('utf-8')
                continue
            if key == 'separator' or key == 'delimiter':
                delimiter = val.encode('utf-8')
            if key == 'quotechar':
                if val == val.encode('utf-8'):
                    quotechar = val.encode('utf-8')
                    quoting = QUOTE_MINIMAL
            elif key == 'show':
                visible = val.split(',')
            elif key == 'hide':
                hiddencols = val.split(',')
            elif key == 'autofilter':
                autofiltercols = val.split(',')
            elif key == 'name':
                name = val
            elif key == 'static_cols':
                staticcols = val.split(',')
            elif key == 'static_vals':
                staticvals = val.split(',')
            elif key == 'link':
                linkcols = val.split(',')

        if len(staticcols) > len(staticvals):
            staticvals.extend([''] * (len(staticcols) - len(staticvals)))
        elif len(staticcols) < len(staticvals):
            staticvals = staticvals[:len(staticcols)]

        r = reader(data,
                   delimiter=delimiter,
                   quotechar=quotechar,
                   quoting=quoting)
        cols = map(lambda x: x.decode('utf-8'), r.next()) + staticcols

        self._show_header = True

        if cols == staticcols:
            try:
                self._first_row = map(lambda x: x.decode('utf-8'), r.next())
                cols = [None] * len(self._first_row) + staticcols
                self._show_header = False
            except StopIteration:
                pass

        num_entry_cols = len(cols) - len(staticcols)

        if visible is not None:
            for col in cols:
                if col not in visible:
                    hiddencols.append(col)

        linkparse = [False] * len(cols)

        data = TupleDataset(name)
        for colidx in range(len(cols)):
            col = cols[colidx]
            autofilter = col in autofiltercols
            hidden = col in hiddencols or colidx in hiddenindexes
            data.columns.append(
                Column(col, autofilter=autofilter, hidden=hidden))

            linkparse[colidx] = col in linkcols

        for row in self._read_rows(r):
            row = map(lambda x: x.decode('utf-8'), row)
            if len(row) > num_entry_cols:
                row = row[:num_entry_cols]
            elif len(row) < num_entry_cols:
                row.extend([''] * (num_entry_cols - len(row)))
            row += staticvals
            for colidx in range(len(row)):
                item = row[colidx]
                if linkparse[colidx]:
                    try:
                        url, item = item.split(' ', 1)
                        if url == '':
                            display = escape(item)
                        else:
                            display = ''.join([
                                formatter.url(1, url=url),
                                formatter.text(item),
                                formatter.url(0)
                            ])
                    except ValueError:
                        display = escape(item)
                else:
                    display = escape(item)
                row[colidx] = (display, item)
            data.addRow(tuple(row))
        self.data = data
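
The delimiter sniffing at the top of the parser uses the standard library's csv.Sniffer, whose sniff() raises csv.Error when it cannot decide. A minimal reproduction of the fallback behavior described in the comments:

import csv

def guess_delimiter(sample, default=';'):
    # older sniffer versions may return whitespace instead of raising,
    # so fall back to the default in both cases
    try:
        delimiter = csv.Sniffer().sniff(sample, ",\t; :").delimiter
        return delimiter if delimiter and not delimiter.isspace() else default
    except csv.Error:
        return default

print(guess_delimiter("a,b,c"))    # ','
print(guess_delimiter("oneword"))  # ';' (fallback)
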
Example #12
    def history(page, pagename, request):
        # show history as default
        _ = request.getText
        default_count, limit_max_count = request.cfg.history_count[0:2]
        paging = request.cfg.history_paging

        try:
            max_count = int(request.values.get('max_count', default_count))
        except ValueError:
            max_count = default_count
        max_count = max(1, min(max_count, limit_max_count))

        # read in the complete log of this page
        log = editlog.EditLog(request, rootpagename=pagename)

        offset = 0
        paging_info_html = ""
        paging_nav_html = ""
        count_select_html = ""

        f = request.formatter

        if paging:
            log_size = log.lines()

            try:
                offset = int(request.values.get('offset', 0))
            except ValueError:
                offset = 0
            offset = max(min(offset, log_size - 1), 0)

            paging_info_html += f.paragraph(1, css_class="searchstats info-paging-info") + _("Showing page edit history entries from '''%(start_offset)d''' to '''%(end_offset)d''' out of '''%(total_count)d''' entries total.", wiki=True) % {
                'start_offset': log_size - min(log_size, offset + max_count) + 1,
                'end_offset': log_size - offset,
                'total_count': log_size,
            } + f.paragraph(0)

            # generating offset navigating links
            if max_count < log_size or offset != 0:
                offset_links = []
                cur_offset = max_count
                near_count = 5 # request.cfg.pagination_size

                min_offset = max(0, (offset + max_count - 1) / max_count - near_count)
                max_offset = min((log_size - 1) / max_count, offset / max_count + near_count)
                offset_added = False

                def add_offset_link(offset, caption=None):
                    offset_links.append(f.table_cell(1, css_class="info-offset-item") +
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(max_count),
                            }, css_class="info-offset-nav-link", rel="nofollow") + f.text(caption or str(log_size - offset)) + page.link_to(request, on=0) +
                        f.table_cell(0)
                    )

                # link to previous page - only if not at start
                if offset > 0:
                    add_offset_link(((offset - 1) / max_count) * max_count, _("Newer"))

                # link to beginning of event log - if min_offset is not minimal
                if min_offset > 0:
                    add_offset_link(0)
                    # add a gap only if min_offset does not immediately follow the beginning
                    if min_offset > 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))

                # generating near pages links
                for cur_offset in range(min_offset, max_offset + 1):
                    # note that the current offset may not be a multiple of max_count,
                    # so we check here whether to add the current-offset marker
                    if not offset_added and offset <= cur_offset * max_count:
                        # current info history view offset
                        offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))
                        offset_added = True

                    # add link, if not at this offset
                    if offset != cur_offset * max_count:
                        add_offset_link(cur_offset * max_count)

                # link to the last page of event log
                if max_offset < (log_size - 1) / max_count:
                    if max_offset < (log_size - 1) / max_count - 1:
                        offset_links.append(f.table_cell(1, css_class="info-offset-gap") + f.text(u'\u2026') + f.table_cell(0))
                    add_offset_link(((log_size - 1) / max_count) * max_count)

                # special case - if offset is greater than max_offset * max_count
                if offset > max_offset * max_count:
                    offset_links.append(f.table_cell(1, css_class="info-offset-item info-cur-offset") + f.text(str(log_size - offset)) + f.table_cell(0))

                # link to next page
                if offset < (log_size - max_count):
                    add_offset_link(((offset + max_count) / max_count) * max_count, _("Older"))

                # generating html
                paging_nav_html += "".join([
                    f.table(1, css_class="searchpages"),
                    f.table_row(1),
                    "".join(offset_links),
                    f.table_row(0),
                    f.table(0),
                ])

        # generate the max_count switcher
        # (only done if history_count has additional values)
        if len(request.cfg.history_count) > 2:
            max_count_possibilities = list(set(request.cfg.history_count))
            max_count_possibilities.sort()
            max_count_html = []
            cur_count_added = False

            for count in max_count_possibilities:
                # the max_count value may not be in the list of predefined values
                if max_count <= count and not cur_count_added:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item info-cur-count"),
                        f.text(str(max_count)),
                        f.span(0),
                    ]))
                    cur_count_added = True

                # checking for limit_max_count to prevent showing unavailable options
                if max_count != count and count <= limit_max_count:
                    max_count_html.append("".join([
                        f.span(1, css_class="info-count-item"),
                        page.link_to(request, on=1, querystr={
                            'action': 'info',
                            'offset': str(offset),
                            'max_count': str(count),
                            }, css_class="info-count-link", rel="nofollow"),
                        f.text(str(count)),
                        page.link_to(request, on=0),
                        f.span(0),
                    ]))

            count_select_html += "".join([
                f.span(1, css_class="info-count-selector"),
                    f.text(" ("),
                    f.text(_("%s items per page")) % (f.span(1, css_class="info-count-selector info-count-selector-divider") + f.text(" | ") + f.span(0)).join(max_count_html),
                    f.text(")"),
                f.span(0),
            ])

        # open log for this page
        from MoinMoin.util.dataset import TupleDataset, Column

        history = TupleDataset()
        history.columns = [
            Column('rev', label='#', align='right'),
            Column('mtime', label=_('Date'), align='right'),
            Column('size', label=_('Size'), align='right'),
            Column('diff', label='<input type="submit" value="%s">' % (_("Diff"))),
            Column('editor', label=_('Editor'), hidden=not request.cfg.show_names),
            Column('comment', label=_('Comment')),
            Column('action', label=_('Action')),
            ]

        # generate history list

        def render_action(text, query, **kw):
            kw.update(dict(rel='nofollow'))
            return page.link_to(request, text, querystr=query, **kw)

        def render_file_action(text, pagename, filename, request, do):
            url = AttachFile.getAttachUrl(pagename, filename, request, do=do)
            if url:
                f = request.formatter
                link = f.url(1, url) + f.text(text) + f.url(0)
                return link

        may_write = request.user.may.write(pagename)
        may_delete = request.user.may.delete(pagename)

        count = 0
        pgactioncount = 0
        for line in log.reverse():
            count += 1

            if paging and count <= offset:
                continue

            rev = int(line.rev)
            actions = []
            if line.action in ('SAVE', 'SAVENEW', 'SAVE/REVERT', 'SAVE/RENAME', ):
                size = page.size(rev=rev)
                actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
                if pgactioncount == 0:
                    rchecked = ' checked="checked"'
                    lchecked = ''
                elif pgactioncount == 1:
                    lchecked = ' checked="checked"'
                    rchecked = ''
                else:
                    lchecked = rchecked = ''
                diff = '<input type="radio" name="rev1" value="%d"%s><input type="radio" name="rev2" value="%d"%s>' % (rev, lchecked, rev, rchecked)
                if rev > 1:
                    diff += render_action(' ' + _('to previous'), {'action': 'diff', 'rev1': rev-1, 'rev2': rev})
                comment = line.comment
                if not comment:
                    if '/REVERT' in line.action:
                        comment = _("Revert to revision %(rev)d.") % {'rev': int(line.extra)}
                    elif '/RENAME' in line.action:
                        comment = _("Renamed from '%(oldpagename)s'.") % {'oldpagename': line.extra}
                pgactioncount += 1
            else: # ATT*
                rev = '-'
                diff = '-'

                filename = wikiutil.url_unquote(line.extra)
                comment = "%s: %s %s" % (line.action, filename, line.comment)
                if AttachFile.exists(request, pagename, filename):
                    size = AttachFile.size(request, pagename, filename)
                    actions.append(render_file_action(_('view'), pagename, filename, request, do='view'))
                    actions.append(render_file_action(_('get'), pagename, filename, request, do='get'))
                    if may_delete:
                        actions.append(render_file_action(_('del'), pagename, filename, request, do='del'))
                    if may_write:
                        actions.append(render_file_action(_('edit'), pagename, filename, request, do='modify'))
                else:
                    size = 0

            history.addRow((
                rev,
                request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs)),
                str(size),
                diff,
                line.getEditor(request) or _("N/A"),
                wikiutil.escape(comment) or '&nbsp;',
                "&nbsp;".join(a for a in actions if a),
            ))
            if (count >= max_count + offset) or (paging and count >= log_size):
                break

        # print version history
        from MoinMoin.widget.browser import DataBrowserWidget

        request.write(unicode(html.H2().append(_('Revision History'))))

        if not count: # there was no entry in logfile
            request.write(_('No log entries found.'))
            return

        history_table = DataBrowserWidget(request)
        history_table.setData(history)

        div = html.DIV(id="page-history")
        div.append(html.INPUT(type="hidden", name="action", value="diff"))
        div.append(history_table.render(method="GET"))

        form = html.FORM(method="GET", action="")
        if paging:
            form.append(f.div(1, css_class="info-paging-info") + paging_info_html + count_select_html + f.div(0))
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-top"),
                paging_nav_html,
                f.div(0),
            ]))
        form.append(div)
        if paging:
            form.append("".join([
                f.div(1, css_class="info-paging-nav info-paging-nav-bottom"),
                paging_nav_html,
                f.div(0)
            ]))
        request.write(unicode(form))
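
The offset arithmetic in the paging block is standard windowed pagination: link to at most near_count pages on each side of the current one, clamped to the log's bounds. The window computation in isolation (written with // so it behaves the same on Python 2 and 3):

def page_window(offset, max_count, log_size, near_count=5):
    # page indexes (in units of max_count) to link to
    min_page = max(0, (offset + max_count - 1) // max_count - near_count)
    max_page = min((log_size - 1) // max_count, offset // max_count + near_count)
    return range(min_page, max_page + 1)

print(list(page_window(offset=40, max_count=20, log_size=1000)))
# [0, 1, 2, 3, 4, 5, 6, 7]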