Example #1
    def _format_reminder(self,
                         req,
                         ticket,
                         id,
                         time,
                         author,
                         origin,
                         description,
                         delete_button=True):
        now = to_datetime(None)
        time = to_datetime(time)
        if now >= time:
            when = tag(tag.strong("Right now"), " (pending)")
        else:
            when = tag("In ", tag.strong(pretty_timedelta(time)), " (",
                       format_date(time), ")")

        if description:
            context = Context.from_request(req, ticket.resource)
            desc = tag.div(format_to_oneliner(self.env, context, description),
                           class_="description")
        else:
            desc = tag()

        return tag(
            self._reminder_delete_form(req, id) if delete_button else None,
            when, " - added by ",
            tag.em(Chrome(self.env).authorinfo(req, author)), " ",
            tag.span(pretty_timedelta(origin),
                     title=format_datetime(
                         origin, req.session.get('datefmt', 'iso8601'),
                         req.tz)), " ago.", desc)
Example #2
    def get_data(self, limit=100, offset=0):
        cursor = self.env.get_db_cnx().cursor()
        cursor2 = self.env.get_db_cnx().cursor()

        cursor.execute(
            "SELECT buildqueue.id, owner, replace(replace(browseurl, '%OWNER%', buildqueue.owner), '%REVISION%', revision), revision, status, startdate, CASE WHEN enddate < startdate THEN startdate ELSE enddate END, description FROM buildqueue, portrepositories WHERE repository = portrepositories.id AND buildqueue.status >= 10 "
            + self._get_filter() +
            " ORDER BY buildqueue.id DESC LIMIT %s OFFSET %s", (limit, offset))

        for queueid, owner, repository, revision, status, startdate, enddate, description in cursor:
            build = Build(self.env)
            build.queueid = queueid
            build.owner = owner
            build.repository = repository
            build.revision = revision
            build.setStatus(status)
            build.runtime = pretty_timedelta(from_utimestamp(startdate),
                                             from_utimestamp(enddate))
            build.startdate = startdate
            build.enddate = enddate
            build.description = description

            cursor2.execute(
                "SELECT id, buildgroup, portname, pkgversion, status, buildstatus, buildreason, buildlog, wrkdir, startdate, CASE WHEN enddate < startdate THEN extract(epoch from now())*1000000 ELSE enddate END FROM builds WHERE queueid = %s ORDER BY id",
                (queueid, ))

            lastport = None
            for id, group, portname, pkgversion, status, buildstatus, buildreason, buildlog, wrkdir, startdate, enddate in cursor2:
                port = Port(self.env)
                port.id = id
                port.group = group
                port.portname = portname
                port.pkgversion = pkgversion
                port.buildstatus = buildstatus
                port.buildlog = buildlog
                port.wrkdir = wrkdir
                port.runtime = pretty_timedelta(from_utimestamp(startdate),
                                                from_utimestamp(enddate))
                port.startdate = startdate
                port.enddate = enddate
                port.directory = '/~%s/%s-%s' % (owner, queueid, id)

                if buildstatus:
                    port.buildstatus = buildstatus.lower()
                if buildstatus and not buildreason:
                    buildreason = buildstatus.lower()

                port.setStatus(status, buildreason)

                if self.uniqueports and lastport == portname:
                    continue

                if lastport != portname:
                    port.head = True
                    lastport = portname

                build.ports.append(port)

            yield build
Example #3
    def get_work_log(self, pid, username=None, mode='all'):
        db = self.env.get_read_db()
        cursor = db.cursor()
        if mode == 'user':
            assert username is not None
            cursor.execute('SELECT wl.worker, wl.starttime, wl.endtime, wl.ticket, t.summary, t.status, wl.comment '
                           'FROM work_log wl '
                           'JOIN ticket t ON wl.ticket=t.id '
                           'WHERE t.project_id=%s AND wl.worker=%s '
                           'ORDER BY wl.lastchange DESC',
                           (pid, username))
        elif mode == 'latest':
            cursor.execute('''
                SELECT worker, starttime, endtime, ticket, summary, status, comment 
                FROM (
                    SELECT wl.worker, wl.starttime, wl.endtime, wl.ticket, wl.comment, wl.lastchange,
                    MAX(wl.lastchange) OVER (PARTITION BY wl.worker) latest,
                    t.summary, t.status
                    FROM work_log wl
                    JOIN ticket t ON wl.ticket=t.id AND project_id=%s
                ) wll
                WHERE lastchange=latest
                ORDER BY lastchange DESC, worker
               ''', (pid,))
        else:
            cursor.execute('SELECT wl.worker, wl.starttime, wl.endtime, wl.ticket, t.summary, t.status, wl.comment '
                           'FROM work_log wl '
                           'JOIN ticket t ON wl.ticket=t.id '
                           'WHERE t.project_id=%s '
                           'ORDER BY wl.lastchange DESC, wl.worker',
                           (pid,))

        rv = []
        for user,starttime,endtime,ticket,summary,status,comment in cursor:
            started = to_datetime(starttime)

            if endtime != 0:
                finished = to_datetime(endtime)
                delta = 'Worked for %s (between %s and %s)' % (
                         pretty_timedelta(started, finished),
                         format_datetime(started), format_datetime(finished))
            else:
                finished = 0
                delta = 'Started %s ago (%s)' % (
                         pretty_timedelta(started),
                         format_datetime(started))

            rv.append({'user': user,
                       'starttime': started,
                       'endtime': finished,
                       'delta': delta,
                       'ticket': ticket,
                       'summary': summary,
                       'status': status,
                       'comment': comment})
        return rv
        
Example #4
def milestone_to_hdf(env, db, req, milestone):
    hdf = {'name': milestone.name,
           'href': req.href.milestone(milestone.name)}
    if milestone.description:
        hdf['description'] = milestone.description
    if milestone.due:
        hdf['due'] = milestone.due
        hdf['due_date'] = format_date(milestone.due)
        hdf['due_delta'] = pretty_timedelta(milestone.due + timedelta(60))
        hdf['late'] = milestone.is_late
    if milestone.completed:
        hdf['completed'] = milestone.completed
        hdf['completed_date'] = format_datetime(milestone.completed)
        hdf['completed_delta'] = pretty_timedelta(milestone.completed)
    return hdf
Example #5
def milestone_to_hdf(env, db, req, milestone):
    hdf = {'name': milestone.name, 'href': req.href.milestone(milestone.name)}
    if milestone.description:
        hdf['description_source'] = milestone.description
        hdf['description'] = wiki_to_html(milestone.description, env, req, db)
    if milestone.due:
        hdf['due'] = milestone.due
        hdf['due_date'] = format_date(milestone.due)
        hdf['due_delta'] = pretty_timedelta(milestone.due + 86400)
        hdf['late'] = milestone.is_late
    if milestone.completed:
        hdf['completed'] = milestone.completed
        hdf['completed_date'] = format_datetime(milestone.completed)
        hdf['completed_delta'] = pretty_timedelta(milestone.completed)
    return hdf
Example #6
    def _render_history(self, req, db, page):
        """Extract the complete history for a given page and stores it in the
        HDF.

        This information is used to present a changelog/history for a given
        page.
        """
        req.perm.assert_permission('WIKI_VIEW')

        if not page.exists:
            raise TracError, "Page %s does not exist" % page.name

        self._set_title(req, page, u'历史')

        history = []
        for version, t, author, comment, ipnr in page.get_history():
            history.append({
                'url': req.href.wiki(page.name, version=version),
                'diff_url': req.href.wiki(page.name, version=version,
                                          action='diff'),
                'version': version,
                'time': format_datetime(t),
                'time_delta': pretty_timedelta(t),
                'author': author,
                'comment': wiki_to_oneliner(comment or '', self.env, db),
                'ipaddr': ipnr
            })
        req.hdf['wiki.history'] = history
Example #7
def attachment_to_hdf(env, req, db, attachment):
    """
    This function has been removed in 0.11; this is copied from 0.10, then modified to
    work with 0.11
    """
    if not db:
        db = env.get_db_cnx()
    hdf = {
        'filename': attachment.filename,
        'description': wiki_to_oneliner(attachment.description, env, db, req=req),
        'author': attachment.author,
        'ipnr': attachment.ipnr,
        'size': pretty_size(attachment.size),
        'time': format_datetime(attachment.date),
        'age': pretty_timedelta(attachment.date),
        'href': AttachmentModule(env).get_resource_url(attachment.resource, req.href)
    }
    return hdf
Example #8
    def _render_history(self, req, db, page):
        """Extract the complete history for a given page and stores it in the
        HDF.

        This information is used to present a changelog/history for a given
        page.
        """
        req.perm.assert_permission('WIKI_VIEW')

        if not page.exists:
            raise TracError, "Page %s does not exist" % page.name

        self._set_title(req, page, u'历史')

        history = []
        for version, t, author, comment, ipnr in page.get_history():
            history.append({
                'url': req.href.wiki(page.name, version=version),
                'diff_url': req.href.wiki(page.name, version=version,
                                          action='diff'),
                'version': version,
                'time': format_datetime(t),
                'time_delta': pretty_timedelta(t),
                'author': author,
                'comment': wiki_to_oneliner(comment or '', self.env, db),
                'ipaddr': ipnr
            })
        req.hdf['wiki.history'] = history
Example #9
 def pretty_dateinfo(date, format=None, dateonly=False):
     if not date:
         return ''
     if format == 'date':
         absolute = user_time(req, format_date, date)
     else:
         absolute = user_time(req, format_datetime, date)
     now = datetime.datetime.now(localtz)
     relative = pretty_timedelta(date, now)
     if not format:
         format = req.session.get('dateinfo',
                                  self.default_dateinfo_format)
     in_or_ago = _("in %(relative)s", relative=relative) \
                 if date > now else \
                 _("%(relative)s ago", relative=relative)
     if format == 'relative':
         label = in_or_ago if not dateonly else relative
         title = absolute
     else:
         if dateonly:
             label = absolute
         elif req.lc_time == 'iso8601':
             label = _("at %(iso8601)s", iso8601=absolute)
         else:
             label = _("on %(date)s at %(time)s",
                       date=user_time(req, format_date, date),
                       time=user_time(req, format_time, date))
         title = in_or_ago
     return tag.span(label, title=title)
Example #10
 def pretty_dateinfo(date, format=None, dateonly=False):
     absolute = user_time(req, format_datetime, date)
     relative = pretty_timedelta(date)
     if not format:
         format = req.session.get(
             'dateinfo',
             Chrome(self.env).default_dateinfo_format)
     if format == 'absolute':
         if dateonly:
             label = absolute
         elif req.lc_time == 'iso8601':
             label = _("at %(iso8601)s", iso8601=absolute)
         else:
             label = _("on %(date)s at %(time)s",
                       date=user_time(req, format_date, date),
                       time=user_time(req, format_time, date))
         title = _("See timeline %(relativetime)s ago",
                   relativetime=relative)
     else:
         label = _("%(relativetime)s ago", relativetime=relative) \
                 if not dateonly else relative
         title = _("See timeline at %(absolutetime)s",
                   absolutetime=absolute)
     return self.get_timeline_link(req,
                                   date,
                                   label,
                                   precision='second',
                                   title=title)
Example #11
 def _populate_summaries(self):
     """Returns all summary records for the given changeset."""
     summaries = []
     cursor = self.db.cursor()
     cursor.execute("""
         SELECT status, reviewer, summary, time
         FROM codereviewer
         WHERE repo='%s' AND changeset='%s'
         ORDER BY time ASC;
         """ % (self.repo,self.changeset))
     for status,reviewer,summary,when in cursor:
         pretty_when = time.strftime('%Y-%m-%d %H:%M',
             time.localtime(long(when) / self.EPOCH_MULTIPLIER))
         pretty_when += ' (%s ago)' % pretty_timedelta(when)
         summaries.append({
             'repo': self.repo,
             'changeset': self.changeset,
             'status': self.decode(status) or '',
             'reviewer': reviewer,
             'summary': summary,
             'html_summary': self._wiki_to_html(summary),
             'when': when,
             'pretty_when': pretty_when,
         })
     self._summaries = summaries
Example #12
 def _render_link(self, context, name, label, extra=''):
     if not (name or extra):
         return tag()
     try:
         milestone = Milestone(self.env, name)
     except ResourceNotFound:
         milestone = None
     # Note: the above should really not be needed, `Milestone.exists`
     # should simply be false if the milestone doesn't exist in the db
     # (related to #4130)
     href = context.href.milestone(name)
     exists = milestone and milestone.exists
     if exists:
         if 'MILESTONE_VIEW' in context.perm(milestone.resource):
             title = None
             if hasattr(context, 'req'):
                 if milestone.is_completed:
                     title = _("Completed %(duration)s ago (%(date)s)",
                               duration=pretty_timedelta(
                                   milestone.completed),
                               date=user_time(context.req, format_datetime,
                                              milestone.completed))
                 elif milestone.is_late:
                     title = _("%(duration)s late (%(date)s)",
                               duration=pretty_timedelta(milestone.due),
                               date=user_time(context.req, format_datetime,
                                              milestone.due))
                 elif milestone.due:
                     title = _("Due in %(duration)s (%(date)s)",
                               duration=pretty_timedelta(milestone.due),
                               date=user_time(context.req, format_datetime,
                                              milestone.due))
                 else:
                     title = _("No date set")
             closed = 'closed ' if milestone.is_completed else ''
             return tag.a(label,
                          class_='%smilestone' % closed,
                          href=href + extra,
                          title=title)
     elif 'MILESTONE_CREATE' in context.perm(self.realm, name):
         return tag.a(label,
                      class_='missing milestone',
                      href=href + extra,
                      rel='nofollow')
     return tag.a(label, class_=classes('milestone', missing=not exists))
Example #13
 def render_timeline_event(self, context, field, event):
     name, url, result, message, started, completed = event[3]
     if field == 'title':
         return tag('Build "', tag.em(name), '" (%s)' % result.lower())
     elif field == 'description':
         return "%s duration %s" % \
                (message, pretty_timedelta(started, completed))
     elif field == 'url':
         return url
Example #14
    def _render_view(self, req, db, page):
        page_name = self._set_title(req, page, '')
        if page.name == 'WikiStart':
            req.hdf['title'] = ''

        version = req.args.get('version')
        if version:
            # Ask web spiders to not index old versions
            req.hdf['html.norobots'] = 1

        # Add registered converters
        for conversion in Mimeview(
                self.env).get_supported_conversions('text/x-trac-wiki'):
            conversion_href = req.href.wiki(page.name,
                                            version=version,
                                            format=conversion[0])
            add_link(req, 'alternate', conversion_href, conversion[1],
                     conversion[3])

        latest_page = WikiPage(self.env, page.name)
        req.hdf['wiki'] = {
            'exists': page.exists,
            'version': page.version,
            'latest_version': latest_page.version,
            'readonly': page.readonly
        }
        if page.exists:
            req.hdf['wiki'] = {
                'page_html': wiki_to_html(page.text, self.env, req),
                'history_href': req.href.wiki(page.name, action='history'),
                'last_change_href': req.href.wiki(page.name, action='diff',
                                                  version=page.version)
            }
            if version:
                req.hdf['wiki'] = {
                    'comment_html': wiki_to_oneliner(page.comment or '--',
                                                     self.env, db),
                    'author': page.author,
                    'age': pretty_timedelta(page.time)
                }
        else:
            if not req.perm.has_permission('WIKI_CREATE'):
                raise HTTPNotFound('Page %s not found', page.name)
            req.hdf['wiki.page_html'] = html.P('Describe "%s" here' %
                                               page_name)

        # Show attachments
        req.hdf['wiki.attachments'] = attachments_to_hdf(
            self.env, req, db, 'wiki', page.name)
        if req.perm.has_permission('WIKI_MODIFY'):
            attach_href = req.href.attachment('wiki', page.name)
            req.hdf['wiki.attach_href'] = attach_href
Example #15
    def _process_page(self, req, page):
        post_content = content_from_wiki_markup(page.text)
        creation_date = creation_date_of_page(page)

        return dict(
            title=self._blogpost_title_html(req, page),
            url=req.href.wiki(page.name),
            creation_date=format_datetime(creation_date),
            delta=pretty_timedelta(creation_date, now()),
            content=self._blogpost_to_html(req, page),
        )
Example #16
 def _format_screenshot(self, context, screenshot):
     screenshot['author'] = format_to_oneliner(self.env, context,
                                               screenshot['author'])
     screenshot['name'] = format_to_oneliner(self.env, context,
                                             screenshot['name'])
     screenshot['description'] = format_to_oneliner(
         self.env, context, screenshot['description'])
     screenshot['width'] = int(screenshot['width'])
     screenshot['height'] = int(screenshot['height'])
     screenshot['time'] = pretty_timedelta(
         to_datetime(screenshot['time'], utc))
     return screenshot
Example #17
 def _render_link(self, context, name, label, extra=''):
     if not (name or extra):
         return tag()
     try:
         milestone = Milestone(self.env, name)
     except ResourceNotFound:
         milestone = None
     # Note: the above should really not be needed, `Milestone.exists`
     # should simply be false if the milestone doesn't exist in the db
     # (related to #4130)
     href = context.href.milestone(name)
     if milestone and milestone.exists:
         if 'MILESTONE_VIEW' in context.perm(milestone.resource):
             title = None
             if hasattr(context, 'req'):
                 if milestone.is_completed:
                     title = _(
                         'Completed %(duration)s ago (%(date)s)',
                         duration=pretty_timedelta(milestone.completed),
                         date=user_time(context.req, format_datetime,
                                        milestone.completed))
                 elif milestone.is_late:
                     title = _('%(duration)s late (%(date)s)',
                               duration=pretty_timedelta(milestone.due),
                               date=user_time(context.req, format_datetime,
                                              milestone.due))
                 elif milestone.due:
                     title = _('Due in %(duration)s (%(date)s)',
                               duration=pretty_timedelta(milestone.due),
                               date=user_time(context.req, format_datetime,
                                              milestone.due))
                 else:
                     title = _('No date set')
             closed = 'closed ' if milestone.is_completed else ''
             return tag.a(label, class_='%smilestone' % closed,
                          href=href + extra, title=title)
     elif 'MILESTONE_CREATE' in context.perm(self.realm, name):
         return tag.a(label, class_='missing milestone', href=href + extra,
                      rel='nofollow')
     return tag.a(label, class_='missing milestone')
Example #18
def attachment_to_hdf(env, req, db, attachment):
    if not db:
        db = env.get_db_cnx()
    hdf = {
        'filename': attachment.filename,
        'description': wiki_to_oneliner(attachment.description, env, db),
        'author': attachment.author,
        'ipnr': attachment.ipnr,
        'size': pretty_size(attachment.size),
        'time': format_datetime(attachment.time),
        'age': pretty_timedelta(attachment.time),
        'href': attachment.href(req)
    }
    return hdf
Example #19
 def _pretty_dateinfo(date, format=None, dateonly=False):
     absolute = format_datetime(date, tzinfo=req.tz)
     relative = pretty_timedelta(date)
     if format == 'absolute':
         label = absolute
         # TRANSLATOR: Sync with same msgid in Trac 0.13, please.
         title = _("%(relativetime)s ago", relativetime=relative)
     else:
         if dateonly:
             label = relative
         else:
             label = _("%(relativetime)s ago", relativetime=relative)
         title = absolute
     return tag.span(label, title=title)
Example #20
 def _pretty_dateinfo(date, format=None, dateonly=False):
     absolute = format_datetime(date, tzinfo=req.tz)
     relative = pretty_timedelta(date)
     if format == 'absolute':
         label = absolute
         # TRANSLATOR: Sync with same msgid in Trac 0.13, please.
         title = _("%(relativetime)s ago", relativetime=relative)
     else:
         if dateonly:
             label = relative
         else:
             label = _("%(relativetime)s ago", relativetime=relative)
         title = absolute
     return tag.span(label, title=title)
Example #21
def attachment_to_hdf(env, req, db, attachment):
    if not db:
        db = env.get_db_cnx()
    hdf = {
        'filename': attachment.filename,
        'description': wiki_to_oneliner(attachment.description, env, db),
        'author': attachment.author,
        'ipnr': attachment.ipnr,
        'size': pretty_size(attachment.size),
        'time': format_datetime(attachment.time),
        'age': pretty_timedelta(attachment.time),
        'href': attachment.href(req)
    }
    return hdf
Example #22
def get_changes(env, repos, revs, full=None, req=None, format=None):
    db = env.get_db_cnx()
    changes = {}
    for rev in revs:
        try:
            changeset = repos.get_changeset(rev)
        except NoSuchChangeset:
            changes[rev] = {}
            continue

        wiki_format = env.config['changeset'].getbool('wiki_format_messages')
        message = changeset.message or '--'
        absurls = (format == 'rss')
        if wiki_format:
            shortlog = wiki_to_oneliner(message,
                                        env,
                                        db,
                                        shorten=True,
                                        absurls=absurls)
        else:
            shortlog = Markup.escape(shorten_line(message))

        if full:
            if wiki_format:
                message = wiki_to_html(message,
                                       env,
                                       req,
                                       db,
                                       absurls=absurls,
                                       escape_newlines=True)
            else:
                message = html.PRE(message)
        else:
            message = shortlog

        if format == 'rss':
            if isinstance(shortlog, Markup):
                shortlog = shortlog.plaintext(keeplinebreaks=False)
            message = unicode(message)

        changes[rev] = {
            'date_seconds': changeset.date,
            'date': format_datetime(changeset.date),
            'age': pretty_timedelta(changeset.date),
            'author': changeset.author or 'anonymous',
            'message': message,
            'shortlog': shortlog,
        }
    return changes
Example #23
    def _format_reminder(self, req, ticket, id, time, author, origin, description, delete_button=True):
        now = to_datetime(None)
        time = to_datetime(time)
        if now >= time:
            when = tag(tag.strong("Right now"), " (pending)")
        else:
            when = tag("In ", tag.strong(pretty_timedelta(time)), " (", format_date(time), ")")

        if description:
            context = Context.from_request(req, ticket.resource)
            desc = tag.div(format_to_oneliner(self.env, context, description), class_="description")
        else:
            desc = tag()

        return tag(self._reminder_delete_form(req, id) if delete_button else None, when, " - added by ", tag.em(Chrome(self.env).authorinfo(req, author)), " ", tag.span(pretty_timedelta(origin), title=format_datetime(origin, req.session.get('datefmt', 'iso8601'), req.tz)), " ago.", desc)
Example #24
    def _render_view(self, req, db, page):
        page_name = self._set_title(req, page, '')
        if page.name == 'WikiStart':
            req.hdf['title'] = ''

        version = req.args.get('version')
        if version:
            # Ask web spiders to not index old versions
            req.hdf['html.norobots'] = 1

        # Add registered converters
        for conversion in Mimeview(self.env).get_supported_conversions(
                                             'text/x-trac-wiki'):
            conversion_href = req.href.wiki(page.name, version=version,
                                            format=conversion[0])
            add_link(req, 'alternate', conversion_href, conversion[1],
                     conversion[3])

        latest_page = WikiPage(self.env, page.name)
        req.hdf['wiki'] = {'exists': page.exists,
                           'version': page.version,
                           'latest_version': latest_page.version,
                           'readonly': page.readonly}
        if page.exists:
            req.hdf['wiki'] = {
                'page_html': wiki_to_html(page.text, self.env, req),
                'history_href': req.href.wiki(page.name, action='history'),
                'last_change_href': req.href.wiki(page.name, action='diff',
                                                  version=page.version)
                }
            if version:
                req.hdf['wiki'] = {
                    'comment_html': wiki_to_oneliner(page.comment or '--',
                                                     self.env, db),
                    'author': page.author,
                    'age': pretty_timedelta(page.time)
                    }
        else:
            if not req.perm.has_permission('WIKI_CREATE'):
                raise HTTPNotFound('Page %s not found', page.name)
            req.hdf['wiki.page_html'] = html.P(u'创建 "%s" ' % page_name)

        # Show attachments
        req.hdf['wiki.attachments'] = attachments_to_hdf(self.env, req, db,
                                                         'wiki', page.name)
        if req.perm.has_permission('WIKI_MODIFY'):
            attach_href = req.href.attachment('wiki', page.name)
            req.hdf['wiki.attach_href'] = attach_href
Example #25
def GlobalBuildqueueIterator(env, req):
    cursor = env.get_db_cnx().cursor()

    if req.args.get('group'):
        group = req.args.get('group')
    else:
        group = ''

    cursor.execute(
        "SELECT builds.id, builds.buildgroup, builds.portname, builds.pkgversion, builds.status, builds.buildstatus, builds.buildreason, builds.buildlog, builds.wrkdir, builds.startdate, CASE WHEN builds.enddate < builds.startdate THEN extract(epoch from now())*1000000 ELSE builds.enddate END, buildqueue.id, buildqueue.priority, buildqueue.owner FROM builds, buildqueue WHERE buildqueue.id = builds.queueid AND builds.status < 90 AND (builds.buildgroup = %s OR %s = '') ORDER BY builds.status DESC, buildqueue.priority, builds.id DESC LIMIT 50",
        (group, group))

    lastport = None
    for id, group, portname, pkgversion, status, buildstatus, buildreason, buildlog, wrkdir, startdate, enddate, queueid, priority, owner in cursor:
        port = Port(env)
        port.id = id
        port.group = group
        port.portname = portname
        port.pkgversion = pkgversion
        port.buildstatus = buildstatus
        port.buildlog = buildlog
        port.wrkdir = wrkdir
        port.runtime = pretty_timedelta(from_utimestamp(startdate),
                                        from_utimestamp(enddate))
        port.startdate = startdate
        port.enddate = enddate
        port.directory = '/~%s/%s-%s' % (owner, queueid, id)
        port.queueid = queueid
        port.owner = owner
        port.setPriority(priority)

        if buildstatus:
            port.buildstatus = buildstatus.lower()
        if buildstatus and not buildreason:
            buildreason = buildstatus.lower()

        if owner == req.authname or status != 20:
            port.highlight = True

        port.setStatus(status, buildreason)

        if lastport != portname:
            port.head = True
            lastport = portname

        yield port
Example #26
def attachment_to_hdf(env, req, db, attachment):
    """
    This function has been removed in 0.11; this is copied from 0.10, then modified to
    work with 0.11
    """
    if not db:
        db = env.get_db_cnx()
    hdf = {
        'filename': attachment.filename,
        'description': wiki_to_oneliner(attachment.description, env, db, req=req),
        'author': attachment.author,
        'ipnr': attachment.ipnr,
        'size': pretty_size(attachment.size),
        'time': format_datetime(attachment.date),
        'age': pretty_timedelta(attachment.date),
        'href': AttachmentModule(env).get_resource_url(attachment.resource, req.href)
    }
    return hdf
Example #27
 def _process_log(self, req):
     """Handle AJAX log requests"""
     try:
         rev = int(req.args['logrev'])
         repos = self.env.get_repository(req.authname)
         chgset = repos.get_changeset(rev)
         wikimsg = wiki_to_html(chgset.message, self.env, req, None, 
                                True, False)
         data = {
             'chgset': True,
             'revision': rev,
             'time': format_datetime(chgset.date),
             'age': pretty_timedelta(chgset.date, None, 3600),
             'author': chgset.author or 'anonymous',
             'message': wikimsg, 
         }
         return 'revtree_log.html', {'log': data}, 'application/xhtml+xml'
     except Exception, e:
         raise TracError, "Invalid revision log request: %s" % e
Example #28
 def pretty_dateinfo(date, format=None, dateonly=False):
     if not date:
         return ''
     if format == 'date':
         absolute = user_time(req, format_date, date)
     else:
         absolute = user_time(req, format_datetime, date)
     now = datetime.now(localtz)
     relative = pretty_timedelta(date, now)
     if not format:
         format = req.session.get('dateinfo',
                      Chrome(self.env).default_dateinfo_format)
     if format == 'relative':
         if date > now:
             label = _("in %(relative)s", relative=relative) \
                     if not dateonly else relative
             title = _("on %(date)s at %(time)s",
                       date=user_time(req, format_date, date),
                       time=user_time(req, format_time, date))
             return tag.span(label, title=title)
         else:
             label = _("%(relative)s ago", relative=relative) \
                     if not dateonly else relative
             title = _("See timeline at %(absolutetime)s",
                       absolutetime=absolute)
     else:
         if dateonly:
             label = absolute
         elif req.lc_time == 'iso8601':
             label = _("at %(iso8601)s", iso8601=absolute)
         elif format == 'date':
             label = _("on %(date)s", date=absolute)
         else:
             label = _("on %(date)s at %(time)s",
                       date=user_time(req, format_date, date),
                       time=user_time(req, format_time, date))
         if date > now:
             title = _("in %(relative)s", relative=relative)
             return tag.span(label, title=title)
         title = _("See timeline %(relativetime)s ago",
                   relativetime=relative)
     return self.get_timeline_link(req, date, label,
                                   precision='second', title=title)
Example #29
 def _process_log(self, req):
     """Handle AJAX log requests"""
     try:
         rev = int(req.args['logrev'])
         repos = self.env.get_repository()
         chgset = repos.get_changeset(rev)
         wikimsg = wiki_to_html(chgset.message, self.env, req, None, True,
                                False)
         data = {
             'chgset': True,
             'revision': rev,
             'time': format_datetime(chgset.date),
             'age': pretty_timedelta(chgset.date, None, 3600),
             'author': chgset.author or 'anonymous',
             'message': wikimsg,
         }
         return 'revtree_log.html', {'log': data}, 'application/xhtml+xml'
     except Exception, e:
         raise TracError, "Invalid revision log request: %s" % e
Example #30
def get_changes(env, repos, revs, full=None, req=None, format=None):
    db = env.get_db_cnx()
    changes = {}
    for rev in revs:
        try:
            changeset = repos.get_changeset(rev)
        except NoSuchChangeset:
            changes[rev] = {}
            continue

        wiki_format = env.config['changeset'].getbool('wiki_format_messages')
        message = changeset.message or '--'
        absurls = (format == 'rss')
        if wiki_format:
            shortlog = wiki_to_oneliner(message, env, db,
                                        shorten=True, absurls=absurls)
        else:
            shortlog = Markup.escape(shorten_line(message))

        if full:
            if wiki_format:
                message = wiki_to_html(message, env, req, db,
                                       absurls=absurls, escape_newlines=True)
            else:
                message = html.PRE(message)
        else:
            message = shortlog

        if format == 'rss':
            if isinstance(shortlog, Markup):
                shortlog = shortlog.plaintext(keeplinebreaks=False)
            message = unicode(message)

        changes[rev] = {
            'date_seconds': changeset.date,
            'date': format_datetime(changeset.date),
            'age': pretty_timedelta(changeset.date),
            'author': changeset.author or 'anonymous',
            'message': message, 'shortlog': shortlog,
        }
    return changes
Example #31
 def _process_log(self, req):
     """Handle AJAX log requests"""
     try:
         rev = int(req.args["rev"])
         repos = self.env.get_repository(req.authname)
         chgset = repos.get_changeset(rev)
         wikimsg = wiki_to_html(chgset.message, self.env, req, None, True, False)
         # FIXME: check if there is a better way to discard ellipsis
         #        which are not valid in pure XML
         wikimsg = Markup(wikimsg.replace("...", ""))
         req.hdf["changeset"] = {
             "chgset": True,
             "revision": rev,
             "time": format_datetime(chgset.date),
             "age": pretty_timedelta(chgset.date, None, 3600),
             "author": chgset.author or "anonymous",
             "message": wikimsg,
         }
         return "revtree_log.cs", "application/xhtml+xml"
     except Exception, e:
         raise TracError, "Invalid revision log request: %s" % e
Example #32
 def _generate_attachmentflags_fieldset(self,
                                        readonly=True,
                                        current_flags=None,
                                        form=False):
     fields = Fragment()
     for flag in self.known_flags:
         flagid = 'flag_' + flag
         if current_flags and flag in current_flags:
             date = datetime.datetime.fromtimestamp(
                 current_flags[flag]["updated_on"], utc)
             text = tag.span(
                 tag.strong(flag), " set by ",
                 tag.em(current_flags[flag]["updated_by"]), ", ",
                 tag.span(pretty_timedelta(date),
                          title=format_datetime(date)), " ago")
             if readonly == True:
                 fields += tag.input(text, \
                                     type='checkbox', id=flagid, \
                                     name=flagid, checked="checked",
                                     disabled="true") + tag.br()
             else:
                 fields += tag.input(text, \
                                     type='checkbox', id=flagid, \
                                     name=flagid, checked="checked") + tag.br()
         else:
             if readonly == True:
                 fields += tag.input(flag, \
                                     type='checkbox', id=flagid, \
                                     name=flagid, disabled="true") + tag.br()
             else:
                 fields += tag.input(flag, \
                                     type='checkbox', id=flagid, \
                                     name=flagid) + tag.br()
     if form and not readonly:
         return tag.form(tag.fieldset(
             tag.legend("Attachment Flags") + fields,
             tag.input(type="hidden", name="action", value="update_flags"),
             tag.input(type="submit", value="Update flags")),
                         method="POST")
     return tag.fieldset(tag.legend("Attachment Flags") + fields)
Example #33
 def _process_log(self, req):
     """Handle AJAX log requests"""
     try:
         rev = int(req.args['rev'])
         repos = self.env.get_repository(req.authname)
         chgset = repos.get_changeset(rev)
         wikimsg = wiki_to_html(chgset.message, self.env, req, None, True,
                                False)
         # FIXME: check if there is a better way to discard ellipsis
         #        which are not valid in pure XML
         wikimsg = Markup(wikimsg.replace('...', ''))
         req.hdf['changeset'] = {
             'chgset': True,
             'revision': rev,
             'time': format_datetime(chgset.date),
             'age': pretty_timedelta(chgset.date, None, 3600),
             'author': chgset.author or 'anonymous',
             'message': wikimsg,
         }
         return 'revtree_log.cs', 'application/xhtml+xml'
     except Exception, e:
         raise TracError, "Invalid revision log request: %s" % e
Example #34
    def process_request(self, req):
        options = self._get_options()
        connector = self.get_connector()
        last_builds = dict()
        builders = req.args['builders'].split(',')

        for builder in builders:
            try:
                build = connector.get_build(builder, -1)
                if "finish" in build and build["finish"]:
                    build["duration"] = pretty_timedelta(build["finish"], build["start"])
                    build["finish"] = format_datetime(build['finish'], tzinfo=req.tz)
                build["start"] = format_datetime(build['start'], tzinfo=req.tz)
            except Exception as e:
                last_builds[builder] = str(e)
            else:
                last_builds[builder] = build

        content = json.dumps(last_builds)

        req.send_header('Content-Type', 'application/javascript')
        req.send_header('Content-Length', len(content))
        req.end_headers()
        req.write(content)
Example #35
    def _process_log_request(self, req):
        '''
        Process log request information.

        This method is invoked when cursor is over a RevTree changeset,
        returning corresponding revision log information.

        :param req: Trac request object
        :returns: template, template data, type of response
        '''

        try:
            rev = int(req.args['logrev'])
            repos = Repository.get_svn_repository(self.env)
            if not repos:
                raise TracError("Revtree only supports Subversion "
                                "repositories")
            chgset = repos.get_changeset(rev)
            wikimsg = wiki_to_html(to_unicode(chgset.message),
                                   self.env,
                                   req,
                                   None,
                                   True,
                                   False)
            data = {
                'chgset': True,
                'revision': rev,
                'time': format_datetime(chgset.date).replace('()', ''),
                'age': pretty_timedelta(chgset.date, None, 3600),
                'author': to_unicode(chgset.author) or u'anonymous',
                'message': wikimsg
            }

            return 'revtree_log.html', {'log': data}, 'application/xhtml+xml'
        except Exception as e:
            raise TracError("Invalid revision log request: %s" % e)
Example #36
 def test_relative(self):
     t = datetime_now(utc) - timedelta(days=1)
     label = '%s ago' % pretty_timedelta(t)
     self.assertEqual(label, self._format_chrome(t, 'relative', False))
     self.assertEqual(label, self._format_timeline(t, 'relative', False))
Example #37
    def _process_wiki(self, req, cursor, where, data):

        cursor.execute("""
        select min(%s),
               max(%s),
               count(*),
               count(distinct author) """ % (SECONDS, SECONDS) + """
        from wiki """ + where)
        mintime, maxtime, edits, editors = cursor.fetchall()[0]

        data['editors'] = editors
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['edits'] = edits
        if age:
            data['peryear'] = '%.2f' % (edits * 365 * 24 * 60 * 60. / age)
            data['permonth'] = '%.2f' % (edits * 30 * 24 * 60 * 60. / age) 
            data['perday'] = '%.2f' % (edits * 24 * 60 * 60. / age) 
            data['perhour'] = '%.2f' % (edits * 60 * 60. / age) 
        else:
            data['peryear'] = 0
            data['permonth'] = 0
            data['perday'] = 0
            data['perhour'] = 0

        cursor.execute("select name, author, count(*) from wiki " + where + " group by 1, 2")
        pages = cursor.fetchall()

        d = {}
        for name, author, count in pages:
            try:
                d[author][0] += count
                d[author][1].add(name)
            except KeyError:
                d[author] = [count, set([name])]
        total = float(sum(x[0] for x in d.values()))
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({'name': k, 
                          'url': req.href.stats("wiki", author=k),
                          'count': v[0],
                          'pages': len(v[1]),
                          'percent': '%.2f' % (100 * v[0] / total)})
        data['byauthor'] = stats

        cursor.execute("""
        select name, %s """ % SECONDS + """
        from wiki """ + where + """
        order by 2 asc
        """)
        history = cursor.fetchall()

        stats = []
        if not req.args.get('author', ''):
            d = {}
            total = set()
            for name, t in history:
                total.add(name)
                d[int(t * 1000)] = len(total)
            stats = []
            steps = max(len(d) / 10, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                stats.append({'x': k, 
                              'y': v,})
        data['history'] = stats

        d = {}
        for name, _, count in pages:
            try:
                d[name] += count
            except KeyError:
                d[name] = count
        total = float(sum(d.values()))
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True)[:10]:
            stats.append({'name': k, 
                          'url': req.href.wiki(k),
                          'count': v,
                          'percent': '%.2f' % (100 * v / total)})
        data['pages'] = stats

        cursor.execute("""
        select name, version, length(text)
        from wiki """ + where + """
        group by 1, 2, 3
        having version = max(version)
        order by 3 desc
        limit 10
        """)
        rows = cursor.fetchall()
        d = dict((name, int(size)) for name, _, size in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({'name': k, 
                          'url': req.href.wiki(k),
                          'size': v})
        data['largest'] = stats

        cursor.execute("""
        select name, version, author, %s """ % SECONDS + """
        from wiki """ + where + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for name, version, author, t in rows:
            stats.append({'name': name, 
                          'author': author,
                          'url': req.href.wiki(name, version=version),
                          'url2': req.href.stats("wiki", author=author),
                          'time': pretty_timedelta(to_datetime(float(t))),})

        data['recent'] = stats

        return 'wiki.html', data, None
Example #38
    def _process_tickets(self, req, cursor, where, data):

        cursor.execute("""
        select
            min(%s),
            max(%s),
            count(*),
            count(distinct reporter) """ % (SECONDS, SECONDS) + """
        from ticket """ + where.replace('author', 'reporter'))
        mintime, maxtime, tickets, reporters = cursor.fetchall()[0]

        data['reporters'] = reporters
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['total'] = tickets
        if age:
            data['peryear'] = '%.2f' % (tickets * 365 * 24 * 60 * 60. / age)
            data['permonth'] = '%.2f' % (tickets * 30 * 24 * 60 * 60. / age)
            data['perday'] = '%.2f' % (tickets * 24 * 60 * 60. / age)
            data['perhour'] = '%.2f' % (tickets * 60 * 60. / age)
        else:
            data['peryear'] = 0
            data['permonth'] = 0
            data['perday'] = 0
            data['perhour'] = 0

        cursor.execute("""\
        select author, sum(reports), sum(changes)
        from (select reporter as author, count(*) as reports, 0 as changes
              from ticket """ + where.replace('author', 'reporter') + """
              group by 1
              union
              select author, 0 as reports, count(*) as changes
              from ticket_change """ + where + """
              group by 1
              ) as data
        group by 1 order by 2 desc
        """)
        rows = cursor.fetchall()
        d = dict((path, (int(x), int(y))) for path, x, y in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({'name': k, 
                          'url': req.href.stats("tickets", author=k),
                          'reports': v[0],
                          'changes': v[1]})
        data['byauthor'] = stats

        cursor.execute("""\
        select t.component, count(distinct t.id), count(distinct open.id)
        from ticket t
        join ticket open using (component)
        where (open.resolution is null or length(open.resolution) = 0) """ +
                       where.replace('where',
                                     'and').replace('time',
                                                    't.time').replace('author',
                                                                      't.reporter')+ """
        group by 1 order by 2 desc
        """)
        rows = cursor.fetchall()
        stats = []
        for component, total, open in rows:
            stats.append({'name': component, 
                          'url': req.href.query(status=("new", "opened", "resolved"), component=component, order="priority"),
                          'open' : open,
                          'total' : total,})
        data['bycomponent'] = stats

        cursor.execute("""\
        select t.milestone, count(distinct t.id), count(distinct open.id)
        from ticket t
        join ticket open using (milestone)
        where (open.resolution is null or length(open.resolution) = 0) """ +
                       where.replace('where',
                                     'and').replace('time',
                                                    't.time').replace('author',
                                                                      't.reporter')+ """
        group by 1 order by 2 desc
        """)
        rows = cursor.fetchall()
        stats = []
        for milestone, total, open in rows:
            stats.append({'name': milestone,
                          'url': req.href.query(status=("new", "opened", "resolved"), milestone=milestone, order="priority"),
                          'open' : open,
                          'total' : total,})
        data['bymilestone'] = stats

        stats = []
        if not req.args.get('author', ''):
            cursor.execute("""\
            select id, %s, 'none' as oldvalue, 'new' as newvalue
            from ticket """ % SECONDS + where + """
            union
            select ticket, %s, oldvalue, newvalue
            from ticket_change where field = 'status' """  % SECONDS +
                           where.replace('where', 'and'))
            rows = cursor.fetchall()
            d = {}
            opened = 0
            accepted = 0
            for ticket, t, oldvalue, newvalue in sorted(rows, key=itemgetter(1)):
                if newvalue == 'accepted' and oldvalue != 'accepted':
                    accepted += 1
                elif newvalue != 'accepted' and oldvalue == 'accepted':
                    accepted -= 1
                if newvalue in ("new", "reopened") and oldvalue not in ("new", "reopened"):
                    opened += 1
                elif newvalue == "closed" and oldvalue != "closed":
                    opened -= 1
                d[int(t * 1000)] = (opened, accepted)
            steps = max(len(d) / 10, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                stats.append({'x': k,
                              'opened': v[0],
                              'accepted': v[1],})
        data['history'] = stats

        cursor.execute("""\
        select tc.ticket, t.component, t.summary, count(*)
        from ticket_change tc
        join ticket t on t.id = tc.ticket """ + where.replace('time', 'tc.time') + """
        group by 1, 2, 3
        order by 3 desc
        limit 10
        """)
        rows = cursor.fetchall()
        total = float(sum(int(v) for _, _, _, v in rows))
        stats = []
        for ticket, component, summary, v in rows:
            stats.append({'name': summary, 
                          'id': ticket,
                          'component': component,
                          'url': req.href.ticket(ticket),
                          'url2': req.href.query(component=component, order="priority"),
                          'count': int(v),
                          'percent': '%.2f' % (100 * int(v) / total)})
        data['active'] = stats

        cursor.execute("""
        select id, component, summary, %s
        from ticket
        where status != 'closed' """ % SECONDS + 
                       where.replace('where',
                                     'and').replace('author',
                                                    'reporter') + """
        order by 4 asc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({'name': summary, 
                          'id': ticket,
                          'component': component,
                          'url': req.href.ticket(ticket),
                          'url2': req.href.query(component=component, order="priority"),
                          'time': pretty_timedelta(to_datetime(float(t))),})
        data['oldest'] = stats

        cursor.execute("""
        select id, component, summary, %s
        from ticket """ % SECONDS + where.replace('author', 'reporter') + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({'name': summary, 
                          'id': ticket,
                          'component': component,
                          'url': req.href.ticket(ticket),
                          'url2': req.href.query(component=component, order="priority"),
                          'time': pretty_timedelta(to_datetime(float(t))),})
        data['newest'] = stats

        cursor.execute("""
        select tc.ticket, t.component, t.summary, tc.%s
        from ticket_change tc
        join ticket t on t.id = tc.ticket """ % SECONDS +
                       where.replace('where', 'and').replace('time', 'tc.time') + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({'name': summary, 
                          'id': ticket,
                          'component': component,
                          'url': req.href.ticket(ticket),
                          'url2': req.href.query(component=component, order="priority"),
                          'time': pretty_timedelta(to_datetime(float(t))),})

        data['recent'] = stats

        return 'tickets.html', data, None
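
A minimal standalone sketch of the thinning idiom used for the 'history' series above, with hypothetical data; integer division is made explicit so it behaves the same on Python 2 and 3:

    from operator import itemgetter

    d = dict((ts, (ts % 7, ts % 3)) for ts in range(1, 101))  # hypothetical {timestamp: (opened, accepted)} series
    steps = max(len(d) // 10, 1)                               # keep roughly 10 evenly spaced points
    sampled = sorted(d.items(), key=itemgetter(0))[::steps]
    print(len(sampled))                                        # -> 10
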
Exemple #39
0
    def _render_diff(self, req, db, page):
        req.perm.assert_permission('WIKI_VIEW')

        if not page.exists:
            raise TracError("Version %s of page %s does not exist" %
                            (req.args.get('version'), page.name))

        add_stylesheet(req, 'common/css/diff.css')

        self._set_title(req, page, u'变化')

        # Ask web spiders to not index old versions
        req.hdf['html.norobots'] = 1

        old_version = req.args.get('old_version')
        if old_version:
            old_version = int(old_version)
            if old_version == page.version:
                old_version = None
            elif old_version > page.version: # FIXME: what about reverse diffs?
                old_version, page = page.version, \
                                    WikiPage(self.env, page.name, old_version)
        latest_page = WikiPage(self.env, page.name)
        new_version = int(page.version)
        info = {
            'version': new_version,
            'latest_version': latest_page.version,
            'history_href': req.href.wiki(page.name, action='history')
        }

        num_changes = 0
        old_page = None
        prev_version = next_version = None
        for version,t,author,comment,ipnr in latest_page.get_history():
            if version == new_version:
                if t:
                    info['time'] = format_datetime(t)
                    info['time_delta'] = pretty_timedelta(t)
                info['author'] = author or 'anonymous'
                info['comment'] = wiki_to_html(comment or '--',
                                               self.env, req, db)
                info['ipnr'] = ipnr or ''
            else:
                if version < new_version:
                    num_changes += 1
                    if not prev_version:
                        prev_version = version
                    if (old_version and version == old_version) or \
                            not old_version:
                        old_page = WikiPage(self.env, page.name, version)
                        info['num_changes'] = num_changes
                        info['old_version'] = version
                        break
                else:
                    next_version = version
        req.hdf['wiki'] = info

        # -- prev/next links
        if prev_version:
            add_link(req, 'prev', req.href.wiki(page.name, action='diff',
                                                version=prev_version),
                     'Version %d' % prev_version)
        if next_version:
            add_link(req, 'next', req.href.wiki(page.name, action='diff',
                                                version=next_version),
                     'Version %d' % next_version)

        # -- text diffs
        diff_style, diff_options = get_diff_options(req)

        oldtext = old_page and old_page.text.splitlines() or []
        newtext = page.text.splitlines()
        context = 3
        for option in diff_options:
            if option.startswith('-U'):
                context = int(option[2:])
                break
        if context < 0:
            context = None
        changes = hdf_diff(oldtext, newtext, context=context,
                           ignore_blank_lines='-B' in diff_options,
                           ignore_case='-i' in diff_options,
                           ignore_space_changes='-b' in diff_options)
        req.hdf['wiki.diff'] = changes
Exemple #40
0
    def _process_code(self, req, cursor, where, data):

        root = self.config.get('stats', 'root', '')
        if root and not root.endswith('/'):
            root += '/'

        project = root + req.args.get('project', '')

        if project:
            cursor.execute("""
            select rev, %s, author, message, %s
            from revision r
            join (
               select rev
               from node_change
               where path like '%s%%'
               group by rev
            ) changes using (rev)
            """ % (SECONDS, REPOS, project) + where + " order by 2")
        else:
            cursor.execute("""
            select rev, %s, author, message, %s
            from revision r
            """ % (SECONDS, REPOS) + where + " order by 2")
        revisions = cursor.fetchall()

        if project:
            query = """
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """ % (REPOS, USING) + where
            if where:
                query += " and nc.path like '%s%%'" % project
            else:
                query += " where nc.path like '%s%%'" % project
            cursor.execute(query)
        else:
            cursor.execute("""
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """ % (REPOS, USING) + where)
        changes = cursor.fetchall()

        # In version 0.12, support for multiple repositories was
        # added.  We use the reponame to generate proper changeset links.
        if trac.__version__.startswith('0.12'):
            cursor.execute("""
            select id, value
            from repository
            where name = 'name'""")
            repositories = dict(cursor.fetchall())
        else:
            repositories = {}

        if revisions:
            head = revisions[0]
            tail = revisions[-1]
            minrev, mintime = head[0], head[1]
            maxrev, maxtime = tail[0], tail[1]
        else:
            minrev = maxrev = mintime = maxtime = 0

        commits = len(revisions)
        developers = len(set(author for _, _, author, _, _ in revisions))

        data['maxrev'] = maxrev
        data['minrev'] = minrev
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z', time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['developers'] = developers
        data['commits'] = commits
        if age:
            data['commitsperyear'] = '%.2f' % (commits * 365 * 24 * 60 * 60. / age)
            data['commitspermonth'] = '%.2f' % (commits * 30 * 24 * 60 * 60. / age)
            data['commitsperday'] = '%.2f' % (commits * 24 * 60 * 60. / age)
            data['commitsperhour'] = '%.2f' % (commits * 60 * 60. / age)
        else:
            data['commitsperyear'] = 0
            data['commitspermonth'] = 0
            data['commitsperday'] = 0
            data['commitsperhour'] = 0

        if revisions:
            avgsize = sum(len(msg) for _, _, _, msg, _ in revisions) / float(len(revisions))
            avgchanges = float(len(changes)) / len(revisions)
            data['logentry'] = '%d chars' % avgsize
            data['changes'] = '%.2f' % avgchanges
        else:
            data['logentry'] = 'N/A'
            data['changes'] = 'N/A'

        if self.db_type == 'sqlite':
            strftime = "strftime('%%Y-%%W', %s, 'unixepoch')" % SECONDS
        elif self.db_type == 'mysql':
            strftime = "date_format(from_unixtime(%s), '%%Y-%%u')" % SECONDS
        elif self.db_type == 'postgres':
            strftime = "to_char(to_timestamp(%s), 'YYYY-IW')" % SECONDS # FIXME: Not %Y-%W
        else:
            assert False

        now = time.time()
        start = now - (52 * 7 * 24 * 60 * 60)
        d = {}
        for _, t, author, _, _ in revisions:
            if t > start:
                week = time.strftime('%Y-%W', time.localtime(t))
                try:
                    d[author][week] += 1
                except KeyError:
                    d[author] = { week : 1 }

        stats = []
        for author in sorted(set(author for _, _, author, _, _ in revisions)):
            commits = len(set(x[0] for x in revisions if x[2] == author))
            mintime = min(x[1] for x in revisions if x[2] == author)
            maxtime = max(x[1] for x in revisions if x[2] == author)
            if maxtime > mintime:
                rate = commits * 24.0 * 60 * 60 / float(maxtime - mintime)
            else:
                rate = 0
            change = sum(1 for x in changes if x[4] == author)
            paths = len(set(x[2] for x in changes if x[4] == author))

            year, week = map(int, time.strftime('%Y %W').split())
            weeks = []
            while len(weeks) < 52:
                name = '%04d-%02d' % (year, week)
                try:
                    total = d[author][name]
                except KeyError:
                    total = 0
                weeks.append({'week': name,
                              'total': total})
                week -= 1
                if week < 0:
                    year -= 1
                    week = 52
            stats.append({'name': author, 
                          'url': req.href.stats("code", author=author),
                          'commits': commits,
                          'rate': '%.2f' % (rate and float(rate) or 0),
                          'changes': change,
                          'paths': paths,
                          'weeks': list(reversed(weeks)),})
        data['byauthors'] = stats

        stats = []
        for rev, t, author, msg, repos in reversed(revisions[-10:]):
            reponame = repositories.get(repos, '')
            stats.append({'name': msg,
                          'author' : author,
                          'rev': rev,
                          'url': req.href.changeset(rev, reponame),
                          'url2': req.href.stats("code", author=author),
                          'time': pretty_timedelta(to_datetime(float(t))),})
        data['recent'] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)

        stats = []
        if not req.args.get('author', ''):
            d = {}
            total = set()
            for rev, _, path, change_type, _ in sorted(changes, key=lambda x: (times[x[0]], x[1])):
                if change_type in ('A', 'C'):
                    total.add(path)
                elif change_type == 'D' and path in total:
                    total.remove(path)
                d[int(times[rev] * 1000)] = len(total)
            stats = []
            steps = max(len(d) / 50, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                stats.append({'x': k, 
                              'y': v,})
        data['totalfiles'] = stats

        d = {}
        total = 0
        for _, t, _, _, _ in sorted(revisions, key=lambda x: x[1]):
            total += 1
            d[int(t * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({'x': k, 
                          'y': v,})
        data['totalcommits'] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)
        d = {}
        total = 0
        for rev, _, _, _, _ in sorted(changes, key=lambda x: times[x[0]]):
            total += 1
            d[int(times[rev] * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({'x': k, 
                          'y': v,})
        data['totalchanges'] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root):]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, '')
            if reponame:
                path = reponame + ':' + path
            stats.append({'name': path,
                          'url': req.href.log(reponame, root + k[1]),
                          'count': v,
                          'percent': '%.2f' % (100 * v / total)})
        data['byfiles'] = stats

        d = {}
        for _, _, _, change_type, author in changes:
            try:
                d[author][change_type] += 1
            except KeyError:
                d[author] = {'A':0,'E':0,'M':0,'C':0,'D':0}
                d[author][change_type] += 1
        stats = []
        for k, v in sorted(d.iteritems()):
            total = sum(v.itervalues())
            adds = int(100.0 * v['A'] / total)
            copies = int(100.0 * v['C'] / total)
            deletes = int(100.0 * v['D'] / total)
            moves = int(100.0 * v['M'] / total)
            edits = int(100.0 * v['E'] / total)
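            # 'edits' is recomputed below as the remainder of the other four
            # buckets so the five percentages always sum to 100 (this
            # overrides the value computed on the line above)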
            edits = 100 - (adds + copies + deletes + moves)
            stats.append({'name': k, 
                          'url': req.href.stats("code", author=k),
                          'adds': adds,
                          'copies': copies,
                          'deletes': deletes,
                          'edits': edits,
                          'moves': moves})
        data['bychangetypes'] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root):]
            slash = path.rfind('/')
            if slash > 0:
                path = path[:slash]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, '')
            if reponame:
                path = reponame + ':' + path
            stats.append({'name': path,
                          'url': req.href.log(reponame, root + k[1]),
                          'count': v,
                          'percent': '%.2f' % (100 * v / total)})
        data['bypaths'] = stats

        d = {}
        for _, _, path, _, _ in changes:
            path = path[len(root):]
            slash = path.rfind('/')
            if slash > 0:
                path = path[slash+1:]
            dot = path.rfind('.')
            if dot > 0:
                ext = path[dot:]
                try:
                    d[ext] += 1
                except KeyError:
                    d[ext] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            stats.append({'name': k,
                          'count': v,
                          'percent': '%.2f' % (100 * v / total)})
        data['byfiletypes'] = stats

        d = {}
        for rev, repos, path, _, _ in changes:
            path = path[len(root):]
            slash = path.find('/')
            if slash < 0:
                continue
            project = (repos, path[:slash] or 'None')
            try:
                d[project][0] += 1
                d[project][1].add(rev)
                d[project][2].add(path)
            except KeyError:
                d[project] = [1, set([rev]), set([path])]
        stats = []
        for k, v in sorted(d.iteritems(), key=lambda x: len(x[1][1]), reverse=True):  # sort by commit count
            repos, project = k
            reponame = repositories.get(repos, '')
            if reponame:
                project = reponame + ':' + project
            stats.append({'name': project,
                          'url': req.href.browser(reponame, root + k[1]),
                          'changes': v[0],
                          'commits': len(v[1]),
                          'paths': len(v[2]),})
        data['byproject'] = stats

        hours = ['0%d:00' % i for i in range(10)]
        hours += ['%d:00' % i for i in range(10, 24)]
        hours = dict((hour, i) for i, hour in enumerate(hours))
        d = dict((i, 0) for i in range(24))
        for rev, t, author, _, _ in revisions:
            hour = time.strftime('%H:00', time.localtime(t))
            d[hours[hour]] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({'x': x,
                          'y': y,})
        data['byhour'] = stats

        d = dict((str(i), 0) for i in range(7))
        for rev, t, author, _, _ in revisions:
            day = time.strftime('%w', time.localtime(t))
            d[day] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({'x': x, 
                          'y': y,})
        data['byday'] = stats

        d = {}
        for _, t, _, _, _ in revisions:
            t = time.localtime(t)
            t = (t[0], t[1], 0, 0, 0, 0, 0, 0, 0)
            t = time.mktime(t)
            try:
                d[t] += 1
            except KeyError:
                d[t] = 1
        if d:
            mintime = min(d.keys())
            maxtime = max(d.keys())
            t = time.localtime(mintime)
            while mintime < maxtime:
                t = (t[0], t[1]+1, 0, 0, 0, 0, 0, 0, 0)
                mintime = time.mktime(t)
                if mintime not in d:
                    d[mintime] = 0
        stats = []
        for k, v in sorted(d.iteritems()):
            stats.append({'x': int(k * 1000),
                          'y': v})
        data['bymonth'] = stats

        cursor.execute("select distinct(author) from revision")
        authors = set(s for s, in cursor.fetchall())

        projects = set(p[:p.find('/')] for _, _, p, _, _ in changes if p.find('/') != -1)

        ignore = set(stopwords)
        ignore.update(authors)
        ignore.update(projects)

        delete = dict((ord(k), u' ') for k in '.,;:!?-+/\\()<>{}[]=_~`|0123456789*')
        delete.update(dict((ord(k), None) for k in '\"\''))

        d = {}
        for _, _, _, msg, _ in revisions:
            msg = msg.lower()
            msg = msg.translate(delete)
            for word in msg.split():
                if word not in ignore and len(word) > 1:
                    try:
                        d[word] += 1
                    except KeyError:
                        d[word] = 1
        fonts = ['0.8em', '1.0em', '1.25em', '1.5em', '1.75em', '2.0em']
        items = sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:200]
        min_count = items and min(map(itemgetter(1), items)) or 0
        max_count = items and max(map(itemgetter(1), items)) or 0
        stats = []
        for k, v in sorted(items):
            weight = (log(v) - log(min_count)) / max(log(max_count) - log(min_count), 1)
            index = int(floor(weight * len(fonts)))
            index = min(index, len(fonts) - 1)
            stats.append({'word': k,
                          'url': req.href.search(q=k, noquickjump=1,
                                                 changeset="on"),
                          'size': fonts[index]})
        data['cloud'] = stats

        return 'code.html', data, None
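
A worked sketch of the log-scaled font mapping used for the commit-message cloud above, using hypothetical word counts:

    from math import floor, log

    fonts = ['0.8em', '1.0em', '1.25em', '1.5em', '1.75em', '2.0em']
    counts = {'fix': 40, 'merge': 12, 'typo': 3}   # hypothetical word frequencies
    min_count, max_count = min(counts.values()), max(counts.values())
    for word, v in sorted(counts.items()):
        weight = (log(v) - log(min_count)) / max(log(max_count) - log(min_count), 1)
        index = min(int(floor(weight * len(fonts))), len(fonts) - 1)
        print('%s -> %s' % (word, fonts[index]))   # e.g. fix -> 2.0em, typo -> 0.8em
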
Exemple #41
0
 def dateinfo(date):
     return tag.span(pretty_timedelta(date),
                     title=format_datetime(date))
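
A hedged usage sketch for the dateinfo helper above, assuming its definition is available as a plain function and that the Trac/Genshi imports it relies on are present:

    from datetime import timedelta
    from genshi.builder import tag
    from trac.util.datefmt import format_datetime, pretty_timedelta, to_datetime

    three_hours_ago = to_datetime(None) - timedelta(hours=3)
    span = dateinfo(three_hours_ago)
    # renders roughly as <span title="(full timestamp)">3 hours</span>
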
Exemple #42
0
    def _process_wiki(self, req, cursor, where, since, data):

        cursor.execute("""
        select min(%s),
               max(%s),
               count(*),
               count(distinct author) """ % (SECONDS, SECONDS) + """
        from wiki """ + where)
        mintime, maxtime, edits, editors = cursor.fetchall()[0]

        data['editors'] = editors
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['edits'] = edits
        if age:
            data['peryear'] = '%.2f' % (edits * 365 * 24 * 60 * 60. / age)
            data['permonth'] = '%.2f' % (edits * 30 * 24 * 60 * 60. / age)
            data['perday'] = '%.2f' % (edits * 24 * 60 * 60. / age)
            data['perhour'] = '%.2f' % (edits * 60 * 60. / age)
        else:
            data['peryear'] = 0
            data['permonth'] = 0
            data['perday'] = 0
            data['perhour'] = 0

        cursor.execute("select name, author, count(*) from wiki " + where +
                       " group by 1, 2")
        pages = cursor.fetchall()

        d = {}
        for name, author, count in pages:
            try:
                d[author][0] += count
                d[author][1].add(name)
            except KeyError:
                d[author] = [count, set([name])]
        total = float(sum(x[0] for x in d.values()))
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({
                'name': k,
                'url': req.href.stats("wiki", author=k),
                'count': v[0],
                'pages': len(v[1]),
                'percent': '%.2f' % (100 * v[0] / total)
            })
        data['byauthor'] = stats

        __where = where.replace('where %s > %s' % (SECONDS, since), '')
        __where = __where.replace('and %s > %s' % (SECONDS, since), '')
        cursor.execute("""
        select name, %s """ % SECONDS + """
        from wiki """ + __where + """
        order by 2 asc
        """)
        history = cursor.fetchall()

        stats = []
        if not req.args.get('author', ''):
            d = {}
            total = set()
            for name, t in history:
                total.add(name)
                d[int(t)] = len(total)
            stats = []
            steps = max(len(d) / 250, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                if k > since:
                    stats.append({
                        'x': k * 1000,
                        'y': v,
                    })
        data['history'] = stats

        d = {}
        for name, _, count in pages:
            try:
                d[name] += count
            except KeyError:
                d[name] = count
        total = float(sum(d.values()))
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True)[:10]:
            stats.append({
                'name': k,
                'url': req.href.wiki(k),
                'count': v,
                'percent': '%.2f' % (100 * v / total)
            })
        data['pages'] = stats

        cursor.execute("""
        select name, version, length(text)
        from wiki """ + where + """
        group by 1, 2, 3
        having version = max(version)
        order by 3 desc
        limit 10
        """)
        rows = cursor.fetchall()
        d = dict((name, int(size)) for name, _, size in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({'name': k, 'url': req.href.wiki(k), 'size': v})
        data['largest'] = stats

        cursor.execute("""
        select name, version, author, %s """ % SECONDS + """
        from wiki """ + where + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for name, version, author, t in rows:
            stats.append({
                'name': name,
                'author': author,
                'url': req.href.wiki(name, version=version),
                'url2': req.href.stats("wiki", author=author),
                'time': pretty_timedelta(to_datetime(float(t))),
            })

        data['recent'] = stats

        return 'wiki.html', data, None
Exemple #43
0
 def pretty_lock_time(self, user, next = False):
     """Convenience method for formatting lock time to string."""
     t_lock = self.lock_time(user, next)
     return (t_lock > 0) and pretty_timedelta(to_datetime(None) - \
         timedelta(microseconds = t_lock)) or None
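
A small sketch of the expression pretty_lock_time builds on: subtract the lock duration from "now" and let pretty_timedelta describe the result. The value of t_lock here is hypothetical; this example treats it as microseconds, while Exemple #46 below uses seconds:

    from datetime import timedelta
    from trac.util.datefmt import pretty_timedelta, to_datetime

    t_lock = 90 * 1000000   # hypothetical lock time in microseconds
    print(pretty_timedelta(to_datetime(None) - timedelta(microseconds=t_lock)))
    # prints a human-readable span such as '90 seconds'
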
Exemple #44
0
    def _process_tickets(self, req, cursor, where, since, data):

        cursor.execute("""
        select
            min(%s),
            max(%s),
            count(*),
            count(distinct reporter) """ % (SECONDS, SECONDS) + """
        from ticket """ + where.replace('author', 'reporter'))
        mintime, maxtime, tickets, reporters = cursor.fetchall()[0]

        data['reporters'] = reporters
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['total'] = tickets
        if age:
            data['peryear'] = '%.2f' % (tickets * 365 * 24 * 60 * 60. / age)
            data['permonth'] = '%.2f' % (tickets * 30 * 24 * 60 * 60. / age)
            data['perday'] = '%.2f' % (tickets * 24 * 60 * 60. / age)
            data['perhour'] = '%.2f' % (tickets * 60 * 60. / age)
        else:
            data['peryear'] = 0
            data['permonth'] = 0
            data['perday'] = 0
            data['perhour'] = 0

        cursor.execute("""\
        select author, sum(reports), sum(changes)
        from (select reporter as author, count(*) as reports, 0 as changes
              from ticket """ + where.replace('author', 'reporter') + """
              group by 1
              union
              select author, 0 as reports, count(*) as changes
              from ticket_change """ + where + """
              group by 1
              ) as data
        group by 1 order by 2 desc
        """)
        rows = cursor.fetchall()
        d = dict((path, (int(x), int(y))) for path, x, y in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({
                'name': k,
                'url': req.href.stats("tickets", author=k),
                'reports': v[0],
                'changes': v[1]
            })
        data['byauthor'] = stats

        cursor.execute(
            """\
        select t.component, count(distinct t.id), open.total
        from ticket t
        join (
              select component, count(distinct id) as total
              from ticket
              where (resolution is null or length(resolution) = 0) """ +
            where.replace('where', 'and').replace('author', 'reporter') + """
              group by 1
        ) as open using (component) """ +
            where.replace('time', 't.time').replace('author', 't.reporter') +
            """
        group by 1, 3 order by 2 desc
        """)
        rows = cursor.fetchall()
        stats = []
        for component, total, open in rows:
            stats.append({
                'name': component,
                'url': req.href.query(status=("new", "opened", "resolved"),
                                      component=component, order="priority"),
                'open': open,
                'total': total,
            })
        data['bycomponent'] = stats

        cursor.execute(
            """\
        select t.milestone, count(distinct t.id), open.total
        from ticket t
        join (
              select milestone, count(distinct id) as total
              from ticket
              where (resolution is null or length(resolution) = 0) """ +
            where.replace('where', 'and').replace('author', 'reporter') + """
              group by 1
        ) as open using (milestone) """ +
            where.replace('time', 't.time').replace('author', 't.reporter') +
            """
        group by 1, 3 order by 2 desc
        """)
        rows = cursor.fetchall()
        stats = []
        for milestone, total, open in rows:
            stats.append({
                'name': milestone,
                'url': req.href.query(status=("new", "opened", "resolved"),
                                      milestone=milestone, order="priority"),
                'open': open,
                'total': total,
            })
        data['bymilestone'] = stats

        stats = []
        if not req.args.get('author', ''):
            __where = where.replace('where %s > %s' % (SECONDS, since), '')
            __where = __where.replace('and %s > %s' % (SECONDS, since), '')
            cursor.execute("""\
            select id, %s, 'none' as oldvalue, 'new' as newvalue
            from ticket """ % SECONDS + __where + """
            union
            select ticket, %s, oldvalue, newvalue
            from ticket_change where field = 'status' """ % SECONDS +
                           __where.replace('where', 'and'))
            rows = cursor.fetchall()
            d = {}
            opened = 0
            accepted = 0
            for ticket, t, oldvalue, newvalue in sorted(rows,
                                                        key=itemgetter(1)):
                if newvalue == 'accepted' and oldvalue != 'accepted':
                    accepted += 1
                elif newvalue != 'accepted' and oldvalue == 'accepted':
                    accepted -= 1
                if newvalue in ("new",
                                "reopened") and oldvalue not in ("new",
                                                                 "reopened"):
                    opened += 1
                elif newvalue == "closed" and oldvalue != "closed":
                    opened -= 1
                d[int(t)] = (opened, accepted)
            steps = max(len(d) / 250, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                if k > since:
                    stats.append({
                        'x': k * 1000,
                        'opened': v[0],
                        'accepted': v[1],
                    })
        data['history'] = stats

        cursor.execute("""\
        select tc.ticket, t.component, t.summary, count(*)
        from ticket_change tc
        join ticket t on t.id = tc.ticket """ +
                       where.replace('time', 'tc.time') + """
        group by 1, 2, 3
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        total = float(sum(int(v) for _, _, _, v in rows))
        stats = []
        for ticket, component, summary, v in rows:
            stats.append({
                'name': summary,
                'id': ticket,
                'component': component,
                'url': req.href.ticket(ticket),
                'url2': req.href.query(component=component, order="priority"),
                'count': int(v),
                'percent': '%.2f' % (100 * int(v) / total)
            })
        data['active'] = stats

        cursor.execute(
            """
        select id, component, summary, %s
        from ticket
        where status != 'closed' """ % SECONDS +
            where.replace('where', 'and').replace('author', 'reporter') + """
        order by 4 asc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({
                'name': summary,
                'id': ticket,
                'component': component,
                'url': req.href.ticket(ticket),
                'url2': req.href.query(component=component, order="priority"),
                'time': pretty_timedelta(to_datetime(float(t))),
            })
        data['oldest'] = stats

        cursor.execute("""
        select id, component, summary, %s
        from ticket """ % SECONDS + where.replace('author', 'reporter') + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({
                'name': summary,
                'id': ticket,
                'component': component,
                'url': req.href.ticket(ticket),
                'url2': req.href.query(component=component, order="priority"),
                'time': pretty_timedelta(to_datetime(float(t))),
            })
        data['newest'] = stats

        cursor.execute(
            """
        select tc.ticket, t.component, t.summary, tc.%s
        from ticket_change tc
        join ticket t on t.id = tc.ticket """ % SECONDS +
            where.replace('where', 'and').replace('time', 'tc.time') + """
        order by 4 desc
        limit 10
        """)
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append({
                'name': summary,
                'id': ticket,
                'component': component,
                'url': req.href.ticket(ticket),
                'url2': req.href.query(component=component, order="priority"),
                'time': pretty_timedelta(to_datetime(float(t))),
            })

        data['recent'] = stats

        return 'tickets.html', data, None
Exemple #45
0
    def _process_code(self, req, cursor, where, data):

        root = self.config.get("stats", "root", "")
        if root and not root.endswith("/"):
            root += "/"

        project = root + req.args.get("project", "")

        if project:
            cursor.execute(
                """
            select rev, %s, author, message, %s
            from revision r
            join (
               select rev
               from node_change
               where path like '%s%%'
               group by rev
            ) changes using (rev)
            """
                % (SECONDS, REPOS, project)
                + where
                + " order by 2"
            )
        else:
            cursor.execute(
                """
            select rev, %s, author, message, %s
            from revision r
            """
                % (SECONDS, REPOS)
                + where
                + " order by 2"
            )
        revisions = cursor.fetchall()

        if project:
            query = (
                """
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """
                % (REPOS, USING)
                + where
            )
            if where:
                query += " and nc.path like '%s%%'" % project
            else:
                query += " where nc.path like '%s%%'" % project
            cursor.execute(query)
        else:
            cursor.execute(
                """
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """
                % (REPOS, USING)
                + where
            )
        changes = cursor.fetchall()

        # In version 0.12, support for multiple repositories was
        # added.  We use the reponame to generate proper changeset links.
        if trac.__version__.startswith("0.12"):
            cursor.execute(
                """
            select id, value
            from repository
            where name = 'name'"""
            )
            repositories = dict(cursor.fetchall())
        else:
            repositories = {}

        if revisions:
            head = revisions[0]
            tail = revisions[-1]
            minrev, mintime = head[0], head[1]
            maxrev, maxtime = tail[0], tail[1]
        else:
            minrev = maxrev = mintime = maxtime = 0

        commits = len(revisions)
        developers = len(set(author for _, _, author, _, _ in revisions))

        data["maxrev"] = maxrev
        data["minrev"] = minrev
        if maxtime:
            data["maxtime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(maxtime))
        else:
            data["maxtime"] = "N/A"
        if mintime:
            data["mintime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(mintime))
        else:
            data["mintime"] = "N/A"

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = td.days % 365
        hours = td.seconds // 3600
        data["age"] = "%d years, %d days, %d hours" % (years, days, hours)

        data["developers"] = developers
        data["commits"] = commits
        if age:
            data["commitsperyear"] = "%.2f" % (commits * 365 * 24 * 60 * 60.0 / age)
            data["commitspermonth"] = "%.2f" % (commits * 30 * 24 * 60 * 60.0 / age)
            data["commitsperday"] = "%.2f" % (commits * 24 * 60 * 60.0 / age)
            data["commitsperhour"] = "%.2f" % (commits * 60 * 60.0 / age)
        else:
            data["commitsperyear"] = 0
            data["commitspermonth"] = 0
            data["commitsperday"] = 0
            data["commitsperhour"] = 0

        if revisions:
            avgsize = sum(len(msg) for _, _, _, msg, _ in revisions) / float(len(revisions))
            avgchanges = float(len(changes)) / len(revisions)
            data["logentry"] = "%d chars" % avgsize
            data["changes"] = "%.2f" % avgchanges
        else:
            data["logentry"] = "N/A"
            data["changes"] = "N/A"

        if self.db_type == "sqlite":
            strftime = "strftime('%%Y-%%W', %s, 'unixepoch')" % SECONDS
        elif self.db_type == "mysql":
            strftime = "date_format(from_unixtime(%s), '%%Y-%%u')" % SECONDS
        elif self.db_type == "postgres":
            strftime = "to_char(to_timestamp(%s), 'YYYY-IW')" % SECONDS  # FIXME: Not %Y-%W
        else:
            assert False

        now = time.time()
        start = now - (52 * 7 * 24 * 60 * 60)
        d = {}
        for _, t, author, _, _ in revisions:
            if t > start:
                week = time.strftime("%Y-%W", time.localtime(t))
                try:
                    d[author][week] += 1
                except KeyError:
                    d[author] = {week: 1}

        stats = []
        for author in sorted(set(author for _, _, author, _, _ in revisions)):
            commits = len(set(x[0] for x in revisions if x[2] == author))
            mintime = min(x[1] for x in revisions if x[2] == author)
            maxtime = max(x[1] for x in revisions if x[2] == author)
            if maxtime > mintime:
                rate = commits * 24.0 * 60 * 60 / float(maxtime - mintime)
            else:
                rate = 0
            change = sum(1 for x in changes if x[4] == author)
            paths = len(set(x[2] for x in changes if x[4] == author))

            year, week = map(int, time.strftime("%Y %W").split())
            weeks = []
            while len(weeks) < 52:
                name = "%04d-%02d" % (year, week)
                try:
                    total = d[author][name]
                except KeyError:
                    total = 0
                weeks.append({"week": name, "total": total})
                week -= 1
                if week < 0:
                    year -= 1
                    week = 52
            stats.append(
                {
                    "name": author,
                    "url": req.href.stats("code", author=author),
                    "commits": commits,
                    "rate": "%.2f" % (rate and float(rate) or 0),
                    "changes": change,
                    "paths": paths,
                    "weeks": list(reversed(weeks)),
                }
            )
        data["byauthors"] = stats

        stats = []
        for rev, t, author, msg, repos in reversed(revisions[-10:]):
            reponame = repositories.get(repos, "")
            stats.append(
                {
                    "name": msg,
                    "author": author,
                    "rev": rev,
                    "url": req.href.changeset(rev, reponame),
                    "url2": req.href.stats("code", author=author),
                    "time": pretty_timedelta(to_datetime(float(t))),
                }
            )
        data["recent"] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)

        stats = []
        if not req.args.get("author", ""):
            d = {}
            total = set()
            for rev, _, path, change_type, _ in sorted(changes, key=lambda x: (times[x[0]], x[1])):
                if change_type in ("A", "C"):
                    total.add(path)
                elif change_type == "D" and path in total:
                    total.remove(path)
                d[int(times[rev] * 1000)] = len(total)
            stats = []
            steps = max(len(d) / 50, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                stats.append({"x": k, "y": v})
        data["totalfiles"] = stats

        d = {}
        total = 0
        for _, t, _, _, _ in sorted(revisions, key=lambda x: x[1]):
            total += 1
            d[int(t * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({"x": k, "y": v})
        data["totalcommits"] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)
        d = {}
        total = 0
        for rev, _, _, _, _ in sorted(changes, key=lambda x: times[x[0]]):
            total += 1
            d[int(times[rev] * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({"x": k, "y": v})
        data["totalchanges"] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root) :]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, "")
            if reponame:
                path = reponame + ":" + path
            stats.append(
                {
                    "name": path,
                    "url": req.href.log(reponame, root + k[1]),
                    "count": v,
                    "percent": "%.2f" % (100 * v / total),
                }
            )
        data["byfiles"] = stats

        d = {}
        for _, _, _, change_type, author in changes:
            try:
                d[author][change_type] += 1
            except KeyError:
                d[author] = {"A": 0, "E": 0, "M": 0, "C": 0, "D": 0}
                d[author][change_type] += 1
        stats = []
        for k, v in sorted(d.iteritems()):
            total = sum(v.itervalues())
            adds = int(100.0 * v["A"] / total)
            copies = int(100.0 * v["C"] / total)
            deletes = int(100.0 * v["D"] / total)
            moves = int(100.0 * v["M"] / total)
            edits = int(100.0 * v["E"] / total)
            edits = 100 - (adds + copies + deletes + moves)
            stats.append(
                {
                    "name": k,
                    "url": req.href.stats("code", author=k),
                    "adds": adds,
                    "copies": copies,
                    "deletes": deletes,
                    "edits": edits,
                    "moves": moves,
                }
            )
        data["bychangetypes"] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root) :]
            slash = path.rfind("/")
            if slash > 0:
                path = path[:slash]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, "")
            if reponame:
                path = reponame + ":" + path
            stats.append(
                {
                    "name": path,
                    "url": req.href.log(reponame, root + k[1]),
                    "count": v,
                    "percent": "%.2f" % (100 * v / total),
                }
            )
        data["bypaths"] = stats

        d = {}
        for _, _, path, _, _ in changes:
            path = path[len(root) :]
            slash = path.rfind("/")
            if slash > 0:
                path = path[slash + 1 :]
            dot = path.rfind(".")
            if dot > 0:
                ext = path[dot:]
                try:
                    d[ext] += 1
                except KeyError:
                    d[ext] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:10]:
            stats.append({"name": k, "count": v, "percent": "%.2f" % (100 * v / total)})
        data["byfiletypes"] = stats

        d = {}
        for rev, repos, path, _, _ in changes:
            path = path[len(root) :]
            slash = path.find("/")
            if slash < 0:
                continue
            project = (repos, path[:slash] or "None")
            try:
                d[project][0] += 1
                d[project][1].add(rev)
                d[project][2].add(path)
            except KeyError:
                d[project] = [1, set([rev]), set([path])]
        stats = []
        for k, v in sorted(d.iteritems(), key=lambda x: len(x[1][1]), reverse=True):  # sort by commit count
            repos, project = k
            reponame = repositories.get(repos, "")
            if reponame:
                project = reponame + ":" + project
            stats.append(
                {
                    "name": project,
                    "url": req.href.browser(reponame, root + k[1]),
                    "changes": v[0],
                    "commits": len(v[1]),
                    "paths": len(v[2]),
                }
            )
        data["byproject"] = stats

        hours = ["0%d:00" % i for i in range(10)]
        hours += ["%d:00" % i for i in range(10, 24)]
        hours = dict((hour, i) for i, hour in enumerate(hours))
        d = dict((i, 0) for i in range(24))
        for rev, t, author, _, _ in revisions:
            hour = time.strftime("%H:00", time.localtime(t))
            d[hours[hour]] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({"x": x, "y": y})
        data["byhour"] = stats

        d = dict((str(i), 0) for i in range(7))
        for rev, t, author, _, _ in revisions:
            day = time.strftime("%w", time.localtime(t))
            d[day] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({"x": x, "y": y})
        data["byday"] = stats

        d = {}
        for _, t, _, _, _ in revisions:
            t = time.localtime(t)
            t = (t[0], t[1], 0, 0, 0, 0, 0, 0, 0)
            t = time.mktime(t)
            try:
                d[t] += 1
            except KeyError:
                d[t] = 1
        if d:
            mintime = min(d.keys())
            maxtime = max(d.keys())
            t = time.localtime(mintime)
            while mintime < maxtime:
                t = (t[0], t[1] + 1, 0, 0, 0, 0, 0, 0, 0)
                mintime = time.mktime(t)
                if mintime not in d:
                    d[mintime] = 0
        stats = []
        for k, v in sorted(d.iteritems()):
            stats.append({"x": int(k * 1000), "y": v})
        data["bymonth"] = stats

        cursor.execute("select distinct(author) from revision")
        authors = set(s for s, in cursor.fetchall())

        projects = set(p[: p.find("/")] for _, _, p, _, _ in changes if p.find("/") != -1)

        ignore = set(stopwords)
        ignore.update(authors)
        ignore.update(projects)

        delete = dict((ord(k), u" ") for k in ".,;:!?-+/\\()<>{}[]=_~`|0123456789*")
        delete.update(dict((ord(k), None) for k in "\"'"))

        d = {}
        for _, _, _, msg, _ in revisions:
            msg = msg.lower()
            msg = msg.translate(delete)
            for word in msg.split():
                if word not in ignore and len(word) > 1:
                    try:
                        d[word] += 1
                    except KeyError:
                        d[word] = 1
        fonts = ["0.8em", "1.0em", "1.25em", "1.5em", "1.75em", "2.0em"]
        items = sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:200]
        min_count = items and min(map(itemgetter(1), items)) or 0
        max_count = items and max(map(itemgetter(1), items)) or 0
        stats = []
        for k, v in sorted(items):
            weight = (log(v) - log(min_count)) / max(log(max_count) - log(min_count), 1)
            index = int(floor(weight * len(fonts)))
            index = min(index, len(fonts) - 1)
            stats.append({"word": k, "url": req.href.search(q=k, noquickjump=1, changeset="on"), "size": fonts[index]})
        data["cloud"] = stats

        return "code.html", data, None
Exemple #46
0
 def pretty_lock_time(self, user, next=False):
     """Convenience method for formatting lock time to string."""
     t_lock = self.lock_time(user, next)
     return (t_lock > 0) and \
         (pretty_timedelta(to_datetime(None) - \
          timedelta(seconds = t_lock))) or None
Exemple #47
0
    def process_request(self, req):
        req.perm.require('TEAMCALENDAR_VIEW')
        pid = self.pm.get_current_project(req)
        syllabus_id = req.data['syllabus_id']
        self.pm.check_component_enabled(self, syllabus_id=syllabus_id)

        work_days = [int(d) for d in self.work_days.syllabus(syllabus_id)]
        weeks_prior = self.weeks_prior.syllabus(syllabus_id)
        weeks_after = self.weeks_after.syllabus(syllabus_id)

        data = {}

        from_date = req.args.get('from_date', '')
        to_date   = req.args.get('to_date', '')
        from_date = from_date and parse_date_only(from_date) or self.find_default_start(weeks_prior)
        to_date   = to_date   and parse_date_only(to_date)   or self.find_default_end(weeks_after)

        # Check time interval
        force_default = True
        delta = (to_date - from_date).days
        if delta < 0:
            add_warning(req, _('Negative time interval selected. Using default.'))
        elif delta > self.MAX_INTERVAL:
            add_warning(req, _('Too big time interval selected (%(interval)s). '
                               'Using default.', interval=pretty_timedelta(to_date, from_date)))
        else:
            force_default = False

        # Reset interval to default
        if force_default:
            from_date = self.find_default_start(weeks_prior)
            to_date   = self.find_default_end(weeks_after)

        # Message
        data['message'] = ''

        # Current user
        data['authname'] = authname = req.authname

        # Can we update?

        data['can_update_own']    = can_update_own    = ('TEAMCALENDAR_UPDATE_OWN'    in req.perm)
        data['can_update_others'] = can_update_others = ('TEAMCALENDAR_UPDATE_OTHERS' in req.perm)
        data['can_update']        = can_update_own or can_update_others

        # Store dates
        data['today']     = date.today()
        data['from_date'] = from_date
        data['to_date']   = to_date

        # Get all people
        data['people'] = people = self.pm.get_project_users(pid)

        # Update timetable if required
        if 'update_calendar' in req.args:
            req.perm.require('TEAMCALENDAR_UPDATE_OWN')

            # deliberately override dates: want to show result of update
            from_date = current_date = parse_date_only(req.args.get('orig_from_date', ''))
            to_date   = parse_date_only(req.args.get('orig_to_date', ''))
            tuples = []
            while current_date <= to_date:
                if can_update_others:
                    for person in people:
                        status = Decimal(req.args.get(u'%s.%s' % (current_date.isoformat(), person), False))
                        tuples.append((current_date, person, status,))
                elif can_update_own:
                    status = Decimal(req.args.get(u'%s.%s' % (current_date.isoformat(), authname), False))
                    tuples.append((current_date, authname, status,))
                current_date += timedelta(1)

            self.update_timetable(tuples, pid, from_date, to_date)
            data['message'] = _('Timetable updated.')

        # Get the current timetable
        timetable = self.get_timetable(from_date, to_date, people, pid, work_days)

        data['timetable'] = []
        current_date = from_date
        while current_date <= to_date:
            data['timetable'].append(dict(date=current_date, people=timetable[current_date]))
            current_date += timedelta(1)

        for day in data['timetable']:
            day['strdate'] = to_unicode(day['date'].strftime('%a %d/%m/%Y'))

        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.core.css')
        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.datepicker.css')
        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.theme.css')
        add_script(req, 'common/js/jquery.ui.core.js')
        add_script(req, 'common/js/jquery.ui.widget.js')
        add_script(req, 'common/js/jquery.ui.datepicker.js')
        add_script(req, 'common/js/datepicker.js')

        add_stylesheet(req, 'teamcalendar/css/calendar.css')

        data['_'] = _
        return 'teamcalendar.html', data, None
Exemple #48
0
    def process_request(self, req):
        """The appropriate mode of operation is inferred from the request
        parameters:

         * If `new_path` and `old_path` are equal (or `old_path` is omitted)
           and `new` and `old` are equal (or `old` is omitted),
           then we're about to view a revision Changeset: `chgset` is True.
           Furthermore, if the path is not the root, the changeset is
           ''restricted'' to that path (only the changes affecting that path,
           its children or its ancestor directories will be shown).
         * In any other case, the set of changes corresponds to arbitrary
           differences between path@rev pairs. If `new_path` and `old_path`
           are equal, the ''restricted'' flag will also be set, meaning in this
           case that the differences between two revisions are restricted to
           those occurring on that path.

        In any case, both path@rev pairs must exist.
        """
        req.perm.assert_permission('CHANGESET_VIEW')

        # -- retrieve arguments
        new_path = req.args.get('new_path')
        new = req.args.get('new')
        old_path = req.args.get('old_path')
        old = req.args.get('old')

        if old and '@' in old:
            old_path, old = unescape(old).split('@')
        if new and '@' in new:
            new_path, new = unescape(new).split('@')

        # -- normalize and check for special case
        repos = self.env.get_repository(req.authname)
        new_path = repos.normalize_path(new_path)
        new = repos.normalize_rev(new)

        repos.authz.assert_permission_for_changeset(new)

        old_path = repos.normalize_path(old_path or new_path)
        old = repos.normalize_rev(old or new)

        if old_path == new_path and old == new: # revert to Changeset
            old_path = old = None

        diff_options = get_diff_options(req)

        # -- setup the `chgset` and `restricted` flags, see docstring above.
        chgset = not old and not old_path
        if chgset:
            restricted = new_path not in ('', '/') # (subset or not)
        else:
            restricted = old_path == new_path # (same path or not)

        # -- redirect if changing the diff options
        if req.args.has_key('update'):
            if chgset:
                if restricted:
                    req.redirect(req.href.changeset(new, new_path))
                else:
                    req.redirect(req.href.changeset(new))
            else:
                req.redirect(req.href.changeset(new, new_path, old=old,
                                                old_path=old_path))

        # -- preparing the diff arguments
        if chgset:
            prev = repos.get_node(new_path, new).get_previous()
            if prev:
                prev_path, prev_rev = prev[:2]
            else:
                prev_path, prev_rev = new_path, repos.previous_rev(new)
            diff_args = DiffArgs(old_path=prev_path, old_rev=prev_rev,
                                 new_path=new_path, new_rev=new)
        else:
            if not new:
                new = repos.youngest_rev
            elif not old:
                old = repos.youngest_rev
            if not old_path:
                old_path = new_path
            diff_args = DiffArgs(old_path=old_path, old_rev=old,
                                 new_path=new_path, new_rev=new)
        if chgset:
            chgset = repos.get_changeset(new)
            message = chgset.message or '--'
            if self.wiki_format_messages:
                message = wiki_to_html(message, self.env, req,
                                              escape_newlines=True)
            else:
                message = html.PRE(message)
            req.check_modified(chgset.date, [
                diff_options[0],
                ''.join(diff_options[1]),
                repos.name,
                repos.rev_older_than(new, repos.youngest_rev),
                message,
                pretty_timedelta(chgset.date, None, 3600)])
        else:
            message = None # FIXME: what date should we choose for a diff?

        req.hdf['changeset'] = diff_args

        format = req.args.get('format')

        if format in ['diff', 'zip']:
            req.perm.assert_permission('FILE_VIEW')
            # choosing an appropriate filename
            rpath = new_path.replace('/','_')
            if chgset:
                if restricted:
                    filename = 'changeset_%s_r%s' % (rpath, new)
                else:
                    filename = 'changeset_r%s' % new
            else:
                if restricted:
                    filename = 'diff-%s-from-r%s-to-r%s' \
                                  % (rpath, old, new)
                elif old_path == '/': # special case for download (#238)
                    filename = '%s-r%s' % (rpath, old)
                else:
                    filename = 'diff-from-%s-r%s-to-%s-r%s' \
                               % (old_path.replace('/','_'), old, rpath, new)
            if format == 'diff':
                self._render_diff(req, filename, repos, diff_args,
                                  diff_options)
                return
            elif format == 'zip':
                self._render_zip(req, filename, repos, diff_args)
                return

        # -- HTML format
        self._render_html(req, repos, chgset, restricted, message,
                          diff_args, diff_options)
        if chgset:
            diff_params = 'new=%s' % new
        else:
            diff_params = unicode_urlencode({'new_path': new_path,
                                             'new': new,
                                             'old_path': old_path,
                                             'old': old})
        add_link(req, 'alternate', '?format=diff&'+diff_params, 'Unified Diff',
                 'text/plain', 'diff')
        add_link(req, 'alternate', '?format=zip&'+diff_params, 'Zip Archive',
                 'application/zip', 'zip')
        add_stylesheet(req, 'common/css/changeset.css')
        add_stylesheet(req, 'common/css/diff.css')
        add_stylesheet(req, 'common/css/code.css')
        return 'changeset.cs', None
Exemple #49
0
    def _render_file(self, req, repos, node, rev=None):
        req.perm.assert_permission('FILE_VIEW')

        mimeview = Mimeview(self.env)

        # MIME type detection
        content = node.get_content()
        chunk = content.read(CHUNK_SIZE)
        mime_type = node.content_type
        if not mime_type or mime_type == 'application/octet-stream':
            mime_type = mimeview.get_mimetype(node.name, chunk) or \
                        mime_type or 'text/plain'

        # Possibly send the file directly
        format = req.args.get('format')
        if format in ['raw', 'txt']:
            req.send_response(200)
            req.send_header('Content-Type',
                            format == 'txt' and 'text/plain' or mime_type)
            req.send_header('Content-Length', node.content_length)
            req.send_header('Last-Modified', http_date(node.last_modified))
            if not self.render_unsafe_content:
                # Force browser to download files instead of rendering
                # them, since they might contain malicious code enabling 
                # XSS attacks
                req.send_header('Content-Disposition', 'attachment')
            req.end_headers()

            while 1:
                if not chunk:
                    raise RequestDone
                req.write(chunk)
                chunk = content.read(CHUNK_SIZE)
        else:
            # The changeset corresponding to the last change on `node` 
            # is more interesting than the `rev` changeset.
            changeset = repos.get_changeset(node.rev)

            message = changeset.message or '--'
            if self.config['changeset'].getbool('wiki_format_messages'):
                message = wiki_to_html(message, self.env, req,
                                       escape_newlines=True)
            else:
                message = html.PRE(message)
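            # Translate the time unit in pretty_timedelta()'s output
            # (e.g. "3 hours") into Chinese before display.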
            ZhUnit = {'second':u'秒','seconds':u'秒','minute':u'分钟','minutes':u'分钟','hour':u'小时','hours':u'小时',
                            'day':u'天','days':u'天','year':u'年','years':u'年','month':u'月','months':u'月'}
            tempTime = pretty_timedelta(changeset.date)
            numAndUnit = tempTime.split(' ')
            numAndUnit[1] = ZhUnit.get(numAndUnit[1],numAndUnit[1])
            ZhAge = ' '.join(numAndUnit)            
            req.hdf['file'] = {
                'rev': node.rev,
                'changeset_href': req.href.changeset(node.rev),
                'date': format_datetime(changeset.date),
                'age': ZhAge,
                'size': pretty_size(node.content_length),
                'author': changeset.author or 'anonymous',
                'message': message
            } 

            # add ''Plain Text'' alternate link if needed
            if not is_binary(chunk) and mime_type != 'text/plain':
                plain_href = req.href.browser(node.path, rev=rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')

            # add ''Original Format'' alternate link (always)
            raw_href = req.href.browser(node.path, rev=rev, format='raw')
            add_link(req, 'alternate', raw_href, 'Original Format', mime_type)

            self.log.debug("Rendering preview of node %s@%s with mime-type %s"
                           % (node.name, str(rev), mime_type))

            del content # the remainder of that content is not needed

            req.hdf['file'] = mimeview.preview_to_hdf(
                req, node.get_content(), node.get_content_length(), mime_type,
                node.created_path, raw_href, annotations=['lineno'])

            add_stylesheet(req, 'common/css/code.css')
Exemple #50
0
    def test_relative_dateonly(self):
        t = datetime_now(utc) - timedelta(days=1)
        label = pretty_timedelta(t)
        self.assertEqual(label, self._format_chrome(t, 'relative', True))
        self.assertEqual(label, self._format_timeline(t, 'relative', True))
Exemple #51
0
    def _process_code(self, req, cursor, where, data):

        root = self.config.get('stats', 'root', '')
        if root and not root.endswith('/'):
            root += '/'

        project = root + req.args.get('project', '')

        if project:
            cursor.execute("""
            select rev, %s, author, message, %s
            from revision r
            join (
               select rev
               from node_change
               where path like '%s%%'
               group by rev
            ) changes using (rev)
            """ % (SECONDS, REPOS, project) + where + " order by 2")
        else:
            cursor.execute("""
            select rev, %s, author, message, %s
            from revision r
            """ % (SECONDS, REPOS) + where + " order by 2")
        revisions = cursor.fetchall()

        if project:
            query = """
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """ % (REPOS, USING) + where
            if where:
                query += " and nc.path like '%s%%'" % project
            else:
                query += " where nc.path like '%s%%'" % project
            cursor.execute(query)
        else:
            cursor.execute("""
            select nc.rev, %s, nc.path, nc.change_type, r.author
            from node_change nc
            join revision r %s
            """ % (REPOS, USING) + where)
        changes = cursor.fetchall()

        # In version 0.12, support for multiple repositories was
        # added.  We use the reponame to generate proper changeset links.
        if trac.__version__ > '0.12':
            cursor.execute("""
            select id, value
            from repository
            where name = 'name'""")
            repositories = dict(cursor.fetchall())
        else:
            repositories = {}

        if revisions:
            head = revisions[0]
            tail = revisions[-1]
            minrev, mintime = head[0], head[1]
            maxrev, maxtime = tail[0], tail[1]
        else:
            minrev = maxrev = mintime = maxtime = 0

        commits = len(revisions)
        developers = len(set(author for _, _, author, _, _ in revisions))

        data['maxrev'] = maxrev
        data['minrev'] = minrev
        if maxtime:
            data['maxtime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(maxtime))
        else:
            data['maxtime'] = 'N/A'
        if mintime:
            data['mintime'] = time.strftime('%a %m/%d/%Y %H:%M:%S %Z',
                                            time.localtime(mintime))
        else:
            data['mintime'] = 'N/A'

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = (td.days % 365)
        hours = td.seconds // 3600
        data['age'] = '%d years, %d days, %d hours' % (years, days, hours)

        data['developers'] = developers
        data['commits'] = commits
        if age:
            data['commitsperyear'] = '%.2f' % (commits * 365 * 24 * 60 * 60. /
                                               age)
            data['commitspermonth'] = '%.2f' % (commits * 30 * 24 * 60 * 60. /
                                                age)
            data['commitsperday'] = '%.2f' % (commits * 24 * 60 * 60. / age)
            data['commitsperhour'] = '%.2f' % (commits * 60 * 60. / age)
        else:
            data['commitsperyear'] = 0
            data['commitspermonth'] = 0
            data['commitsperday'] = 0
            data['commitsperhour'] = 0

        if revisions:
            avgsize = sum(len(msg) for _, _, _, msg, _ in revisions) / float(
                len(revisions))
            avgchanges = float(len(changes)) / len(revisions)
            data['logentry'] = '%d chars' % avgsize
            data['changes'] = '%.2f' % avgchanges
        else:
            data['logentry'] = 'N/A'
            data['changes'] = 'N/A'

        if self.db_type == 'sqlite':
            strftime = "strftime('%%Y-%%W', %s, 'unixepoch')" % SECONDS
        elif self.db_type == 'mysql':
            strftime = "date_format(from_unixtime(%s), '%%Y-%%u')" % SECONDS
        elif self.db_type == 'postgres':
            strftime = "to_char(to_timestamp(%s), 'YYYY-IW')" % SECONDS  # FIXME: Not %Y-%W
        else:
            assert False

        now = time.time()
        start = now - (52 * 7 * 24 * 60 * 60)
        d = {}
        for _, t, author, _, _ in revisions:
            if t > start:
                week = time.strftime('%Y-%W', time.localtime(t))
                d.setdefault(author, {}).setdefault(week, 0)
                d[author][week] += 1
        stats = []
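        # Per-author statistics: commits, commits per day, changes, distinct
        # paths, and a 52-week activity history.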
        for i, author in enumerate(
                sorted(set(author for _, _, author, _, _ in revisions))):
            commits = len(set(x[0] for x in revisions if x[2] == author))
            mintime = min(x[1] for x in revisions if x[2] == author)
            maxtime = max(x[1] for x in revisions if x[2] == author)
            if maxtime > mintime:
                rate = commits * 24.0 * 60 * 60 / float(maxtime - mintime)
            else:
                rate = 0
            change = sum(1 for x in changes if x[4] == author)
            paths = len(set(x[2] for x in changes if x[4] == author))

            year, week = map(int, time.strftime('%Y %W').split())
            weeks = []
            while len(weeks) < 52:
                name = '%04d-%02d' % (year, week)
                try:
                    total = d[author][name]
                except KeyError:
                    total = 0
                weeks.append({'week': name, 'total': total})
                week -= 1
                if week < 0:
                    year -= 1
                    week = 52
            stats.append({
                'id': i,
                'name': author,
                'url': req.href.stats("code", author=author),
                'commits': commits,
                'rate': '%.2f' % (rate and float(rate) or 0),
                'changes': change,
                'paths': paths,
                'weeks': list(reversed(weeks)),
            })
        data['byauthors'] = stats

        stats = []
        for rev, t, author, msg, repos in reversed(revisions[-10:]):
            reponame = repositories.get(repos, '')
            stats.append({
                'name': msg,
                'author': author,
                'rev': rev,
                'url': req.href.changeset(rev, reponame),
                'url2': req.href.stats("code", author=author),
                'time': pretty_timedelta(to_datetime(float(t))),
            })
        data['recent'] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)

        stats = []
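        # Running total of files in the repository over time (adds/copies
        # increase it, deletes decrease it), downsampled to keep the chart
        # small; only computed when no author filter is active.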
        if not req.args.get('author', ''):
            d = {}
            total = set()
            for rev, _, path, change_type, _ in sorted(changes,
                                                       key=lambda x:
                                                       (times[x[0]], x[1])):
                if change_type in ('A', 'C'):
                    total.add(path)
                elif change_type == 'D' and path in total:
                    total.remove(path)
                d[int(times[rev] * 1000)] = len(total)
            stats = []
            steps = max(len(d) / 50, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                stats.append({
                    'x': k,
                    'y': v,
                })
        data['totalfiles'] = stats

        d = {}
        total = 0
        for _, t, _, _, _ in sorted(revisions, key=lambda x: x[1]):
            total += 1
            d[int(t * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({
                'x': k,
                'y': v,
            })
        data['totalcommits'] = stats

        times = dict((rev, t) for rev, t, _, _, _ in revisions)
        d = {}
        total = 0
        for rev, _, _, _, _ in sorted(changes, key=lambda x: times[x[0]]):
            total += 1
            d[int(times[rev] * 1000)] = total
        stats = []
        steps = max(len(d) / 50, 1)
        for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
            stats.append({
                'x': k,
                'y': v,
            })
        data['totalchanges'] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root):]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1),
                           reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, '')
            if reponame:
                path = reponame + ':' + path
            stats.append({
                'name': path,
                'url': req.href.log(reponame, root + k[1]),
                'count': v,
                'percent': '%.2f' % (100 * v / total)
            })
        data['byfiles'] = stats

        d = {}
        for _, _, _, change_type, author in changes:
            try:
                d[author][change_type] += 1
            except KeyError:
                d[author] = {'A': 0, 'E': 0, 'M': 0, 'C': 0, 'D': 0}
                d[author][change_type] += 1
        stats = []
        for k, v in sorted(d.iteritems()):
            total = sum(v.itervalues())
            adds = int(100.0 * v['A'] / total)
            copies = int(100.0 * v['C'] / total)
            deletes = int(100.0 * v['D'] / total)
            moves = int(100.0 * v['M'] / total)
            # Edits take the remainder so the five percentages sum to 100.
            edits = 100 - (adds + copies + deletes + moves)
            stats.append({
                'name': k,
                'url': req.href.stats("code", author=k),
                'adds': adds,
                'copies': copies,
                'deletes': deletes,
                'edits': edits,
                'moves': moves
            })
        data['bychangetypes'] = stats

        d = {}
        for _, repos, path, _, _ in changes:
            path = path[len(root):]
            slash = path.rfind('/')
            if slash > 0:
                path = path[:slash]
            path = (repos, path)
            try:
                d[path] += 1
            except KeyError:
                d[path] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1),
                           reverse=True)[:10]:
            repos, path = k
            reponame = repositories.get(repos, '')
            if reponame:
                path = reponame + ':' + path
            stats.append({
                'name': path,
                'url': req.href.log(reponame, root + k[1]),
                'count': v,
                'percent': '%.2f' % (100 * v / total)
            })
        data['bypaths'] = stats

        d = {}
        for _, _, path, _, _ in changes:
            path = path[len(root):]
            slash = path.rfind('/')
            if slash > 0:
                path = path[slash + 1:]
            dot = path.rfind('.')
            if dot > 0:
                ext = path[dot:]
                try:
                    d[ext] += 1
                except KeyError:
                    d[ext] = 1
        total = float(sum(d.itervalues()))
        stats = []
        for k, v in sorted(d.iteritems(), key=itemgetter(1),
                           reverse=True)[:10]:
            stats.append({
                'name': k,
                'count': v,
                'percent': '%.2f' % (100 * v / total)
            })
        data['byfiletypes'] = stats

        d = {}
        for rev, repos, path, _, _ in changes:
            path = path[len(root):]
            slash = path.find('/')
            if slash < 0:
                continue
            project = (repos, path[:slash] or 'None')
            try:
                d[project][0] += 1
                d[project][1].add(rev)
                d[project][2].add(path)
            except KeyError:
                d[project] = [1, set([rev]), set([path])]
        stats = []
        for k, v in sorted(d.iteritems(),
                           key=lambda x: len(x[1][1]),
                           reverse=True):
            repos, project = k
            reponame = repositories.get(repos, '')
            if reponame:
                project = reponame + ':' + project
            stats.append({
                'name': project,
                'url': req.href.browser(reponame, root + k[1]),
                'changes': v[0],
                'commits': len(v[1]),
                'paths': len(v[2]),
            })
        data['byproject'] = stats

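        # Histogram of commits by hour of day (local time).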
        hours = dict(('%02d:00' % i, i) for i in range(24))
        d = dict((i, 0) for i in range(24))
        for rev, t, author, _, _ in revisions:
            hour = time.strftime('%H:00', time.localtime(t))
            d[hours[hour]] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({
                'x': x,
                'y': y,
            })
        data['byhour'] = stats

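        # Histogram of commits by day of week ('%w': 0 = Sunday .. 6 = Saturday).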
        d = dict((str(i), 0) for i in range(7))
        for rev, t, author, _, _ in revisions:
            day = time.strftime('%w', time.localtime(t))
            d[day] += 1
        stats = []
        for x, y in sorted(d.iteritems()):
            stats.append({
                'x': x,
                'y': y,
            })
        data['byday'] = stats

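        # Commits per calendar month, filling in empty months with zero.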
        d = {}
        for _, t, _, _, _ in revisions:
            t = time.localtime(t)
            t = (t[0], t[1], 0, 0, 0, 0, 0, 0, 0)
            t = time.mktime(t)
            try:
                d[t] += 1
            except KeyError:
                d[t] = 1
        if d:
            mintime = min(d.keys())
            maxtime = max(d.keys())
            t = time.localtime(mintime)
            while mintime < maxtime:
                t = (t[0], t[1] + 1, 0, 0, 0, 0, 0, 0, 0)
                mintime = time.mktime(t)
                if mintime not in d:
                    d[mintime] = 0
        stats = []
        for k, v in sorted(d.iteritems()):
            stats.append({'x': int(k * 1000), 'y': v})
        data['bymonth'] = stats

        cursor.execute("select distinct(author) from revision")
        authors = set(s for s, in cursor.fetchall())

        projects = set(p[:p.find('/')] for _, _, p, _, _ in changes
                       if p.find('/') != -1)

        ignore = set(stopwords)
        ignore.update(authors)
        ignore.update(projects)

        delete = dict(
            (ord(k), u' ') for k in '.,;:!?-+/\\()<>{}[]=_~`|0123456789*')
        delete.update(dict((ord(k), None) for k in '\"\''))

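        # Word frequencies in commit messages, ignoring punctuation, digits,
        # stopwords, author names and project names (used for the tag cloud).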
        d = {}
        for _, _, _, msg, _ in revisions:
            msg = msg.lower()
            msg = msg.translate(delete)
            for word in msg.split():
                if word not in ignore and len(word) > 1:
                    try:
                        d[word] += 1
                    except KeyError:
                        d[word] = 1
        fonts = ['0.8em', '1.0em', '1.25em', '1.5em', '1.75em', '2.0em']
        items = sorted(d.iteritems(), key=itemgetter(1), reverse=True)[:200]
        min_count = items and min(map(itemgetter(1), items)) or 0
        max_count = items and max(map(itemgetter(1), items)) or 0
        stats = []
        for k, v in sorted(items):
            weight = (log(v) - log(min_count)) / max(
                log(max_count) - log(min_count), 1)
            index = int(floor(weight * len(fonts)))
            index = min(index, len(fonts) - 1)
            stats.append({
                'word':
                k,
                'url':
                req.href.search(q=k, noquickjump=1, changeset="on"),
                'size':
                fonts[index]
            })
        data['cloud'] = stats

        return 'code.html', data, None
Exemple #52
0
    def dateinfo(date):
        return self.get_timeline_link(req,
                                      date,
                                      pretty_timedelta(date),
                                      precision='second')
Exemple #53
0
    def _insert_ticket_data(self, req, db, ticket, reporter_id):
        """Insert ticket data into the hdf"""
        replyto = req.args.get('replyto')
        req.hdf['title'] = '#%d (%s)' % (ticket.id, ticket['summary'])
        req.hdf['ticket'] = ticket.values
        req.hdf['ticket'] = {
            'id': ticket.id,
            'href': req.href.ticket(ticket.id),
            'replyto': replyto
        }

        # -- Ticket fields

        for field in TicketSystem(self.env).get_ticket_fields():
            if field['type'] in ('radio', 'select'):
                value = ticket.values.get(field['name'])
                options = field['options']
                if field['name'] == 'milestone' \
                    and not req.perm.has_permission('TICKET_ADMIN'):
                    options = [
                        opt for opt in options
                        if not Milestone(self.env, opt, db=db).is_completed
                    ]
                if value and value not in options:
                    # The current ticket value must stay visible even if it's
                    # not among the possible values
                    options.append(value)
                field['options'] = options
            name = field['name']
            del field['name']
            if name in ('summary', 'reporter', 'description', 'type', 'status',
                        'resolution', 'owner'):
                field['skip'] = True
            req.hdf['ticket.fields.' + name] = field

        req.hdf['ticket.reporter_id'] = reporter_id
        req.hdf['ticket.description.formatted'] = wiki_to_html(
            ticket['description'], self.env, req, db)

        req.hdf['ticket.opened'] = format_datetime(ticket.time_created)
        req.hdf['ticket.opened_delta'] = pretty_timedelta(ticket.time_created)
        if ticket.time_changed != ticket.time_created:
            req.hdf['ticket'] = {
                'lastmod': format_datetime(ticket.time_changed),
                'lastmod_delta': pretty_timedelta(ticket.time_changed)
            }

        # -- Ticket Change History

        def quote_original(author, original, link):
            if not 'comment' in req.args:  # i.e. the comment was not yet edited
                req.hdf['ticket.comment'] = '\n'.join(
                    [u'En réponse à [%s %s]:' % (link, author)] +
                    [u'> %s' % line for line in original.splitlines()] + [''])

        if replyto == 'description':
            quote_original(ticket['reporter'], ticket['description'],
                           'ticket:%d' % ticket.id)
        replies = {}
        changes = []
        cnum = 0
        description_lastmod = description_author = None
        for change in self.grouped_changelog_entries(ticket, db):
            changes.append(change)
            # wikify comment
            comment = ''
            if 'comment' in change:
                comment = change['comment']
                change['comment'] = wiki_to_html(comment, self.env, req, db)
            if change['permanent']:
                cnum = change['cnum']
                # keep track of replies threading
                if 'replyto' in change:
                    replies.setdefault(change['replyto'], []).append(cnum)
                # possibly quote the replied-to comment
                if replyto == str(cnum):
                    quote_original(change['author'], comment,
                                   'comment:%s' % replyto)
            if 'description' in change['fields']:
                change['fields']['description'] = ''
                description_lastmod = change['date']
                description_author = change['author']

        req.hdf['ticket'] = {
            'changes': changes,
            'replies': replies,
            'cnum': cnum + 1
        }
        if description_lastmod:
            req.hdf['ticket.description'] = {
                'lastmod': description_lastmod,
                'author': description_author
            }

        # -- Ticket Attachments

        req.hdf['ticket.attachments'] = attachments_to_hdf(
            self.env, req, db, 'ticket', ticket.id)
        if req.perm.has_permission('TICKET_APPEND'):
            req.hdf['ticket.attach_href'] = req.href.attachment(
                'ticket', ticket.id)

        # Add the possible actions to hdf
        actions = TicketSystem(self.env).get_available_actions(
            ticket, req.perm)
        for action in actions:
            req.hdf['ticket.actions.' + action] = '1'
Exemple #54
0
    def _render_html(self, req, repos, chgset, restricted, message,
                     diff, diff_options):
        """HTML version"""
        req.hdf['changeset'] = {
            'chgset': chgset and True,
            'restricted': restricted,
            'href': {
                'new_rev': req.href.changeset(diff.new_rev),
                'old_rev': req.href.changeset(diff.old_rev),
                'new_path': req.href.browser(diff.new_path, rev=diff.new_rev),
                'old_path': req.href.browser(diff.old_path, rev=diff.old_rev)
            }
        }

        if chgset: # Changeset Mode (possibly restricted on a path)
            path, rev = diff.new_path, diff.new_rev

            # -- getting the change summary from the Changeset.get_changes
            def get_changes():
                for npath, kind, change, opath, orev in chgset.get_changes():
                    old_node = new_node = None
                    if (restricted and
                        not (npath == path or                # same path
                             npath.startswith(path + '/') or # npath is below
                             path.startswith(npath + '/'))): # npath is above
                        continue
                    if change != Changeset.ADD:
                        old_node = repos.get_node(opath, orev)
                    if change != Changeset.DELETE:
                        new_node = repos.get_node(npath, rev)
                    yield old_node, new_node, kind, change

            def _changeset_title(rev):
                if restricted:
                    return 'Changeset %s for %s' % (rev, path)
                else:
                    return u'변경사항 %s' % rev

            title = _changeset_title(rev)
            properties = []
            for name, value, wikiflag, htmlclass in chgset.get_properties():
                if wikiflag:
                    value = wiki_to_html(value or '', self.env, req)
                properties.append({'name': name, 'value': value,
                                   'htmlclass': htmlclass})

            req.hdf['changeset'] = {
                'revision': chgset.rev,
                'time': format_datetime(chgset.date),
                'age': pretty_timedelta(chgset.date, None, 3600),
                'author': chgset.author or 'anonymous',
                'message': message, 'properties': properties
            }
            oldest_rev = repos.oldest_rev
            if chgset.rev != oldest_rev:
                if restricted:
                    prev = repos.get_node(path, rev).get_previous()
                    if prev:
                        prev_path, prev_rev = prev[:2]
                        if prev_rev:
                            prev_href = req.href.changeset(prev_rev, prev_path)
                    else:
                        prev_path = prev_rev = None
                else:
                    add_link(req, 'first', req.href.changeset(oldest_rev),
                             'Changeset %s' % oldest_rev)
                    prev_path = diff.old_path
                    prev_rev = repos.previous_rev(chgset.rev)
                    if prev_rev:
                        prev_href = req.href.changeset(prev_rev)
                if prev_rev:
                    add_link(req, 'prev', prev_href, _changeset_title(prev_rev))
            youngest_rev = repos.youngest_rev
            if str(chgset.rev) != str(youngest_rev):
                if restricted:
                    next_rev = repos.next_rev(chgset.rev, path)
                    if next_rev:
                        if repos.has_node(path, next_rev):
                            next_href = req.href.changeset(next_rev, path)
                        else: # must be a 'D'elete or 'R'ename, show full cset
                            next_href = req.href.changeset(next_rev)
                else:
                    add_link(req, 'last', req.href.changeset(youngest_rev),
                             'Changeset %s' % youngest_rev)
                    next_rev = repos.next_rev(chgset.rev)
                    if next_rev:
                        next_href = req.href.changeset(next_rev)
                if next_rev:
                    add_link(req, 'next', next_href, _changeset_title(next_rev))

        else: # Diff Mode
            # -- getting the change summary from the Repository.get_changes
            def get_changes():
                for d in repos.get_changes(**diff):
                    yield d

            reverse_href = req.href.changeset(diff.old_rev, diff.old_path,
                                                   old=diff.new_rev,
                                                   old_path=diff.new_path)
            req.hdf['changeset.reverse_href'] = reverse_href
            req.hdf['changeset.href.log'] = req.href.log(
                diff.new_path, rev=diff.new_rev, stop_rev=diff.old_rev)
            title = self.title_for_diff(diff)
        req.hdf['title'] = title

        if not req.perm.has_permission('BROWSER_VIEW'):
            return

        def _change_info(old_node, new_node, change):
            info = {'change': change}
            if old_node:
                info['path.old'] = old_node.path
                info['rev.old'] = old_node.rev
                info['shortrev.old'] = repos.short_rev(old_node.rev)
                old_href = req.href.browser(old_node.created_path,
                                            rev=old_node.created_rev)
                # Reminder: old_node.path may not exist at old_node.rev
                #           as long as old_node.rev==old_node.created_rev
                #           ... and diff.old_rev may have nothing to do
                #           with _that_ node specific history...
                info['browser_href.old'] = old_href
            if new_node:
                info['path.new'] = new_node.path
                info['rev.new'] = new_node.rev # created rev.
                info['shortrev.new'] = repos.short_rev(new_node.rev)
                new_href = req.href.browser(new_node.created_path,
                                            rev=new_node.created_rev)
                # (same remark as above)
                info['browser_href.new'] = new_href
            return info

        hidden_properties = self.config.getlist('browser', 'hide_properties')

        def _prop_changes(old_node, new_node):
            old_props = old_node.get_properties()
            new_props = new_node.get_properties()
            changed_props = {}
            if old_props != new_props:
                for k,v in old_props.items():
                    if not k in new_props:
                        changed_props[k] = {
                            'old': render_node_property(self.env, k, v)}
                    elif v != new_props[k]:
                        changed_props[k] = {
                            'old': render_node_property(self.env, k, v),
                            'new': render_node_property(self.env, k,
                                                        new_props[k])}
                for k,v in new_props.items():
                    if not k in old_props:
                        changed_props[k] = {
                            'new': render_node_property(self.env, k, v)}
                for k in hidden_properties:
                    if k in changed_props:
                        del changed_props[k]
            changed_properties = []
            for name, props in changed_props.iteritems():
                props.update({'name': name})
                changed_properties.append(props)
            return changed_properties

        def _estimate_changes(old_node, new_node):
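            # Rough size estimate of a change, used below to enforce the
            # max_diff_bytes limit.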
            old_size = old_node.get_content_length()
            new_size = new_node.get_content_length()
            return old_size + new_size

        def _content_changes(old_node, new_node):
            """Returns the list of differences.

            The list is empty when no differences between comparable files
            are detected, but the return value is None for non-comparable files.
            """
            old_content = old_node.get_content().read()
            if is_binary(old_content):
                return None

            new_content = new_node.get_content().read()
            if is_binary(new_content):
                return None

            mview = Mimeview(self.env)
            old_content = mview.to_unicode(old_content, old_node.content_type)
            new_content = mview.to_unicode(new_content, new_node.content_type)

            if old_content != new_content:
                context = 3
                options = diff_options[1]
                for option in options:
                    if option.startswith('-U'):
                        context = int(option[2:])
                        break
                if context < 0:
                    context = None
                tabwidth = self.config['diff'].getint('tab_width') or \
                           self.config['mimeviewer'].getint('tab_width', 8)
                return hdf_diff(old_content.splitlines(),
                                new_content.splitlines(),
                                context, tabwidth,
                                ignore_blank_lines='-B' in options,
                                ignore_case='-i' in options,
                                ignore_space_changes='-b' in options)
            else:
                return []

        if req.perm.has_permission('FILE_VIEW'):
            diff_bytes = diff_files = 0
            if self.max_diff_bytes or self.max_diff_files:
                for old_node, new_node, kind, change in get_changes():
                    if change in Changeset.DIFF_CHANGES and kind == Node.FILE:
                        diff_files += 1
                        diff_bytes += _estimate_changes(old_node, new_node)
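            # Render inline diffs only when the configured file-count limit is
            # respected and either the byte-size limit is respected or a single
            # file changed.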
            show_diffs = (not self.max_diff_files or \
                          diff_files <= self.max_diff_files) and \
                         (not self.max_diff_bytes or \
                          diff_bytes <= self.max_diff_bytes or \
                          diff_files == 1)
        else:
            show_diffs = False

        idx = 0
        for old_node, new_node, kind, change in get_changes():
            show_entry = change != Changeset.EDIT
            if change in Changeset.DIFF_CHANGES and \
                   req.perm.has_permission('FILE_VIEW'):
                assert old_node and new_node
                props = _prop_changes(old_node, new_node)
                if props:
                    req.hdf['changeset.changes.%d.props' % idx] = props
                    show_entry = True
                if kind == Node.FILE and show_diffs:
                    diffs = _content_changes(old_node, new_node)
                    if diffs != []:
                        if diffs:
                            req.hdf['changeset.changes.%d.diff' % idx] = diffs
                        # elif None (means: manually compare to (previous))
                        show_entry = True
            if show_entry or not show_diffs:
                info = _change_info(old_node, new_node, change)
                if change in Changeset.DIFF_CHANGES and not show_diffs:
                    if chgset:
                        diff_href = req.href.changeset(new_node.rev,
                                                       new_node.path)
                    else:
                        diff_href = req.href.changeset(
                            new_node.created_rev, new_node.created_path,
                            old=old_node.created_rev,
                            old_path=old_node.created_path)
                    info['diff_href'] = diff_href
                req.hdf['changeset.changes.%d' % idx] = info
            idx += 1 # the sequence should be immutable
Exemple #55
0
    def _save_attachement(self, req, attachment):
        from trac.web import RequestDone
        from trac.attachment import AttachmentModule, InvalidAttachment
        from trac.resource import get_resource_url
        from trac.timeline.web_ui import TimelineModule
        import os
        import unicodedata
        from trac.util.datefmt import pretty_timedelta

        response = None
        try:
            upload = req.args["attachment"]
            if not hasattr(upload, "filename") or not upload.filename:
                raise TracError(_("No file uploaded"))
            if hasattr(upload.file, "fileno"):
                size = os.fstat(upload.file.fileno())[6]
            else:
                upload.file.seek(0, 2)  # seek to end of file
                size = upload.file.tell()
                upload.file.seek(0)
            if size == 0:
                raise TracError(_("Can't upload empty file"))

            # Maximum attachment size (in bytes)
            max_size = AttachmentModule(self.env).max_size
            if max_size >= 0 and size > max_size:
                raise TracError(_("Maximum attachment size: %(num)s bytes", num=max_size), _("Upload failed"))

            # We try to normalize the filename to unicode NFC if we can.
            # Files uploaded from OS X might be in NFD.
            filename = unicodedata.normalize("NFC", unicode(upload.filename, "utf-8"))
            filename = filename.replace("\\", "/").replace(":", "/")
            filename = os.path.basename(filename)
            if not filename:
                raise TracError(_("No file uploaded"))
            # Now the filename is known, update the attachment resource
            # attachment.filename = filename
            attachment.description = req.args.get("description", "")
            attachment.author = get_reporter_id(req, "author")
            attachment.ipnr = req.remote_addr

            # Validate attachment
            for manipulator in AttachmentModule(self.env).manipulators:
                for field, message in manipulator.validate_attachment(req, attachment):
                    if field:
                        raise InvalidAttachment(
                            _("Attachment field %(field)s is " "invalid: %(message)s", field=field, message=message)
                        )
                    else:
                        raise InvalidAttachment(_("Invalid attachment: %(message)s", message=message))

            if req.args.get("replace"):
                try:
                    old_attachment = Attachment(self.env, attachment.resource(id=filename))
                    if not (old_attachment.author and req.authname and old_attachment.author == req.authname):
                        req.perm(attachment.resource).require("ATTACHMENT_DELETE")
                    if not attachment.description.strip() and old_attachment.description:
                        attachment.description = old_attachment.description
                    old_attachment.delete()
                except TracError:
                    pass  # don't worry if there's nothing to replace
                attachment.filename = None
            attachment.insert(filename, upload.file, size)
            timeline = TimelineModule(self.env).get_timeline_link(
                req, attachment.date, pretty_timedelta(attachment.date), precision="second"
            )
            response = {
                "attachment": {
                    "href": get_resource_url(self.env, attachment.resource, req.href),
                    "realm": attachment.resource.parent.realm,
                    "objid": attachment.resource.parent.id,
                    "filename": filename,
                    "size": size,
                    "author": attachment.author,
                    "description": attachment.description,
                    "timeline": timeline.generate().render().replace("<", "&lt;").replace(">", "&gt;"),
                }
            }
        except (TracError, InvalidAttachment), e:
            response = {"error": e.message}
Exemple #56
0
    def _process_tickets(self, req, cursor, where, since, data):

        cursor.execute(
            """
        select
            min(%s),
            max(%s),
            count(*),
            count(distinct reporter) """
            % (SECONDS, SECONDS)
            + """
        from ticket """
            + where.replace("author", "reporter")
        )
        mintime, maxtime, tickets, reporters = cursor.fetchall()[0]

        data["reporters"] = reporters
        if maxtime:
            data["maxtime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(maxtime))
        else:
            data["maxtime"] = "N/A"
        if mintime:
            data["mintime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(mintime))
        else:
            data["mintime"] = "N/A"

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = td.days % 365
        hours = td.seconds // 3600
        data["age"] = "%d years, %d days, %d hours" % (years, days, hours)

        data["total"] = tickets
        if age:
            data["peryear"] = "%.2f" % (tickets * 365 * 24 * 60 * 60.0 / age)
            data["permonth"] = "%.2f" % (tickets * 30 * 24 * 60 * 60.0 / age)
            data["perday"] = "%.2f" % (tickets * 24 * 60 * 60.0 / age)
            data["perhour"] = "%.2f" % (tickets * 60 * 60.0 / age)
        else:
            data["peryear"] = 0
            data["permonth"] = 0
            data["perday"] = 0
            data["perhour"] = 0

        cursor.execute(
            """\
        select author, sum(reports), sum(changes)
        from (select reporter as author, count(*) as reports, 0 as changes
              from ticket """
            + where.replace("author", "reporter")
            + """
              group by 1
              union
              select author, 0 as reports, count(*) as changes
              from ticket_change """
            + where
            + """
              group by 1
              ) as data
        group by 1 order by 2 desc
        """
        )
        rows = cursor.fetchall()
        d = dict((path, (int(x), int(y))) for path, x, y in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({"name": k, "url": req.href.stats("tickets", author=k), "reports": v[0], "changes": v[1]})
        data["byauthor"] = stats

        cursor.execute(
            """\
        select t.component, count(distinct t.id), count(distinct open.id)
        from ticket t
        join ticket open using (component)
        where (open.resolution is null or length(open.resolution) = 0) """
            + where.replace("where", "and").replace("time", "t.time").replace("author", "t.reporter")
            + """
        group by 1 order by 2 desc
        """
        )
        rows = cursor.fetchall()
        stats = []
        for component, total, open in rows:
            stats.append(
                {
                    "name": component,
                    "url": req.href.query(status=("new", "opened", "resolved"), component=component, order="priority"),
                    "open": open,
                    "total": total,
                }
            )
        data["bycomponent"] = stats

        cursor.execute(
            """\
        select t.milestone, count(distinct t.id), count(distinct open.id)
        from ticket t
        join ticket open using (milestone)
        where (open.resolution is null or length(open.resolution) = 0) """
            + where.replace("where", "and").replace("time", "t.time").replace("author", "t.reporter")
            + """
        group by 1 order by 2 desc
        """
        )
        rows = cursor.fetchall()
        stats = []
        for milestone, total, open in rows:
            stats.append(
                {
                    "name": milestone,
                    "url": req.href.query(status=("new", "opened", "resolved"), milestone=milestone, order="priority"),
                    "open": open,
                    "total": total,
                }
            )
        data["bymilestone"] = stats

        stats = []
        if not req.args.get("author", ""):
            __where = where.replace("where %s > %s" % (SECONDS, since), "")
            __where = __where.replace("and %s > %s" % (SECONDS, since), "")
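            # Drop the "since" filter so the opened/accepted history starts
            # from the first ticket; it is re-applied below when sampling.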
            cursor.execute(
                """\
            select id, %s, 'none' as oldvalue, 'new' as newvalue
            from ticket """
                % SECONDS
                + __where
                + """
            union
            select ticket, %s, oldvalue, newvalue
            from ticket_change where field = 'status' """
                % SECONDS
                + __where.replace("where", "and")
            )
            rows = cursor.fetchall()
            d = {}
            opened = 0
            accepted = 0
            for ticket, t, oldvalue, newvalue in sorted(rows, key=itemgetter(1)):
                if newvalue == "accepted" and oldvalue != "accepted":
                    accepted += 1
                elif newvalue != "accepted" and oldvalue == "accepted":
                    accepted -= 1
                if newvalue in ("new", "reopened") and oldvalue not in ("new", "reopened"):
                    opened += 1
                elif newvalue == "closed" and oldvalue != "closed":
                    opened -= 1
                d[int(t)] = (opened, accepted)
            steps = max(len(d) / 250, 1)
            for k, v in sorted(d.iteritems(), key=itemgetter(0))[::steps]:
                if k > since:
                    stats.append({"x": k * 1000, "opened": v[0], "accepted": v[1]})
        data["history"] = stats

        cursor.execute(
            """\
        select tc.ticket, t.component, t.summary, count(*)
        from ticket_change tc
        join ticket t on t.id = tc.ticket """
            + where.replace("time", "tc.time")
            + """
        group by 1, 2, 3
        order by 3 desc
        limit 10
        """
        )
        rows = cursor.fetchall()
        total = float(sum(int(v) for _, _, _, v in rows))
        stats = []
        for ticket, component, summary, v in rows:
            stats.append(
                {
                    "name": summary,
                    "id": ticket,
                    "component": component,
                    "url": req.href.ticket(ticket),
                    "url2": req.href.query(component=component, order="priority"),
                    "count": int(v),
                    "percent": "%.2f" % (100 * int(v) / total),
                }
            )
        data["active"] = stats

        cursor.execute(
            """
        select id, component, summary, %s
        from ticket
        where status != 'closed' """
            % SECONDS
            + where.replace("where", "and").replace("author", "reporter")
            + """
        order by 4 asc
        limit 10
        """
        )
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append(
                {
                    "name": summary,
                    "id": ticket,
                    "component": component,
                    "url": req.href.ticket(ticket),
                    "url2": req.href.query(component=component, order="priority"),
                    "time": pretty_timedelta(to_datetime(float(t))),
                }
            )
        data["oldest"] = stats

        cursor.execute(
            """
        select id, component, summary, %s
        from ticket """
            % SECONDS
            + where.replace("author", "reporter")
            + """
        order by 4 desc
        limit 10
        """
        )
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append(
                {
                    "name": summary,
                    "id": ticket,
                    "component": component,
                    "url": req.href.ticket(ticket),
                    "url2": req.href.query(component=component, order="priority"),
                    "time": pretty_timedelta(to_datetime(float(t))),
                }
            )
        data["newest"] = stats

        cursor.execute(
            """
        select tc.ticket, t.component, t.summary, tc.%s
        from ticket_change tc
        join ticket t on t.id = tc.ticket """
            % SECONDS
            + where.replace("where", "and").replace("time", "tc.time")
            + """
        order by 4 desc
        limit 10
        """
        )
        rows = cursor.fetchall()
        stats = []
        for ticket, component, summary, t in rows:
            stats.append(
                {
                    "name": summary,
                    "id": ticket,
                    "component": component,
                    "url": req.href.ticket(ticket),
                    "url2": req.href.query(component=component, order="priority"),
                    "time": pretty_timedelta(to_datetime(float(t))),
                }
            )

        data["recent"] = stats

        return "tickets.html", data, None
Exemple #57
0
    def _format_reminder_text(self, ticket, id, author, origin, description):
        return "Ticket reminder added by %s %s ago (%s)%s" % (
            author, pretty_timedelta(origin), format_datetime(origin),
            ":\n%s" % (description, ) if description else ".")
Exemple #58
0
    def _process_wiki(self, req, cursor, where, since, data):

        cursor.execute(
            """
        select min(%s),
               max(%s),
               count(*),
               count(distinct author) """
            % (SECONDS, SECONDS)
            + """
        from wiki """
            + where
        )
        mintime, maxtime, edits, editors = cursor.fetchall()[0]

        data["editors"] = editors
        if maxtime:
            data["maxtime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(maxtime))
        else:
            data["maxtime"] = "N/A"
        if mintime:
            data["mintime"] = time.strftime("%a %m/%d/%Y %H:%M:%S %Z", time.localtime(mintime))
        else:
            data["mintime"] = "N/A"

        if mintime and maxtime:
            age = float(maxtime - mintime)
        else:
            age = 0
        td = datetime.timedelta(seconds=age)
        years = td.days // 365
        days = td.days % 365
        hours = td.seconds // 3600
        data["age"] = "%d years, %d days, %d hours" % (years, days, hours)

        data["edits"] = edits
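        # Edit rates: total edits scaled by (seconds per period / age in seconds).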
        if age:
            data["peryear"] = "%.2f" % (edits * 365 * 24 * 60 * 60.0 / age)
            data["permonth"] = "%.2f" % (edits * 30 * 24 * 60 * 60.0 / age)
            data["perday"] = "%.2f" % (edits * 24 * 60 * 60.0 / age)
            data["perhour"] = "%.2f" % (edits * 60 * 60.0 / age)
        else:
            data["peryear"] = 0
            data["permonth"] = 0
            data["perday"] = 0
            data["perhour"] = 0

        cursor.execute("select name, author, count(*) from wiki " + where + " group by 1, 2")
        pages = cursor.fetchall()

        d = {}
        for name, author, count in pages:
            try:
                d[author][0] += count
                d[author][1].add(name)
            except KeyError:
                d[author] = [count, set([name])]
        total = float(sum(x[0] for x in d.values()))
        stats = []
        for k, v in sorted(d.items(), key=lambda item: item[1][0], reverse=True):
            stats.append(
                {
                    "name": k,
                    "url": req.href.stats("wiki", author=k),
                    "count": v[0],
                    "pages": len(v[1]),
                    "percent": "%.2f" % (100 * v[0] / total),
                }
            )
        data["byauthor"] = stats

        # For the history series, drop the 'since' time filter so the
        # cumulative page count includes pages created before the window.
        __where = where.replace("where %s > %s" % (SECONDS, since), "")
        __where = __where.replace("and %s > %s" % (SECONDS, since), "")
        cursor.execute(
            """
        select name, %s """
            % SECONDS
            + """
        from wiki """
            + __where
            + """
        order by 2 asc
        """
        )
        history = cursor.fetchall()

        stats = []
        if not req.args.get("author", ""):
            d = {}
            total = set()
            for name, t in history:
                total.add(name)
                d[int(t)] = len(total)
            stats = []
            # Downsample to at most ~250 points; the slice step must be an int.
            steps = max(len(d) // 250, 1)
            for k, v in sorted(d.items(), key=itemgetter(0))[::steps]:
                if k > since:
                    stats.append({"x": k * 1000, "y": v})
        data["history"] = stats

        d = {}
        for name, _, count in pages:
            try:
                d[name] += count
            except KeyError:
                d[name] = count
        total = float(sum(d.values()))
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True)[:10]:
            stats.append({"name": k, "url": req.href.wiki(k), "count": v, "percent": "%.2f" % (100 * v / total)})
        data["pages"] = stats

        cursor.execute(
            """
        select name, version, length(text)
        from wiki """
            + where
            + """
        group by 1, 2, 3
        having version = max(version)
        order by 3 desc
        limit 10
        """
        )
        rows = cursor.fetchall()
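        # One size per page: the dict keeps the last (name, size) row seen.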
        d = dict((name, int(size)) for name, _, size in rows)
        stats = []
        for k, v in sorted(d.items(), key=itemgetter(1), reverse=True):
            stats.append({"name": k, "url": req.href.wiki(k), "size": v})
        data["largest"] = stats

        cursor.execute(
            """
        select name, version, author, %s """
            % SECONDS
            + """
        from wiki """
            + where
            + """
        order by 4 desc
        limit 10
        """
        )
        rows = cursor.fetchall()
        stats = []
        for name, version, author, t in rows:
            stats.append(
                {
                    "name": name,
                    "author": author,
                    "url": req.href.wiki(name, version=version),
                    "url2": req.href.stats("wiki", author=author),
                    "time": pretty_timedelta(to_datetime(float(t))),
                }
            )

        data["recent"] = stats

        return "wiki.html", data, None
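
The 'history' series in this example counts how many distinct wiki pages exist at each revision timestamp, then thins the points down to roughly 250. A standalone sketch of that idea, detached from the database cursor and request object (function and parameter names are made up):

def cumulative_page_history(revisions, since=0, max_points=250):
    # revisions: iterable of (page_name, unix_seconds), ordered by time ascending.
    # Returns [{'x': milliseconds, 'y': number_of_distinct_pages}, ...].
    seen = set()
    points = {}
    for name, t in revisions:
        seen.add(name)
        points[int(t)] = len(seen)
    step = max(len(points) // max_points, 1)  # keep at most ~max_points samples
    return [{"x": t * 1000, "y": n}
            for t, n in sorted(points.items())[::step]
            if t > since]
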
Exemple #59
0
    def get_list(self, realm, wl, req, fields=None):
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        context = Context.from_request(req)
        locale = getattr(req, 'locale', LC_TIME)

        ticketlist = []
        extradict = {}
        if not fields:
            fields = set(self.default_fields['ticket'])
        else:
            fields = set(fields)

        if 'changetime' in fields:
            # Sentinel extremes: any real ticket time will replace these.
            max_changetime = datetime(1970, 1, 1, tzinfo=utc)
            min_changetime = datetime.now(utc)
        if 'time' in fields:
            max_time = datetime(1970, 1, 1, tzinfo=utc)
            min_time = datetime.now(utc)

        for sid, last_visit in wl.get_watched_resources(
                'ticket', req.authname):
            ticketdict = {}
            try:
                ticket = Ticket(self.env, sid, db)
                exists = ticket.exists
            except Exception:
                # Invalid or no longer existing ticket id.
                exists = False

            if not exists:
                ticketdict['deleted'] = True
                if 'id' in fields:
                    ticketdict['id'] = sid
                    ticketdict['ID'] = '#' + sid
                if 'author' in fields:
                    ticketdict['author'] = '?'
                if 'changetime' in fields:
                    ticketdict['changedsincelastvisit'] = 1
                    ticketdict['changetime'] = '?'
                    ticketdict['ichangetime'] = 0
                if 'time' in fields:
                    ticketdict['time'] = '?'
                    ticketdict['itime'] = 0
                if 'comment' in fields:
                    ticketdict['comment'] = tag.strong(t_("deleted"),
                                                       class_='deleted')
                if 'notify' in fields:
                    ticketdict['notify'] = wl.is_notify(req, 'ticket', sid)
                if 'description' in fields:
                    ticketdict['description'] = ''
                if 'owner' in fields:
                    ticketdict['owner'] = ''
                if 'reporter' in fields:
                    ticketdict['reporter'] = ''
                ticketlist.append(ticketdict)
                continue

            render_elt = lambda x: x
            if not (Chrome(self.env).show_email_addresses or \
                    'EMAIL_VIEW' in req.perm(ticket.resource)):
                render_elt = obfuscate_email_address

            # Copy all requested fields from ticket
            if fields:
                for f in fields:
                    ticketdict[f] = ticket.values.get(f, u'')
            else:
                ticketdict = ticket.values.copy()

            changetime = ticket.time_changed
            if wl.options['attachment_changes']:
                for attachment in Attachment.select(self.env, 'ticket', sid,
                                                    db):
                    if attachment.date > changetime:
                        changetime = attachment.date
            if 'attachment' in fields:
                attachments = []
                for attachment in Attachment.select(self.env, 'ticket', sid,
                                                    db):
                    wikitext = u'[attachment:"' + u':'.join([
                        attachment.filename, 'ticket', sid
                    ]) + u'" ' + attachment.filename + u']'
                    attachments.extend([
                        tag(', '),
                        format_to_oneliner(self.env,
                                           context,
                                           wikitext,
                                           shorten=False)
                    ])
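                # attachments is built as [separator, item, ...]; reversing and
                # popping drops the leading ', ' (it also reverses the display order).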
                if attachments:
                    attachments.reverse()
                    attachments.pop()
                ticketdict['attachment'] = moreless(attachments, 5)

            # Changes are special: the comment, commentnum and last author are derived from them.
            if 'changes' in fields or 'author' in fields or 'comment' in fields or 'commentnum' in fields:
                changes = []
                # If there are no changes, the reporter is the last author.
                author = ticket.values['reporter']
                commentnum = u"0"
                comment = u""
                want_changes = 'changes' in fields
                for date, cauthor, field, oldvalue, newvalue, permanent in ticket.get_changelog(
                        changetime, db):
                    author = cauthor
                    if field == 'comment':
                        if 'commentnum' in fields:
                            ticketdict['commentnum'] = to_unicode(oldvalue)
                        if 'comment' in fields:
                            comment = to_unicode(newvalue)
                            comment = moreless(comment, 200)
                            ticketdict['comment'] = comment
                        if not want_changes:
                            break
                    else:
                        if want_changes:
                            label = self.fields['ticket'].get(field, u'')
                            if label:
                                changes.extend([
                                    tag(
                                        tag.strong(label), ' ',
                                        render_property_diff(
                                            self.env, req, ticket, field,
                                            oldvalue, newvalue)),
                                    tag('; ')
                                ])
                if want_changes:
                    # Remove the last tag('; '):
                    if changes:
                        changes.pop()
                    changes = moreless(changes, 5)
                    ticketdict['changes'] = tag(changes)

            if 'id' in fields:
                ticketdict['id'] = sid
                ticketdict['ID'] = format_to_oneliner(self.env,
                                                      context,
                                                      '#' + sid,
                                                      shorten=True)
            if 'cc' in fields:
                if render_elt == obfuscate_email_address:
                    ticketdict['cc'] = ', '.join(
                        [render_elt(c) for c in ticketdict['cc'].split(', ')])
            if 'author' in fields:
                ticketdict['author'] = render_elt(author)
            if 'changetime' in fields:
                ichangetime = to_timestamp(changetime)
                ticketdict.update(
                    changetime=format_datetime(changetime,
                                               locale=locale,
                                               tzinfo=req.tz),
                    ichangetime=ichangetime,
                    changedsincelastvisit=(last_visit < ichangetime and 1
                                           or 0),
                    changetime_delta=pretty_timedelta(changetime),
                    changetime_link=req.href.timeline(
                        precision='seconds',
                        from_=trac_format_datetime(changetime,
                                                   'iso8601',
                                                   tzinfo=req.tz)))
                if changetime > max_changetime:
                    max_changetime = changetime
                if changetime < min_changetime:
                    min_changetime = changetime
            if 'time' in fields:
                time = ticket.time_created
                ticketdict.update(time=format_datetime(time,
                                                       locale=locale,
                                                       tzinfo=req.tz),
                                  itime=to_timestamp(time),
                                  time_delta=pretty_timedelta(time),
                                  time_link=req.href.timeline(
                                      precision='seconds',
                                      from_=trac_format_datetime(
                                          time, 'iso8601', tzinfo=req.tz)))
                if time > max_time:
                    max_time = time
                if time < min_time:
                    min_time = time
            if 'description' in fields:
                description = ticket.values['description']
                description = moreless(description, 200)
                ticketdict['description'] = description
            if 'notify' in fields:
                ticketdict['notify'] = wl.is_notify(req, 'ticket', sid)
            if 'owner' in fields:
                ticketdict['owner'] = render_elt(ticket.values['owner'])
            if 'reporter' in fields:
                ticketdict['reporter'] = render_elt(ticket.values['reporter'])
            if 'tags' in fields and self.tagsystem:
                tags = []
                for t in self.tagsystem.get_tags(req, Resource('ticket', sid)):
                    tags.extend(
                        [tag.a(t, href=req.href('tags', q=t)),
                         tag(', ')])
                if tags:
                    tags.pop()
                ticketdict['tags'] = moreless(tags, 10)

            ticketlist.append(ticketdict)

        if 'changetime' in fields:
            extradict['max_changetime'] = format_datetime(max_changetime,
                                                          locale=locale,
                                                          tzinfo=req.tz)
            extradict['min_changetime'] = format_datetime(min_changetime,
                                                          locale=locale,
                                                          tzinfo=req.tz)
        if 'time' in fields:
            extradict['max_time'] = format_datetime(max_time,
                                                    locale=locale,
                                                    tzinfo=req.tz)
            extradict['min_time'] = format_datetime(min_time,
                                                    locale=locale,
                                                    tzinfo=req.tz)

        return ticketlist, extradict
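
A note on the render_elt pattern used above: e-mail-like values (author, owner, reporter, cc) are passed through obfuscate_email_address unless the viewer is allowed to see addresses. A minimal standalone sketch of the same idea, with the Chrome/permission check reduced to a plain boolean (everything outside trac.util.text is made up):

from trac.util.text import obfuscate_email_address

def make_renderer(show_email_addresses):
    # Identity when addresses may be shown, obfuscation otherwise.
    if show_email_addresses:
        return lambda value: value
    return obfuscate_email_address

render = make_renderer(False)
print(render('user@example.com'))  # prints an obfuscated form, e.g. 'user@…'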