def get_forums(self, req, cursor, order_by = 'ORDER BY subject ASC'):
    """Return all forums as display-ready dicts.

    Each dict is keyed by ``columns``; moderators and description are
    rendered as one-line wiki markup, timestamps formatted for display.
    ``order_by`` must be a trusted SQL fragment -- it is interpolated
    directly into the query.
    """
    columns = ('id', 'name', 'author', 'time', 'moderators', 'group',
               'subject', 'description', 'topics', 'replies', 'lastreply',
               'lasttopic')
    # FIX: the two MAX(time) sub-select aliases were swapped (message
    # times were labelled "lasttopic" and topic times "lastreply").
    # The positional zip() below was unaffected, but the aliases now
    # match the columns they actually feed.
    sql = "SELECT id, name, author, time, moderators, forum_group," \
          " subject, description, (SELECT COUNT(id) FROM topic t WHERE" \
          " t.forum = forum.id) AS topics, (SELECT COUNT(id) FROM message m" \
          " WHERE m.forum = forum.id) AS replies, (SELECT MAX(time) FROM" \
          " message m WHERE m.forum = forum.id) AS lastreply, (SELECT" \
          " MAX(time) FROM topic t WHERE t.forum = forum.id) AS lasttopic" \
          " FROM forum " + order_by
    self.log.debug(sql)
    cursor.execute(sql)
    forums = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['moderators'] = wiki_to_oneliner(row['moderators'], self.env)
        row['description'] = wiki_to_oneliner(row['description'], self.env)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(row['lastreply'])
        else:
            row['lastreply'] = 'No replies'
        if row['lasttopic']:
            row['lasttopic'] = pretty_timedelta(row['lasttopic'])
        else:
            row['lasttopic'] = 'No topics'
        row['time'] = format_datetime(row['time'])
        forums.append(row)
    return forums
def get_forums(self, req, cursor, order_by='ORDER BY subject ASC'):
    """Return all forums as display-ready dicts (see ``columns``).

    Moderators/description are wiki-rendered, times formatted.
    ``order_by`` must be a trusted SQL fragment.
    """
    columns = ('id', 'name', 'author', 'time', 'moderators', 'group',
               'subject', 'description', 'topics', 'replies', 'lastreply',
               'lasttopic')
    # FIX: the MAX(time) sub-select aliases were swapped (message times
    # labelled "lasttopic" and vice versa).  Result values and their
    # positional mapping via zip() are unchanged; only the misleading
    # alias names are corrected.
    sql = "SELECT id, name, author, time, moderators, forum_group," \
          " subject, description, (SELECT COUNT(id) FROM topic t WHERE" \
          " t.forum = forum.id) AS topics, (SELECT COUNT(id) FROM message m" \
          " WHERE m.forum = forum.id) AS replies, (SELECT MAX(time) FROM" \
          " message m WHERE m.forum = forum.id) AS lastreply, (SELECT" \
          " MAX(time) FROM topic t WHERE t.forum = forum.id) AS lasttopic" \
          " FROM forum " + order_by
    self.log.debug(sql)
    cursor.execute(sql)
    forums = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['moderators'] = wiki_to_oneliner(row['moderators'], self.env)
        row['description'] = wiki_to_oneliner(row['description'], self.env)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(row['lastreply'])
        else:
            row['lastreply'] = 'No replies'
        if row['lasttopic']:
            row['lasttopic'] = pretty_timedelta(row['lasttopic'])
        else:
            row['lasttopic'] = 'No topics'
        row['time'] = format_datetime(row['time'])
        forums.append(row)
    return forums
def _get_build_data(env, req, build):
    """Assemble a template-friendly dict describing *build*."""
    platform = TargetPlatform.fetch(env, build.platform)
    label = _status_label[build.status]
    data = {}
    data['id'] = build.id
    data['name'] = build.slave
    data['rev'] = build.rev
    data['status'] = label
    data['platform'] = getattr(platform, 'name', 'unknown')
    data['cls'] = label.replace(' ', '-')
    data['href'] = req.href.build(build.config, build.id)
    data['chgset_href'] = req.href.changeset(build.rev)
    if build.started:
        data['started'] = format_datetime(build.started)
        data['started_delta'] = pretty_timedelta(build.started)
        data['duration'] = pretty_timedelta(build.started)
    if build.stopped:
        data['stopped'] = format_datetime(build.stopped)
        data['stopped_delta'] = pretty_timedelta(build.stopped)
        # A finished build reports the actual start-to-stop span.
        data['duration'] = pretty_timedelta(build.stopped, build.started)
    info = build.slave_info
    data['slave'] = {
        'name': build.slave,
        'ipnr': info.get(Build.IP_ADDRESS),
        'os_name': info.get(Build.OS_NAME),
        'os_family': info.get(Build.OS_FAMILY),
        'os_version': info.get(Build.OS_VERSION),
        'machine': info.get(Build.MACHINE),
        'processor': info.get(Build.PROCESSOR)
    }
    return data
def _get_build_data(env, req, build, repos_name=None):
    """Assemble a template-friendly dict describing *build*.

    When *repos_name* is given, the changeset link is resolved through
    the multi-repository helpers; otherwise it stays empty.
    """
    chgset_url = ''
    if repos_name:
        chgset_resource = get_chgset_resource(env, repos_name, build.rev)
        chgset_url = get_resource_url(env, chgset_resource, req.href)
    platform = TargetPlatform.fetch(env, build.platform)
    label = _status_label[build.status]
    data = {}
    data['id'] = build.id
    data['name'] = build.slave
    data['rev'] = build.rev
    data['status'] = label
    data['platform'] = getattr(platform, 'name', 'unknown')
    data['cls'] = label.replace(' ', '-')
    data['href'] = req.href.build(build.config, build.id)
    data['chgset_href'] = chgset_url
    if build.started:
        data['started'] = format_datetime(build.started)
        data['started_delta'] = pretty_timedelta(build.started)
        data['duration'] = pretty_timedelta(build.started)
    if build.stopped:
        data['stopped'] = format_datetime(build.stopped)
        data['stopped_delta'] = pretty_timedelta(build.stopped)
        # A finished build reports the actual start-to-stop span.
        data['duration'] = pretty_timedelta(build.stopped, build.started)
    info = build.slave_info
    data['slave'] = {
        'name': build.slave,
        'ipnr': info.get(Build.IP_ADDRESS),
        'os_name': info.get(Build.OS_NAME),
        'os_family': info.get(Build.OS_FAMILY),
        'os_version': info.get(Build.OS_VERSION),
        'machine': info.get(Build.MACHINE),
        'processor': info.get(Build.PROCESSOR)
    }
    return data
def _get_build_data(env, req, build):
    """Assemble a template-friendly dict describing *build*."""
    label = _status_label[build.status]
    data = {}
    data['id'] = build.id
    data['name'] = build.slave
    data['rev'] = build.rev
    data['status'] = label
    data['cls'] = label.replace(' ', '-')
    data['href'] = req.href.build(build.config, build.id)
    data['chgset_href'] = req.href.changeset(build.rev)
    if build.started:
        data['started'] = format_datetime(build.started)
        data['started_delta'] = pretty_timedelta(build.started)
        data['duration'] = pretty_timedelta(build.started)
    if build.stopped:
        data['stopped'] = format_datetime(build.stopped)
        data['stopped_delta'] = pretty_timedelta(build.stopped)
        # A finished build reports the actual start-to-stop span.
        data['duration'] = pretty_timedelta(build.stopped, build.started)
    info = build.slave_info
    data['slave'] = {
        'name': build.slave,
        'ipnr': info.get(Build.IP_ADDRESS),
        'os_name': info.get(Build.OS_NAME),
        'os_family': info.get(Build.OS_FAMILY),
        'os_version': info.get(Build.OS_VERSION),
        'machine': info.get(Build.MACHINE),
        'processor': info.get(Build.PROCESSOR)
    }
    return data
def get_timeline_events(self, req, start, stop, filters, pid, syllabus_id):
    """Yield worklog start/stop events for the project timeline.

    Reads the ``work_log`` table for project *pid*, restricted to the
    [start, stop] interval, and yields
    ``(kind, pid, time, worker, data)`` tuples.
    """
    # Only single-project timelines are supported.
    if pid is None:
        return
    is_multi = isinstance(pid, (list, tuple))
    if is_multi:
        # TODO:
        return
    # Worklog changes
    show_starts = 'workstart' in filters
    show_stops = 'workstop' in filters
    if show_starts or show_stops:
        add_stylesheet(req, "worklog/worklogplugin.css")
        ts_start = to_timestamp(start)
        ts_stop = to_timestamp(stop)
        ticket_realm = Resource('ticket')
        db = self.env.get_read_db()
        cursor = db.cursor()
        # Each work_log row is expanded into a 'start' and a 'stop'
        # pseudo-row via the UNION, then joined with its ticket data.
        cursor.execute(""" SELECT wl.worker,wl.ticket,wl.time,wl.starttime,wl.comment,wl.kind,t.summary,t.status,t.resolution,t.type FROM ( SELECT worker, ticket, starttime AS time, starttime, comment, 'start' AS kind FROM work_log UNION SELECT worker, ticket, endtime AS time, starttime, comment, 'stop' AS kind FROM work_log ) AS wl JOIN ticket t ON t.id = wl.ticket AND project_id=%s AND wl.time>=%s AND wl.time<=%s ORDER BY wl.time""", (pid, ts_start, ts_stop))
        for worker,tid,ts,ts_start,comment,kind,summary,status,resolution,type in cursor:
            ticket = ticket_realm(id=tid)
            time = to_datetime(ts)
            started = None
            if kind == 'start':
                if not show_starts:
                    continue
                yield ('workstart', pid, time, worker,
                       (ticket,summary,status,resolution,type, started, ""))
            else:
                if not show_stops:
                    continue
                started = to_datetime(ts_start)
                # Prepend the elapsed time to any worker comment.
                if comment:
                    comment = "(Time spent: %s)\n\n%s" % (pretty_timedelta(started, time), comment)
                else:
                    comment = '(Time spent: %s)' % pretty_timedelta(started, time)
                yield ('workstop', pid, time, worker,
                       (ticket,summary,status,resolution,type, started, comment))
def get_topics(self, req, cursor, forum_id, order_by = 'time', desc = False):
    """Return all topics of forum *forum_id* with reply statistics."""
    # Aggregate columns come from the joined sub-select; anything else
    # must be qualified with the topic alias.
    if order_by not in ('replies', 'lastreply'):
        order_by = 't.' + order_by
    columns = ('id', 'forum', 'time', 'subject', 'body', 'author',
               'replies', 'lastreply')
    direction = (" ASC", " DESC")[bool(desc)]
    sql = "SELECT t.id, t.forum, t.time, t.subject, t.body, t.author," \
          " m.replies, m.lastreply FROM topic t LEFT JOIN (SELECT COUNT(id)" \
          " AS replies, MAX(time) AS lastreply, topic FROM message GROUP BY" \
          " topic) m ON t.id = m.topic WHERE t.forum = %s ORDER BY " \
          + order_by + direction
    self.log.debug(sql % (forum_id,))
    cursor.execute(sql, (forum_id,))
    topics = []
    for values in cursor:
        topic = dict(zip(columns, values))
        topic['author'] = wiki_to_oneliner(topic['author'], self.env)
        topic['body'] = wiki_to_html(topic['body'], self.env, req)
        if topic['lastreply']:
            topic['lastreply'] = pretty_timedelta(float(topic['lastreply']))
        else:
            topic['lastreply'] = 'No replies'
        if not topic['replies']:
            topic['replies'] = 0
        topic['time'] = format_datetime(topic['time'])
        topics.append(topic)
    return topics
def get_downloads(self, req, cursor, order_by='id', desc=False):
    """Return all downloads ordered by *order_by*, formatted for display."""
    columns = ('id', 'file', 'description', 'size', 'time', 'count',
               'author', 'tags', 'component', 'version', 'architecture',
               'platform', 'type')
    direction = (" ASC", " DESC")[bool(desc)]
    sql = "SELECT id, file, description, size, time, count, author, tags," \
          " component, version, architecture, platform, type FROM download " \
          "ORDER BY " + order_by + direction
    self.log.debug(sql)
    cursor.execute(sql)
    downloads = []
    for values in cursor:
        download = dict(zip(columns, values))
        download['description'] = wiki_to_oneliner(download['description'],
                                                   self.env)
        download['size'] = pretty_size(download['size'])
        download['time'] = pretty_timedelta(download['time'])
        download['count'] = download['count'] or 0
        downloads.append(download)
    # Resolve foreign-key ids to full objects in a second pass -- the
    # lookups reuse the cursor, so they must not run while it is still
    # being iterated above.
    for download in downloads:
        download['architecture'] = self.get_architecture(
            cursor, download['architecture'])
        download['platform'] = self.get_platform(cursor,
                                                 download['platform'])
        download['type'] = self.get_type(cursor, download['type'])
    return downloads
def get_changes(env, repos, revs, full=None, req=None, format=None):
    """Return a dict mapping each revision in *revs* to display data.

    For ``format='changelog'`` the changed files are listed and the
    message is left raw; otherwise the message is wiki-rendered (as a
    one-liner unless *full* is set).
    """
    db = env.get_db_cnx()
    changes = {}
    for rev in revs:
        chgset = repos.get_changeset(rev)
        message = chgset.message or '--'
        files = None
        if format == 'changelog':
            files = [change[0] for change in chgset.get_changes()]
        elif message:
            if full:
                message = wiki_to_html(message, env, req, db,
                                       absurls=(format == 'rss'),
                                       escape_newlines=True)
            else:
                message = wiki_to_oneliner(message, env, db, shorten=True)
            if not message:
                message = '--'
        changes[rev] = {
            'date_seconds': chgset.date,
            'date': format_datetime(chgset.date),
            'age': pretty_timedelta(chgset.date),
            'author': chgset.author or 'anonymous',
            'message': message,
            'shortlog': shorten_line(message),
            'files': files
        }
    return changes
def _render_history(self, req, db, page): """Extract the complete history for a given page and stores it in the HDF. This information is used to present a changelog/history for a given page. """ req.perm.assert_permission("WIKI_VIEW") if not page.exists: raise TracError, "Page %s does not exist" % page.name req.hdf["title"] = page.name + " (history)" history = [] for version, t, author, comment, ipnr in page.get_history(): history.append( { "url": self.env.href.wiki(page.name, version=version), "diff_url": self.env.href.wiki(page.name, version=version, action="diff"), "version": version, "time": format_datetime(t), "time_delta": pretty_timedelta(t), "author": author, "comment": wiki_to_oneliner(comment or "", self.env, db), "ipaddr": ipnr, } ) req.hdf["wiki.history"] = history
def get_topics(self, req, cursor, forum_id, order_by='time', desc=False):
    """Return all topics of forum *forum_id* with reply statistics.

    Each topic dict (keyed by ``columns``) has a wiki-rendered author
    and body, a prettified last-reply delta and a formatted time.
    """
    # Aggregate columns live on the joined sub-select; everything else
    # needs the topic alias prefix.
    if not order_by in (
            'replies',
            'lastreply',
    ):
        order_by = 't.' + order_by
    columns = ('id', 'forum', 'time', 'subject', 'body', 'author',
               'replies', 'lastreply')
    sql = "SELECT t.id, t.forum, t.time, t.subject, t.body, t.author," \
          " m.replies, m.lastreply FROM topic t LEFT JOIN (SELECT COUNT(id)" \
          " AS replies, MAX(time) AS lastreply, topic FROM message GROUP BY" \
          " topic) m ON t.id = m.topic WHERE t.forum = %s ORDER BY " \
          + order_by + (" ASC", " DESC")[bool(desc)]
    self.log.debug(sql % (forum_id, ))
    cursor.execute(sql, (forum_id, ))
    topics = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['author'] = wiki_to_oneliner(row['author'], self.env)
        row['body'] = wiki_to_html(row['body'], self.env, req)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(float(row['lastreply']))
        else:
            row['lastreply'] = 'No replies'
        # Topics without messages have NULL replies from the LEFT JOIN.
        if not row['replies']:
            row['replies'] = 0
        row['time'] = format_datetime(row['time'])
        topics.append(row)
    return topics
def _render_history(self, req, db, page):
    """Extract the complete history for a given page and stores it in the
    HDF.

    This information is used to present a changelog/history for a given
    page.
    """
    req.perm.assert_permission('WIKI_VIEW')
    if not page.exists:
        raise TracError, "Page %s does not exist" % page.name
    req.hdf['title'] = page.name + ' (history)'
    history = []
    # One entry per stored version, as returned by page.get_history().
    for version, t, author, comment, ipnr in page.get_history():
        history.append({
            'url': self.env.href.wiki(page.name, version=version),
            'diff_url': self.env.href.wiki(page.name, version=version,
                                           action='diff'),
            'version': version,
            'time': format_datetime(t),
            'time_delta': pretty_timedelta(t),
            'author': author,
            'comment': wiki_to_oneliner(comment or '', self.env, db),
            'ipaddr': ipnr
        })
    req.hdf['wiki.history'] = history
def get_downloads(self, req, cursor, order_by = 'id', desc = False):
    """Return all downloads ordered by *order_by*, formatted for display.

    ``order_by`` is interpolated into the SQL and must be trusted.
    """
    columns = ('id', 'file', 'description', 'size', 'time', 'count',
               'author', 'tags', 'component', 'version', 'architecture',
               'platform', 'type')
    sql = "SELECT id, file, description, size, time, count, author, tags," \
          " component, version, architecture, platform, type FROM download " \
          "ORDER BY " + order_by + (" ASC", " DESC")[bool(desc)]
    self.log.debug(sql)
    cursor.execute(sql)
    downloads = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['description'] = wiki_to_oneliner(row['description'], self.env)
        row['size'] = pretty_size(row['size'])
        row['time'] = pretty_timedelta(row['time'])
        row['count'] = row['count'] or 0
        downloads.append(row)
    # Replace field ids with appropriate objects.  Done in a second pass
    # since the lookups reuse the cursor being iterated above.
    for download in downloads:
        download['architecture'] = self.get_architecture(cursor,
                                                         download['architecture'])
        download['platform'] = self.get_platform(cursor,
                                                 download['platform'])
        download['type'] = self.get_type(cursor, download['type'])
    return downloads
def process_request(self, req):
    """Render the detail page for a single build.

    Handles an optional POST ``invalidate`` action (then redirects back
    to the build page), and otherwise assembles everything the
    ``bitten_build.html`` template needs: configuration info, per-step
    logs and reports, slave details and the changeset author.
    """
    req.perm.require('BUILD_VIEW')
    db = self.env.get_db_cnx()
    build_id = int(req.args.get('id'))
    build = Build.fetch(self.env, build_id, db=db)
    assert build, 'Build %s does not exist' % build_id
    if req.method == 'POST':
        # Only "invalidate" is handled; any POST redirects afterwards.
        if req.args.get('action') == 'invalidate':
            self._do_invalidate(req, build, db)
        req.redirect(req.href.build(build.config, build.id))
    add_link(req, 'up', req.href.build(build.config),
             'Build Configuration')
    data = {'title': 'Build %s - %s' % (build_id,
                                        _status_title[build.status]),
            'page_mode': 'view_build', 'build': {}}
    config = BuildConfig.fetch(self.env, build.config, db=db)
    data['build']['config'] = {
        'name': config.label,
        'href': req.href.build(config.name)
    }
    # Per-request log formatters supplied by plugins.
    formatters = []
    for formatter in self.log_formatters:
        formatters.append(formatter.get_formatter(req, build))
    summarizers = {}  # keyed by report type
    for summarizer in self.report_summarizers:
        categories = summarizer.get_supported_categories()
        summarizers.update(dict([(cat, summarizer) for cat in categories]))
    data['build'].update(_get_build_data(self.env, req, build))
    steps = []
    for step in BuildStep.select(self.env, build=build.id, db=db):
        steps.append({
            'name': step.name,
            'description': step.description,
            'duration': pretty_timedelta(step.started, step.stopped),
            'failed': step.status == BuildStep.FAILURE,
            'errors': step.errors,
            'log': self._render_log(req, build, formatters, step),
            'reports': self._render_reports(req, config, build,
                                            summarizers, step)
        })
    data['build']['steps'] = steps
    data['build']['can_delete'] = ('BUILD_DELETE' in req.perm)
    repos = self.env.get_repository(req.authname)
    repos.authz.assert_permission(config.path)
    chgset = repos.get_changeset(build.rev)
    data['build']['chgset_author'] = chgset.author
    add_script(req, 'bitten/tabset.js')
    add_stylesheet(req, 'bitten/bitten.css')
    return 'bitten_build.html', data, None
def get_task_markup(self, req, ticket, task):
    """Render a list item describing how long *task* has been worked on.

    Returns '' when there is no active task.
    """
    if not task:
        return ''
    if task['ticket'] == ticket:
        label = 'this ticket'
    else:
        label = 'ticket #' + str(task['ticket'])
    elapsed = pretty_timedelta(task['starttime'], None)
    text = 'You have been working on %s for %s' % (label, elapsed)
    return '<li>%s</li>' % wiki_to_oneliner(text, self.env, req=req)
def get_forums(self, req, cursor, order_by = 'subject', desc = False):
    """Return all forums with aggregated topic/reply statistics.

    Statistics are computed by a nested LEFT JOIN over topics and their
    messages; forums without topics get 0/'No topics'/'No replies'
    placeholders.
    """
    # Aggregate columns come from the joined sub-select; plain forum
    # columns need the forum alias prefix.
    if not order_by in ('topics', 'replies', 'lasttopic', 'lastreply'):
        order_by = 'f.' + order_by
    columns = ('id', 'name', 'author', 'time', 'moderators', 'group',
               'subject', 'description', 'topics', 'replies', 'lasttopic',
               'lastreply')
    sql = "SELECT f.id, f.name, f.author, f.time, f.moderators, " \
          "f.forum_group, f.subject, f.description, ta.topics, ta.replies, " \
          "ta.lasttopic, ta.lastreply FROM forum f LEFT JOIN (SELECT " \
          "COUNT(t.id) AS topics, MAX(t.time) AS lasttopic, SUM(ma.replies) " \
          "AS replies, MAX(ma.lastreply) AS lastreply, t.forum AS forum FROM " \
          " topic t LEFT JOIN (SELECT COUNT(m.id) AS replies, MAX(m.time) AS " \
          "lastreply, m.topic AS topic FROM message m GROUP BY m.topic) ma ON " \
          "t.id = ma.topic GROUP BY forum) ta ON f.id = ta.forum ORDER BY " + \
          order_by + (" ASC", " DESC")[bool(desc)]
    self.log.debug(sql)
    cursor.execute(sql)
    forums = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['moderators'] = wiki_to_oneliner(row['moderators'], self.env)
        row['description'] = wiki_to_oneliner(row['description'], self.env)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(float(row['lastreply']))
        else:
            row['lastreply'] = 'No replies'
        if row['lasttopic']:
            self.log.debug('lasttopic: %s' % row['lasttopic'])
            row['lasttopic'] = pretty_timedelta(float(row['lasttopic']))
        else:
            row['lasttopic'] = 'No topics'
        if not row['topics']:
            row['topics'] = 0
        if not row['replies']:
            row['replies'] = 0
        else:
            # SUM on PosgreSQL returns float number.
            row['replies'] = int(row['replies'])
        row['time'] = format_datetime(row['time'])
        forums.append(row)
    return forums
def get_forums(self, req, cursor, asc=0, order_by='subject'):
    """Return all forums with aggregated topic/reply statistics.

    *asc* selects ascending (truthy) or descending (falsy) sort order.
    Statistics are computed via a nested LEFT JOIN over topics and
    messages.
    """
    # Aggregate columns come from the joined sub-select; plain forum
    # columns need the forum alias prefix.
    if not order_by in ('topics', 'replies', 'lasttopic', 'lastreply'):
        order_by = 'f.' + order_by
    columns = ('id', 'name', 'author', 'time', 'moderators', 'group',
               'subject', 'description', 'topics', 'replies', 'lasttopic',
               'lastreply')
    sql = "SELECT f.id, f.name, f.author, f.time, f.moderators, " \
          "f.forum_group, f.subject, f.description, ta.topics, ta.replies, " \
          "ta.lasttopic, ta.lastreply FROM forum f LEFT JOIN (SELECT " \
          "COUNT(t.id) AS topics, MAX(t.time) AS lasttopic, SUM(ma.replies) " \
          "AS replies, MAX(ma.lastreply) AS lastreply, t.forum AS forum FROM " \
          " topic t LEFT JOIN (SELECT COUNT(m.id) AS replies, MAX(m.time) AS " \
          "lastreply, m.topic AS topic FROM message m GROUP BY m.topic) ma ON " \
          "t.id = ma.topic GROUP BY forum) ta ON f.id = ta.forum ORDER BY " + \
          order_by + (" DESC", " ASC")[int(asc)]
    self.log.debug(sql)
    cursor.execute(sql)
    forums = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['moderators'] = wiki_to_oneliner(row['moderators'], self.env)
        row['description'] = wiki_to_oneliner(row['description'], self.env)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(float(row['lastreply']))
        else:
            row['lastreply'] = 'No replies'
        if row['lasttopic']:
            self.log.debug('lasttopic: %s' % row['lasttopic'])
            row['lasttopic'] = pretty_timedelta(float(row['lasttopic']))
        else:
            row['lasttopic'] = 'No topics'
        if not row['topics']:
            row['topics'] = 0
        if not row['replies']:
            row['replies'] = 0
        else:
            # SUM on PosgreSQL returns float number.
            row['replies'] = int(row['replies'])
        row['time'] = format_datetime(row['time'])
        forums.append(row)
    return forums
def formattime(self, time):
    """Return formatted time for ListOfWikiPages table."""
    time = int(time)
    iso = quote_plus(format_datetime(time, 'iso8601'))
    timeline = self.href('timeline', precision='seconds', from_=iso)
    ago_link = tag.a(pretty_timedelta(time), href=timeline)
    return [tag.span(format_datetime(time)),
            tag.span(" (", ago_link, " ago)")]
def formattime(self, time):
    """Return formatted time for ListOfWikiPages table."""
    time = int(time)
    # Link the relative age to the timeline at that exact second.
    timeline_href = self.href(
        'timeline', precision='seconds',
        from_=quote_plus(format_datetime(time, 'iso8601')))
    absolute = tag.span(format_datetime(time))
    relative = tag.span(" (",
                        tag.a(pretty_timedelta(time), href=timeline_href),
                        " ago)")
    return [absolute, relative]
def _entry_to_hdf(req, entry):
    """Flatten a spam-filter monitor *entry* into a dict for HDF rendering."""
    hdf = {}
    hdf['id'] = entry.id
    hdf['time'] = format_datetime(entry.time)
    hdf['timedelta'] = pretty_timedelta(entry.time)
    hdf['path'] = entry.path
    hdf['url'] = req.abs_href(entry.path)
    hdf['path_clipped'] = shorten_line(entry.path, 25)
    hdf['href'] = req.href(entry.path)
    hdf['admin_href'] = req.href.admin('spamfilter', 'monitor', entry.id)
    hdf['author'] = entry.author
    hdf['author_clipped'] = shorten_line(entry.author, 25)
    hdf['ipnr'] = entry.ipnr
    hdf['authenticated'] = entry.authenticated
    hdf['headers'] = entry.headers
    hdf['content'] = shorten_line(entry.content)
    hdf['full_content'] = entry.content
    hdf['rejected'] = entry.rejected
    hdf['karma'] = entry.karma
    hdf['reasons'] = entry.reasons
    return hdf
def process_request(self, req, chrome, projects): folders = [] for project in projects: env = project["env"] if not req.authname: req.authname = "anonymous" try: repos = env.get_repository(req.authname) except TracError: continue try: change = repos.get_changeset(repos.get_youngest_rev()) folders.append({'name': project["name"], 'href': project["href"]+"/browser", 'rev': repos.get_youngest_rev(), 'age': util.pretty_timedelta(change.date), 'author': change.author, 'message': wiki_to_oneliner(change.message, env, env.get_db_cnx(), shorten=True, absurls=True, req=req)}) except Exception, e: pass
def _build_row(self, name, time, author, version, comment):
    """Build one table row (name, modified, version, author, comment)
    for wiki page *name*.
    """
    when = from_utimestamp(time)
    cells = []
    # page name, optionally shortened to its last path component
    if self.pagename == 'short':
        display = name.rsplit('/', 1)[-1]
    else:
        display = name
    cells.append(tag.td(tag.a(display, href=self.href.wiki(name)),
                        class_='name'))
    # last modified, with an "(... ago)" link into the timeline
    href_ago = self.href('timeline', precision='seconds',
                         from_=format_datetime(when, 'iso8601'))
    ago = [' (', tag.a(pretty_timedelta(when), href=href_ago), ' ago)']
    cells.append(tag.td(format_datetime(when, self.date_format), ago,
                        class_='time'))
    # version number plus diff/history shortcuts
    a_version = tag.a(version, href=self.href.wiki(name, version=version))
    a_diff = tag.a('d', title='diff',
                   href=self.href.wiki(name, action='diff',
                                       version=version))
    a_history = tag.a('h', title='history',
                      href=self.href.wiki(name, action='history'))
    cells.append(tag.td(a_version, ' [', a_diff, '|', a_history, ']',
                        class_='version'))
    cells.append(tag.td(author, class_='author'))
    cells.append(tag.td(comment, class_='comment'))
    return tag.tr(cells)
def get_topics(self, req, cursor, forum, order_by = 'ORDER BY time ASC'):
    """Return all topics in *forum* with per-topic reply statistics.

    ``order_by`` must be a trusted SQL fragment.
    """
    columns = ('id', 'forum', 'time', 'subject', 'body', 'author',
               'replies', 'lastreply')
    # Reply count and newest reply time come from correlated
    # sub-selects over the message table.
    sql = "SELECT id, forum, time, subject, body, author, (SELECT" \
          " COUNT(id) FROM message m WHERE m.topic = topic.id) AS replies," \
          " (SELECT MAX(time) FROM message m WHERE m.topic = topic.id) AS" \
          " lastreply FROM topic WHERE forum = %s " + order_by
    self.log.debug(sql % (forum,))
    cursor.execute(sql, (forum,))
    topics = []
    for values in cursor:
        topic = dict(zip(columns, values))
        topic['author'] = wiki_to_oneliner(topic['author'], self.env)
        topic['body'] = wiki_to_html(topic['body'], self.env, req)
        if not topic['lastreply']:
            topic['lastreply'] = 'No replies'
        else:
            topic['lastreply'] = pretty_timedelta(topic['lastreply'])
        topic['time'] = format_datetime(topic['time'])
        topics.append(topic)
    return topics
def get_topics(self, req, cursor, forum, order_by='ORDER BY time ASC'):
    """Return all topics in *forum* with per-topic reply statistics.

    ``order_by`` is interpolated into the SQL and must be trusted.
    """
    columns = ('id', 'forum', 'time', 'subject', 'body', 'author',
               'replies', 'lastreply')
    # Reply count and newest reply time are computed by correlated
    # sub-selects over the message table.
    sql = "SELECT id, forum, time, subject, body, author, (SELECT" \
          " COUNT(id) FROM message m WHERE m.topic = topic.id) AS replies," \
          " (SELECT MAX(time) FROM message m WHERE m.topic = topic.id) AS" \
          " lastreply FROM topic WHERE forum = %s " + order_by
    self.log.debug(sql % (forum, ))
    cursor.execute(sql, (forum, ))
    topics = []
    for row in cursor:
        row = dict(zip(columns, row))
        row['author'] = wiki_to_oneliner(row['author'], self.env)
        row['body'] = wiki_to_html(row['body'], self.env, req)
        if row['lastreply']:
            row['lastreply'] = pretty_timedelta(row['lastreply'])
        else:
            row['lastreply'] = 'No replies'
        row['time'] = format_datetime(row['time'])
        topics.append(row)
    return topics
def reset_orphaned_builds(self):
    """Reset all in-progress builds to ``PENDING`` state if they've been
    running so long that the configured timeout has been reached.

    This is used to cleanup after slaves that have unexpectedly cancelled
    a build without notifying the master, or are for some other reason not
    reporting back status updates.
    """
    if not self.timeout:
        # If no timeout is set, none of the in-progress builds can be
        # considered orphaned
        return
    db = self.env.get_db_cnx()
    now = int(time.time())
    for build in Build.select(self.env, status=Build.IN_PROGRESS, db=db):
        if now - build.last_activity < self.timeout:
            # This build has not reached the timeout yet, assume it's still
            # being executed
            continue
        self.log.info('Orphaning build %d. Last activity was %s (%s)' % \
                      (build.id, format_datetime(build.last_activity),
                       pretty_timedelta(build.last_activity)))
        # Return the build to the queue and wipe all slave-side state.
        build.status = Build.PENDING
        build.slave = None
        build.slave_info = {}
        build.started = 0
        build.stopped = 0
        build.last_activity = 0
        # Discard partially recorded steps and their attachments.
        for step in list(BuildStep.select(self.env, build=build.id, db=db)):
            step.delete(db=db)
        build.update(db=db)
        Attachment.delete_all(self.env, 'build', build.resource.id, db)
    db.commit()
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file for the peer-review browser.

    For ``format=raw``/``txt`` the content is streamed directly to the
    client (terminating the request via RequestDone); otherwise an HTML
    preview is generated with Mimeview and stored in the HDF.
    """
    req.perm.assert_permission('FILE_VIEW')
    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': util.format_datetime(changeset.date),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    format = req.args.get('format')
    if format in ['raw', 'txt']:
        # Stream the file verbatim (or forced to text/plain for 'txt').
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified', util.http_date(node.last_modified))
        req.end_headers()
        content = node.get_content()
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        content = node.get_content().read(mimeview.max_preview_size)
        if not is_binary(content):
            if mime_type != 'text/plain':
                plain_href = self.env.href.peerReviewBrowser(
                    node.path, rev=rev and node.rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        req.hdf['file'] = mimeview.preview_to_hdf(
            req, content, len(content), mime_type, node.created_path,
            raw_href, annotations=['addFileNums'])
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        add_stylesheet(req, 'common/css/code.css')
def _do_actions(self, req, actions):
    """Perform the screenshot module *actions* for this request.

    Actions visible here: ``get-file`` (serve a screenshot file or its
    HTML view), ``add`` / ``post-add`` (upload form and upload
    processing), ``edit`` (prepare the edit form data).  Actions that
    render a page return a (template, data, content_type) tuple.
    """
    # Initialize dictionary for data.
    data = {}
    # Get database access.
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    # Get API component.
    api = self.env[ScreenshotsApi]
    for action in actions:
        if action == 'get-file':
            req.perm.assert_permission('SCREENSHOTS_VIEW')
            # Get request arguments.
            screenshot_id = int(req.args.get('id') or 0)
            format = req.args.get('format') or self.default_format
            width = int(req.args.get('width') or 0)
            height = int(req.args.get('height') or 0)
            # Check if requested format is allowed.
            if not format in self.formats:
                raise TracError(
                    'Requested screenshot format that is not allowed.',
                    'Requested format not allowed.')
            # Get screenshot.
            screenshot = api.get_screenshot(cursor, screenshot_id)
            if screenshot:
                # Set missing dimensions.
                width = width or screenshot['width']
                height = height or screenshot['height']
                if format == 'html':
                    # Format screenshot attributes.
                    screenshot['time'] = pretty_timedelta(
                        screenshot['time'])
                    # Prepare data dictionary.
                    data['screenshot'] = screenshot
                    # Return screenshot template and data.
                    return ('screenshot.cs', data, None)
                else:
                    # Prepare screenshot filename.
                    name, ext = os.path.splitext(screenshot['file'])
                    format = (format == 'raw') and ext or '.' + format
                    path = os.path.join(self.path, unicode(screenshot['id']))
                    filename = os.path.join(
                        path, '%s-%sx%s%s' % (name, width, height, format))
                    orig_name = os.path.join(
                        path, '%s-%sx%s%s' % (name, screenshot['width'],
                                              screenshot['height'], ext))
                    self.log.debug('filemame: %s' % (filename, ))
                    # Send file to request.
                    if not os.path.exists(filename):
                        # Scaled variant not cached yet -- create it.
                        self._create_image(orig_name, path, name, format,
                                           width, height)
                    req.send_header(
                        'Content-Disposition',
                        'attachment;filename=%s' % (os.path.basename(filename)))
                    req.send_header('Content-Description',
                                    screenshot['description'])
                    req.send_file(filename,
                                  mimetypes.guess_type(filename)[0])
            else:
                raise TracError('Screenshot not found.')
        elif action == 'add':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')
            # Fill data dictionary.
            data['index'] = req.args.get('index')
            data['versions'] = api.get_versions(cursor)
            data['components'] = api.get_components(cursor)
            # Return template with add screenshot form.
            return ('screenshot-add.cs', data, None)
        elif action == 'post-add':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')
            # Get image file from request.
            file, filename = self._get_file_from_req(req)
            name, ext = os.path.splitext(filename)
            filename = name + ext.lower()
            # Check correct file type.
            reg = re.compile(r'^(.*)[.](.*)$')
            result = reg.match(filename)
            if result:
                if not result.group(2).lower() in self.ext:
                    raise TracError('Unsupported uploaded file type.')
            else:
                raise TracError('Unsupported uploaded file type.')
            # Create image object.
            image = Image.open(file)
            # Construct screenshot dictionary from form values.
            screenshot = {'name': req.args.get('name'),
                          'description': req.args.get('description'),
                          'time': int(time.time()),
                          'author': req.authname,
                          'tags': req.args.get('tags'),
                          'file': filename,
                          'width': image.size[0],
                          'height': image.size[1]}
            # Add new screenshot.
            api.add_screenshot(cursor, screenshot)
            # Get inserted screenshot to with new id.
            screenshot = api.get_screenshot_by_time(cursor,
                                                    screenshot['time'])
            # Add components to screenshot.
            components = req.args.get('components') or []
            if not isinstance(components, list):
                components = [components]
            for component in components:
                component = {'screenshot': screenshot['id'],
                             'component': component}
                api.add_component(cursor, component)
            screenshot['components'] = components
            # Add versions to screenshots
            versions = req.args.get('versions') or []
            if not isinstance(versions, list):
                versions = [versions]
            for version in versions:
                version = {'screenshot': screenshot['id'],
                           'version': version}
                api.add_version(cursor, version)
            screenshot['versions'] = versions
            self.log.debug(screenshot)
            # Prepare file paths
            path = os.path.join(self.path, unicode(screenshot['id']))
            filepath = os.path.join(
                path, '%s-%ix%i.%s' % (result.group(1), screenshot['width'],
                                       screenshot['height'],
                                       result.group(2)))
            path = os.path.normpath(path)
            filepath = os.path.normpath(filepath)
            self.log.debug('path: %s' % (path, ))
            self.log.debug('filepath: %s' % (filepath, ))
            # Store uploaded image.
            try:
                os.mkdir(path)
                out_file = open(filepath, 'wb+')
                file.seek(0)
                shutil.copyfileobj(file, out_file)
                out_file.close()
            except Exception, error:
                # Roll back the DB record and clean up partial files,
                # then report the storage failure.
                api.delete_screenshot(cursor, screenshot['id'])
                try:
                    self.log.debug(error)
                except:
                    pass
                try:
                    os.remove(filename)
                except:
                    pass
                try:
                    os.rmdir(path)
                except:
                    pass
                raise TracError('Error storing file. Is directory' \
                    ' specified in path config option in [screenshots]' \
                    ' section of trac.ini existing? Original message was: %s' \
                    % (error,))
            # Notify change listeners.
            for listener in self.change_listeners:
                listener.screenshot_created(screenshot)
            # Clear id to prevent display of edit and delete button.
            req.args['id'] = None
        elif action == 'edit':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')
            # Get request arguments.
            screenshot_id = req.args.get('id')
            # Prepare data dictionary.
            data['screenshot'] = api.get_screenshot(cursor, screenshot_id)
            self.log.debug(data['screenshot'])
def get_timeline_events(self, req, start, stop, filters):
    """Yield Hudson build events for the Trac timeline.

    Produces ``(kind, href, title, completed, author, comment)`` tuples
    for every build whose start time falls inside ``[start, stop]``.
    Builds still running are included only when ``self.disp_building``
    is set.
    """
    # Nothing to do if the 'build' filter is off or the user lacks
    # permission to view builds.
    if 'build' not in filters or not req.perm.has_permission('BUILD_VIEW'):
        return
    # Support both Trac 0.10 and 0.11: 0.11 passes datetime objects,
    # 0.10 passes plain timestamps.
    if isinstance(start, datetime):  # Trac>=0.11
        from trac.util.datefmt import to_timestamp
        start = to_timestamp(start)
        stop = to_timestamp(stop)

    add_stylesheet(req, 'HudsonTrac/hudsontrac.css')

    # get and parse the build-info; cset is presumably the charset used
    # to decode byte strings from the Hudson API -- TODO confirm against
    # __get_info().
    info, cset = self.__get_info()

    # extract all build entries
    for entry in self.__extract_builds(info):
        # get result, optionally ignoring builds that are still running
        if entry['building']:
            if self.disp_building:
                result = 'IN-PROGRESS'
            else:
                continue
        else:
            result = entry['result']

        # get start/stop times; the /1000 suggests Hudson reports
        # milliseconds while Trac uses seconds.
        started = entry['timestamp'] / 1000
        if started < start or started > stop:
            continue
        if result == 'IN-PROGRESS':
            # we hope the clocks are close...
            completed = time.time()
        else:
            completed = (entry['timestamp'] + entry['duration']) / 1000

        # get message and the timeline CSS kind; self.alt_succ selects
        # the alternate style for successful / in-progress builds.
        message, kind = {
            'SUCCESS': ('Build finished successfully',
                        ('build-successful',
                         'build-successful-alt')[self.alt_succ]),
            'UNSTABLE': ('Build unstable', 'build-unstable'),
            'ABORTED': ('Build aborted', 'build-aborted'),
            'IN-PROGRESS': ('Build in progress',
                            ('build-inprogress',
                             'build-inprogress-alt')[self.alt_succ]),
        }.get(result, ('Build failed', 'build-failed'))
        if self.use_desc:
            # Prefer the build's own description when one is present.
            message = entry['description'] and \
                      unicode(entry['description'], cset) or message

        # get changesets
        changesets = ''
        if self.list_changesets:
            paths = ['changeSet.items.revision', 'changeSet.items.id']
            revs = [unicode(str(r), cset) for r in \
                    self.__find_all(entry, paths)]
            if revs:
                revs = [self.__fmt_changeset(r, req) for r in revs]
                changesets = '<br/>Changesets: ' + ', '.join(revs)

        # get author(s): try each source listed in self.disp_culprit in
        # order, stopping at the first that yields a non-empty value.
        author = None
        for c in self.disp_culprit:
            author = {
                'starter': self.__find_first(entry,
                                             'actions.causes.userName'),
                'author': self.__find_first(entry,
                                            ['changeSet.items.user',
                                             'changeSet.items.author.fullName']),
                'authors': self.__find_all(entry,
                                           ['changeSet.items.user',
                                            'changeSet.items.author.fullName']),
                'culprit': self.__find_first(entry, 'culprits.fullName'),
                'culprits': self.__find_all(entry, 'culprits.fullName'),
            }.get(c)
            if author and not isinstance(author, basestring):
                # Multiple names: deduplicate and join.
                author = ', '.join(set(author))
            if author:
                author = unicode(author, cset)
                break

        # format response
        if result == 'IN-PROGRESS':
            comment = Markup("%s since %s, duration %s%s" % (
                message, format_datetime(started),
                pretty_timedelta(started, completed), changesets))
        else:
            comment = Markup("%s at %s, duration %s%s" % (
                message, format_datetime(completed),
                pretty_timedelta(started, completed), changesets))
        href = entry['url']
        title = 'Build "%s" (%s)' % \
                (unicode(entry['fullDisplayName'], cset), result.lower())
        yield kind, href, title, completed, author, comment
def _render_diff(self, req, db, page):
    """Populate the HDF with a diff between two versions of a wiki page.

    The version to diff against is either the explicitly requested
    ``old_version`` or the page's immediately preceding version found by
    walking the history.
    """
    req.perm.assert_permission("WIKI_VIEW")
    if not page.exists:
        raise TracError("Version %s of page %s does not exist"
                        % (req.args.get("version"), page.name))

    add_stylesheet(req, "common/css/diff.css")
    req.hdf["title"] = page.name + " (diff)"
    # Ask web spiders to not index old versions
    req.hdf["html.norobots"] = 1

    requested_old = req.args.get("old_version")
    if requested_old:
        requested_old = int(requested_old)
        if requested_old == page.version:
            requested_old = None
        elif requested_old > page.version:
            # Requested "old" side is actually newer: swap so that `page`
            # always refers to the newer side of the diff.
            requested_old, page = page.version, \
                                  WikiPage(self.env, page.name, requested_old)

    info = {"version": page.version,
            "history_href": self.env.href.wiki(page.name, action="history")}

    older_page = None
    seen_changes = 0
    for version, stamp, author, comment, ipnr in page.get_history():
        if version == page.version:
            # Record metadata for the newer side of the diff.
            if stamp:
                info["time"] = format_datetime(stamp)
                info["time_delta"] = pretty_timedelta(stamp)
            info["author"] = author or "anonymous"
            info["comment"] = comment or "--"
            info["ipnr"] = ipnr or ""
        else:
            seen_changes += 1
            if version < page.version and (not requested_old or
                                           version == requested_old):
                older_page = WikiPage(self.env, page.name, version)
                info["num_changes"] = seen_changes
                info["old_version"] = version
                break
    req.hdf["wiki"] = info

    diff_style, diff_options = get_diff_options(req)
    old_lines = older_page and older_page.text.splitlines() or []
    new_lines = page.text.splitlines()

    # A "-Un" diff option overrides the default amount of context lines;
    # a negative value means unlimited context.
    context = next((int(opt[2:]) for opt in diff_options
                    if opt.startswith("-U")), 3)
    if context < 0:
        context = None

    req.hdf["wiki.diff"] = hdf_diff(
        old_lines, new_lines, context=context,
        ignore_blank_lines="-B" in diff_options,
        ignore_case="-i" in diff_options,
        ignore_space_changes="-b" in diff_options)
def _insert_ticket_data(self, req, db, ticket, reporter_id):
    """Insert ticket data into the hdf: core values, field definitions,
    formatted description, change log, attachments and available actions."""
    req.hdf['ticket'] = ticket.values
    req.hdf['ticket.id'] = ticket.id
    req.hdf['ticket.href'] = self.env.href.ticket(ticket.id)

    for field in TicketSystem(self.env).get_ticket_fields():
        if field['type'] in ('radio', 'select'):
            value = ticket.values.get(field['name'])
            options = field['options']
            if value and not value in options:
                # Current ticket value must be visible even if its not in the
                # possible values
                options.append(value)
                field['options'] = options
        name = field['name']
        # The field name becomes the hdf key, so drop it from the payload.
        del field['name']
        if name in ('summary', 'reporter', 'description', 'type', 'status',
                    'resolution', 'owner'):
            # These fields are rendered specially by the template.
            field['skip'] = True
        req.hdf['ticket.fields.' + name] = field

    req.hdf['ticket.reporter_id'] = reporter_id
    req.hdf['title'] = '#%d (%s)' % (ticket.id, ticket['summary'])
    req.hdf['ticket.description.formatted'] = wiki_to_html(
        ticket['description'], self.env, req, db)

    req.hdf['ticket.opened'] = util.format_datetime(ticket.time_created)
    req.hdf['ticket.opened_delta'] = util.pretty_timedelta(
        ticket.time_created)
    if ticket.time_changed != ticket.time_created:
        req.hdf['ticket.lastmod'] = util.format_datetime(ticket.time_changed)
        req.hdf['ticket.lastmod_delta'] = util.pretty_timedelta(
            ticket.time_changed)

    # Group change log entries by (date, author): consecutive rows with
    # the same pair are folded into one change record.
    changelog = ticket.get_changelog(db=db)
    curr_author = None
    curr_date = 0
    changes = []
    for date, author, field, old, new in changelog:
        if date != curr_date or author != curr_author:
            changes.append({
                'date': util.format_datetime(date),
                'author': author,
                'fields': {}
            })
            curr_date = date
            curr_author = author
        if field == 'comment':
            changes[-1]['comment'] = wiki_to_html(new, self.env, req, db)
        elif field == 'description':
            # Description diffs are rendered elsewhere; only flag the change.
            changes[-1]['fields'][field] = ''
        else:
            changes[-1]['fields'][field] = {'old': old, 'new': new}
    req.hdf['ticket.changes'] = changes

    # List attached files
    for idx, attachment in util.enum(Attachment.select(self.env, 'ticket',
                                                       ticket.id)):
        hdf = attachment_to_hdf(self.env, db, req, attachment)
        req.hdf['ticket.attachments.%s' % idx] = hdf
    if req.perm.has_permission('TICKET_APPEND'):
        req.hdf['ticket.attach_href'] = self.env.href.attachment(
            'ticket', ticket.id)

    # Add the possible actions to hdf
    actions = TicketSystem(self.env).get_available_actions(ticket, req.perm)
    for action in actions:
        req.hdf['ticket.actions.' + action] = '1'
def process_request(self, req):
    """Handle the single-build view page.

    Renders 'bitten_build.html' with the build's metadata, attachments,
    steps (logs and reports) for GET; a POST with action=invalidate
    re-queues the build and redirects back to the build page.
    """
    req.perm.require('BUILD_VIEW')

    db = self.env.get_db_cnx()
    build_id = int(req.args.get('id'))
    build = Build.fetch(self.env, build_id, db=db)
    if not build:
        raise HTTPNotFound("Build '%s' does not exist." \
                           % build_id)

    if req.method == 'POST':
        if req.args.get('action') == 'invalidate':
            self._do_invalidate(req, build, db)
        # Redirect after POST regardless of the action taken.
        req.redirect(req.href.build(build.config, build.id))

    add_link(req, 'up', req.href.build(build.config),
             'Build Configuration')
    data = {'title': 'Build %s - %s' % (build_id,
                                        _status_title[build.status]),
            'page_mode': 'view_build',
            'build': {}}
    config = BuildConfig.fetch(self.env, build.config, db=db)
    data['build']['config'] = {
        'name': config.label or config.name,
        'href': req.href.build(config.name)
    }

    context = Context.from_request(req, build.resource)
    data['context'] = context
    data['build']['attachments'] = AttachmentModule(
        self.env).attachment_data(context)

    # One log formatter per registered provider, bound to this build.
    formatters = []
    for formatter in self.log_formatters:
        formatters.append(formatter.get_formatter(req, build))

    summarizers = {}  # keyed by report type
    for summarizer in self.report_summarizers:
        categories = summarizer.get_supported_categories()
        summarizers.update(dict([(cat, summarizer) for cat in categories]))

    data['build'].update(_get_build_data(self.env, req, build))
    steps = []
    for step in BuildStep.select(self.env, build=build.id, db=db):
        steps.append({
            'name': step.name,
            'description': step.description,
            'duration': pretty_timedelta(step.started, step.stopped),
            'failed': step.status == BuildStep.FAILURE,
            'errors': step.errors,
            'log': self._render_log(req, build, formatters, step),
            'reports': self._render_reports(req, config, build,
                                            summarizers, step)
        })
    data['build']['steps'] = steps
    data['build']['can_delete'] = ('BUILD_DELETE' in req.perm \
                                   and build.status != build.PENDING)

    # Verify the requester may see the branch this build belongs to.
    repos = self.env.get_repository(req.authname)
    repos.authz.assert_permission(config.branch)

    chgset = repos.get_changeset(build.rev)
    data['build']['chgset_author'] = chgset.author

    add_script(req, 'common/js/folding.js')
    add_script(req, 'bitten/tabset.js')
    add_stylesheet(req, 'bitten/bitten.css')
    return 'bitten_build.html', data, None
def process_request(self, req):
    """Main watchlist request dispatcher.

    Handles the actions 'watch', 'unwatch', 'notifyon', 'notifyoff',
    'settings' and 'view'.  Mutating actions fall through to 'view'
    (rendering the watchlist page) unless a redirect back to the watched
    resource is requested and possible.
    """
    href = req.href
    user = to_unicode(req.authname)
    # The watchlist is per-user; anonymous access makes no sense.
    if not user or user == 'anonymous':
        raise WatchlistError(
            tag("Please ", tag.a("log in", href=href('login')),
                " to view or change your watchlist!"))

    args = req.args
    wldict = args.copy()
    action = args.get('action', 'view')
    redirectback = self.gredirectback
    ispattern = False
    # Disabled for now, not implemented fully
    # args.get('ispattern','0')

    # Heuristic: are we being called from the watchlist page itself?
    onwatchlistpage = req.environ.get('HTTP_REFERER', '').find(
        href.watchlist()) != -1
    if ispattern or onwatchlistpage:
        redirectback = False

    if action in ('watch', 'unwatch', 'notifyon', 'notifyoff'):
        try:
            realm = to_unicode(args['realm'])
            resid = to_unicode(args['resid'])
        except KeyError:
            raise WatchlistError(
                "Realm and ResId needed for watch/unwatch action!")
        if realm not in ('wiki', 'ticket'):
            raise WatchlistError(
                "Only wikis and tickets can be watched/unwatched!")
        is_watching = self.is_watching(realm, resid, user)
        realm_perm = realm.upper() + '_VIEW' in req.perm
        if ispattern:
            pattern = self._convert_pattern(resid)
        else:
            reslink = href(realm, resid)
            res_exists = self.res_exists(realm, resid, user)
    else:
        is_watching = None

    wlhref = href("watchlist")
    add_ctxtnav(req, "Watched Wikis", href=wlhref + '#wikis')
    add_ctxtnav(req, "Watched Tickets", href=wlhref + '#tickets')
    #add_ctxtnav(req, "Settings", href=wlhref + '#settings')

    wiki_perm = 'WIKI_VIEW' in req.perm
    ticket_perm = 'TICKET_VIEW' in req.perm
    wldict['wiki_perm'] = wiki_perm
    wldict['ticket_perm'] = ticket_perm
    wldict['error'] = False
    gnotify = self.gnotify
    wldict['notify'] = gnotify and self.gnotifycolumn
    if onwatchlistpage:
        wldict['show_messages'] = self.gmsgwowlpage
    else:
        wldict['show_messages'] = self.gmsgwlpage
    msgrespage = self.gmsgrespage

    # DB look-up
    db = self.env.get_db_cnx()
    cursor = db.cursor()

    if action == "watch":
        lst = (user, realm, resid)
        if realm_perm and not is_watching:
            # Watched resources live in tables keyed differently:
            # wiki pages by 'name', tickets by 'id'.
            col = realm == 'wiki' and 'name' or 'id'
            if ispattern:
                #cursor.log = self.env.log
                # Check if wiki/ticket exists:
                cursor.execute(
                    "SELECT count(*) FROM %s WHERE %s LIKE %%s"
                    % (realm, col), (pattern, ))  #("'"+pattern+"'",) )
                count = cursor.fetchone()
                if not count or not count[0]:
                    raise WatchlistError(
                        "Selected pattern %s:%s (%s) doesn't match anything!"
                        % (realm, resid, pattern))
                #cursor.execute(
                #    "INSERT INTO watchlist (wluser, realm, resid) "
                #    "SELECT '%s','%s',%s FROM %s WHERE %s LIKE %%s" % (user,realm,col,
                #    realm,col), (resid,) )
                # NOTE(review): user and realm are spliced directly into
                # the SQL string here; binding them as parameters (PEP
                # 249 placeholders) would be safer.
                cursor.execute(
                    "INSERT INTO watchlist (wluser, realm, resid) " +
                    "SELECT %s,%s,%s FROM %s WHERE %s LIKE %%s" % \
                    ("'"+user+"'", "'"+realm+"'", col, realm, col),
                    (pattern,) )
            else:
                if not res_exists:
                    wldict['error'] = True
                    if redirectback and not onwatchlistpage:
                        raise WatchlistError(
                            "Selected resource %s:%s doesn't exists!"
                            % (realm, resid))
                    redirectback = False
                else:
                    cursor.execute(
                        "INSERT INTO watchlist (wluser, realm, resid) "
                        "VALUES (%s,%s,%s);", lst)
            db.commit()
            if not onwatchlistpage and redirectback and msgrespage:
                req.session['watchlist_message'] = (
                    'This %s has been added to your watchlist.' % realm)
        # Fall through to the notify-on handler when notification is the
        # configured default; otherwise redirect back or render the view.
        if self.gnotify and self.gnotifybydefault:
            action = "notifyon"
        else:
            if redirectback:
                req.redirect(reslink)
                raise RequestDone
            action = "view"
    elif action == "unwatch":
        lst = (user, realm, resid)
        if realm_perm:
            if ispattern:
                #cursor.log = self.env.log
                is_watching = True
                cursor.execute(
                    "DELETE FROM watchlist "
                    "WHERE wluser=%s AND realm=%s AND resid LIKE %s",
                    (user, realm, pattern))
                db.commit()
            elif is_watching:
                cursor.execute(
                    "DELETE FROM watchlist "
                    "WHERE wluser=%s AND realm=%s AND resid=%s;", lst)
                db.commit()
            elif not res_exists:
                wldict['error'] = True
                if redirectback and not onwatchlistpage:
                    raise WatchlistError(
                        "Selected resource %s:%s doesn't exists!"
                        % (realm, resid))
                redirectback = False
        if not onwatchlistpage and redirectback and msgrespage:
            req.session['watchlist_message'] = (
                'This %s has been removed from your watchlist.' % realm)
        if self.gnotify and self.gnotifybydefault:
            action = "notifyoff"
        else:
            if redirectback:
                req.redirect(reslink)
                raise RequestDone
            action = "view"

    if action == "notifyon":
        if self.gnotify:
            self.set_notify(req.session.sid, True, realm, resid)
            db.commit()
        if redirectback:
            if msgrespage:
                req.session['watchlist_notify_message'] = (
                    'You are now receiving '
                    'change notifications about this resource.')
            req.redirect(reslink)
            raise RequestDone
        action = "view"
    elif action == "notifyoff":
        if self.gnotify:
            self.unset_notify(req.session.sid, True, realm, resid)
            db.commit()
        if redirectback:
            if msgrespage:
                req.session['watchlist_notify_message'] = (
                    'You are no longer receiving '
                    'change notifications about this resource.')
            req.redirect(reslink)
            raise RequestDone
        action = "view"

    if action == "settings":
        d = args.copy()
        del d['action']
        self._save_user_settings(user, d)
        action = "view"
        wldict['user_settings'] = d
    else:
        wldict['user_settings'] = self._get_user_settings(user)
    wldict['is_watching'] = is_watching

    if action == "view":
        # Base URL for per-entry timeline deep links.
        timeline = href('timeline', precision='seconds') + "&from="

        def timeline_link(time):
            return timeline + quote_plus(format_datetime(time, 'iso8601'))

        wikilist = []
        if wiki_perm:
            # Watched wikis which got deleted:
            cursor.execute(
                "SELECT resid FROM watchlist WHERE realm='wiki' AND wluser=%s "
                "AND resid NOT IN (SELECT DISTINCT name FROM wiki);",
                (user, ))
            for (name, ) in cursor.fetchall():
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True,
                                            'wiki', name)
                wikilist.append({
                    'name': name,
                    'author': '?',
                    'datetime': '?',
                    'comment': tag.strong("DELETED!", class_='deleted'),
                    'notify': notify,
                    'deleted': True,
                })
            # Existing watched wikis:
            cursor.execute(
                "SELECT name,author,time,version,comment FROM wiki AS w1 WHERE name IN "
                "(SELECT resid FROM watchlist WHERE wluser=%s AND realm='wiki') "
                "AND version=(SELECT MAX(version) FROM wiki AS w2 WHERE w1.name=w2.name) "
                "ORDER BY time DESC;", (user, ))
            wikis = cursor.fetchall()
            for name, author, time, version, comment in wikis:
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True,
                                            'wiki', name)
                wikilist.append({
                    'name': name,
                    'author': author,
                    'version': version,
                    'datetime': format_datetime(time),
                    'timedelta': pretty_timedelta(time),
                    'timeline_link': timeline_link(time),
                    'comment': comment,
                    'notify': notify,
                })
            wldict['wikilist'] = wikilist

        if ticket_perm:
            ticketlist = []
            cursor.execute(
                "SELECT id,type,time,changetime,summary,reporter FROM ticket WHERE id IN "
                "(SELECT CAST(resid AS decimal) FROM watchlist WHERE wluser=%s AND realm='ticket') "
                "GROUP BY id,type,time,changetime,summary,reporter "
                "ORDER BY changetime DESC;", (user, ))
            tickets = cursor.fetchall()
            for id, type, time, changetime, summary, reporter in tickets:
                # commentnum/comment are stashed on self so that the
                # nested format_change closure can update them.
                self.commentnum = 0
                self.comment = ''
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True,
                                            'ticket', id)
                cursor.execute(
                    "SELECT author,field,oldvalue,newvalue FROM ticket_change "
                    "WHERE ticket=%s and time=%s "
                    "ORDER BY field DESC;", (id, changetime))

                def format_change(field, oldvalue, newvalue):
                    """Formats tickets changes."""
                    fieldtag = tag.strong(field)
                    if field == 'cc':
                        oldvalues = set(oldvalue and
                                        oldvalue.split(', ') or [])
                        newvalues = set(newvalue and
                                        newvalue.split(', ') or [])
                        added = newvalues.difference(oldvalues)
                        removed = oldvalues.difference(newvalues)
                        strng = fieldtag
                        if added:
                            strng += tag(" ", tag.em(', '.join(added)),
                                         " added")
                        if removed:
                            if added:
                                strng += tag(', ')
                            strng += tag(" ", tag.em(', '.join(removed)),
                                         " removed")
                        return strng
                    elif field == 'description':
                        return fieldtag + tag(
                            " modified (",
                            tag.a("diff",
                                  href=href('ticket', id, action='diff',
                                            version=self.commentnum)),
                            ")")
                    elif field == 'comment':
                        self.commentnum = oldvalue
                        self.comment = newvalue
                        return tag("")
                    elif not oldvalue:
                        return fieldtag + tag(" ", tag.em(newvalue),
                                              " added")
                    elif not newvalue:
                        return fieldtag + tag(" ", tag.em(oldvalue),
                                              " deleted")
                    else:
                        return fieldtag + tag(" changed from ",
                                              tag.em(oldvalue), " to ",
                                              tag.em(newvalue))

                changes = []
                author = reporter
                for author_, field, oldvalue, newvalue in \
                        cursor.fetchall():
                    author = author_
                    changes.extend([
                        format_change(field, oldvalue, newvalue),
                        tag("; ")
                    ])
                # changes holds list of formatted changes interleaved with
                # tag('; '). The first change is always the comment which
                # returns an empty tag, so we skip the first two elements
                # [tag(''), tag('; ')] and remove the last tag('; '):
                changes = changes and tag(changes[2:-1]) or tag()
                ticketlist.append({
                    'id': to_unicode(id),
                    'type': type,
                    'author': author,
                    'commentnum': to_unicode(self.commentnum),
                    'comment': len(self.comment) <= 250 and self.comment
                               or self.comment[:250] + '...',
                    'datetime': format_datetime(changetime),
                    'timedelta': pretty_timedelta(changetime),
                    'timeline_link': timeline_link(changetime),
                    'changes': changes,
                    'summary': summary,
                    'notify': notify,
                })
            wldict['ticketlist'] = ticketlist
        return ("watchlist.html", wldict, "text/html")
    else:
        raise WatchlistError("Invalid watchlist action '%s'!" % action)
    raise WatchlistError("Watchlist: Unsupported request!")
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file: raw/plain download, or an HTML preview
    stored in the HDF (capped at the configured max_preview_size)."""
    req.perm.assert_permission('FILE_VIEW')

    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': time.strftime('%x %X', time.localtime(changeset.date)),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None

    format = req.args.get('format')
    if format in ['raw', 'txt']:
        # Stream the file verbatim ('txt' forces a text/plain header).
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()

        content = node.get_content()
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                # RequestDone terminates request processing after the
                # full body has been written.
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        max_preview_size = int(
            self.config.get('mimeviewer', 'max_preview_size', '262144'))
        content = node.get_content().read(max_preview_size)
        # Heuristic: a read that fills the buffer exactly is treated as
        # truncated.
        max_size_reached = len(content) == max_preview_size
        if not charset:
            charset = detect_unicode(content) or \
                      self.config.get('trac', 'default_charset')
        if not is_binary(content):
            content = util.to_utf8(content, charset)
            if mime_type != 'text/plain':
                plain_href = self.env.href.browser(node.path,
                                                   rev=rev and node.rev,
                                                   format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        if max_size_reached:
            # Too large to preview: show a notice instead.
            req.hdf['file.max_file_size_reached'] = 1
            req.hdf['file.max_file_size'] = max_preview_size
            preview = ' '
        else:
            preview = Mimeview(self.env).render(req, mime_type, content,
                                                node.name, node.rev,
                                                annotations=['lineno'])
        req.hdf['file.preview'] = preview

        raw_href = self.env.href.browser(node.path, rev=rev and node.rev,
                                         format='raw')
        req.hdf['file.raw_href'] = util.escape(raw_href)
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
    add_stylesheet(req, 'common/css/code.css')
def _render_file(self, req, repos, node, rev=None):
    """PeerReview variant of the file renderer: raw/plain download or an
    HTML preview built via Mimeview.preview_to_hdf with per-line
    'addFileNums' annotations."""
    req.perm.assert_permission('FILE_VIEW')

    changeset = repos.get_changeset(node.rev)
    # NOTE(review): this hdf node is assigned again below with the
    # preview data -- verify that HDF assignment merges rather than
    # replaces, otherwise these values are lost in the preview branch.
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': util.format_datetime(changeset.date),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None

    format = req.args.get('format')
    if format in ['raw', 'txt']:
        # Stream the file verbatim ('txt' forces a text/plain header).
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()

        content = node.get_content()
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        content = node.get_content().read(mimeview.max_preview_size())
        if not is_binary(content):
            if mime_type != 'text/plain':
                plain_href = self.env.href.peerReviewBrowser(
                    node.path, rev=rev and node.rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        req.hdf['file'] = mimeview.preview_to_hdf(
            req, mime_type, charset, content, node.name, node.rev,
            annotations=['addFileNums'])

        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        req.hdf['file.raw_href'] = util.escape(raw_href)
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
    add_stylesheet(req, 'common/css/code.css')
def get_ticket_markup(self, who, since):
    """Return an HTML ``<li>`` saying how long *who* has worked on the ticket.

    The elapsed time is rendered human-readably via
    ``pretty_timedelta(since, None)``.
    """
    duration = pretty_timedelta(since, None)
    markup = '<li>%s has been working on this ticket for %s</li>' \
             % (who, duration)
    return markup
def process_request(self, req):
    """Handle the single-build view page (multi-repository Trac variant).

    Renders 'bitten_build.html' with build metadata, steps, logs and
    reports; a POST with action=invalidate re-queues the build and
    redirects back to the build page.
    """
    req.perm.require('BUILD_VIEW')

    db = self.env.get_db_cnx()
    build_id = int(req.args.get('id'))
    build = Build.fetch(self.env, build_id, db=db)
    if not build:
        raise HTTPNotFound("Build '%s' does not exist." \
                           % build_id)

    if req.method == 'POST':
        if req.args.get('action') == 'invalidate':
            self._do_invalidate(req, build, db)
        # Redirect after POST regardless of the action taken.
        req.redirect(req.href.build(build.config, build.id))

    add_link(req, 'up', req.href.build(build.config),
             'Build Configuration')
    data = {'title': 'Build %s - %s' % (build_id,
                                        _status_title[build.status]),
            'page_mode': 'view_build',
            'build': {}}
    config = BuildConfig.fetch(self.env, build.config, db=db)
    data['build']['config'] = {
        'name': config.label or config.name,
        'href': req.href.build(config.name)
    }

    context = Context.from_request(req, build.resource)
    data['context'] = context
    data['build']['attachments'] = AttachmentModule(
        self.env).attachment_data(context)

    # One log formatter per registered provider, bound to this build.
    formatters = []
    for formatter in self.log_formatters:
        formatters.append(formatter.get_formatter(req, build))

    summarizers = {}  # keyed by report type
    for summarizer in self.report_summarizers:
        categories = summarizer.get_supported_categories()
        summarizers.update(dict([(cat, summarizer) for cat in categories]))

    data['build'].update(_get_build_data(self.env, req, build))
    steps = []
    for step in BuildStep.select(self.env, build=build.id, db=db):
        steps.append({
            'name': step.name,
            'description': step.description,
            # A step still in progress has no stop time; fall back to
            # "now" so its duration keeps increasing.
            'duration': pretty_timedelta(step.started,
                                         step.stopped or int(time.time())),
            'status': _step_status_label[step.status],
            'cls': _step_status_label[step.status].replace(' ', '-'),
            'errors': step.errors,
            'log': self._render_log(req, build, formatters, step),
            'reports': self._render_reports(req, config, build,
                                            summarizers, step)
        })
    data['build']['steps'] = steps
    data['build']['can_delete'] = ('BUILD_DELETE' in req.perm \
                                   and build.status != build.PENDING)

    repos = self.env.get_repository(authname=req.authname)
    assert repos, 'No "(default)" Repository: Add a repository or alias ' \
                  'named "(default)" to Trac.'
    # Raises if the user may not see this path/revision.
    _has_permission(req.perm, repos, config.path, rev=build.rev,
                    raise_error=True)

    chgset = repos.get_changeset(build.rev)
    data['build']['chgset_author'] = chgset.author
    data['build']['display_rev'] = repos.normalize_rev(build.rev)

    add_script(req, 'common/js/folding.js')
    add_script(req, 'bitten/tabset.js')
    add_script(req, 'bitten/jquery.flot.js')
    add_stylesheet(req, 'bitten/bitten.css')
    return 'bitten_build.html', data, None
def _render_file(self, req, context, repos, node, rev=None):
    """Context-aware file renderer (PeerReview): stream raw/plain
    content, or return a template data dict with a Mimeview preview and
    changeset metadata."""
    req.perm(context.resource).require('FILE_VIEW')

    changeset = repos.get_changeset(node.rev)
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None

    # First chunk is read up front: the preview branch uses it for
    # binary detection, the raw branch streams it first.
    content = node.get_content()
    chunk = content.read(CHUNK_SIZE)
    format = req.args.get('format')
    if format in ('raw', 'txt'):
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified', util.http_date(node.last_modified))
        req.end_headers()

        while 1:
            if not chunk:
                raise RequestDone
            req.write(chunk)
            chunk = content.read(CHUNK_SIZE)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)

        # The changeset corresponding to the last change on `node`
        # is more interesting than the `rev` changeset.
        changeset = repos.get_changeset(node.rev)

        # add ''Plain Text'' alternate link if needed
        if not is_binary(chunk) and mime_type != 'text/plain':
            plain_href = req.href.browser(node.path, rev=rev, format='txt')
            add_link(req, 'alternate', plain_href, 'Plain Text',
                     'text/plain')
        add_stylesheet(req, 'common/css/code.css')

        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        preview_data = mimeview.preview_data(context, node.get_content(),
                                             node.get_content_length(),
                                             mime_type, node.created_path,
                                             raw_href,
                                             annotations=['lineno'])
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        return {
            'changeset': changeset,
            'size': node.content_length,
            'preview': preview_data['rendered'],
            'annotate': False,
            'rev': node.rev,
            'changeset_href': util.escape(self.env.href.changeset(node.rev)),
            'date': util.format_datetime(changeset.date),
            'age': util.pretty_timedelta(changeset.date),
            'author': changeset.author or 'anonymous',
            'message': wiki_to_html(changeset.message or '--', self.env,
                                    req, escape_newlines=True)
        }
def get_timeline_events(self, req, start, stop, filters):
    """Yield ``(kind, href, title, completed, author, comment)`` timeline
    events for Hudson builds started within ``[start, stop]``.

    Running builds are included only when ``self.disp_building`` is set.
    """
    if 'build' not in filters or not req.perm.has_permission('BUILD_VIEW'):
        return
    # Support both Trac 0.10 and 0.11: 0.11 passes datetime objects,
    # 0.10 plain timestamps.
    if isinstance(start, datetime):  # Trac>=0.11
        from trac.util.datefmt import to_timestamp
        start = to_timestamp(start)
        stop = to_timestamp(stop)

    add_stylesheet(req, 'HudsonTrac/hudsontrac.css')

    # get and parse the build-info; cset is presumably the charset for
    # decoding Hudson byte strings -- TODO confirm against __get_info().
    info, cset = self.__get_info()

    # extract all build entries
    for entry in self.__extract_builds(info):
        # get result, optionally ignoring builds that are still running
        if entry['building']:
            if self.disp_building:
                result = 'IN-PROGRESS'
            else:
                continue
        else:
            result = entry['result']

        # get start/stop times; /1000 suggests millisecond timestamps.
        started = entry['timestamp'] / 1000
        if started < start or started > stop:
            continue
        if result == 'IN-PROGRESS':
            # we hope the clocks are close...
            completed = time.time()
        else:
            completed = (entry['timestamp'] + entry['duration']) / 1000

        # get message and timeline CSS kind; self.alt_succ picks the
        # alternate style for successful / in-progress builds.
        message, kind = {
            'SUCCESS': ('Build finished successfully',
                        ('build-successful',
                         'build-successful-alt')[self.alt_succ]),
            'UNSTABLE': ('Build unstable', 'build-unstable'),
            'ABORTED': ('Build aborted', 'build-aborted'),
            'IN-PROGRESS': ('Build in progress',
                            ('build-inprogress',
                             'build-inprogress-alt')[self.alt_succ]),
        }.get(result, ('Build failed', 'build-failed'))
        if self.use_desc:
            # Prefer the build's own description when present.
            message = entry['description'] and \
                      unicode(entry['description'], cset) or message

        # get changesets
        changesets = ''
        if self.list_changesets:
            paths = ['changeSet.items.revision', 'changeSet.items.id']
            revs = [unicode(str(r), cset) for r in \
                    self.__find_all(entry, paths)]
            if revs:
                revs = [self.__fmt_changeset(r, req) for r in revs]
                changesets = '<br/>Changesets: ' + ', '.join(revs)

        # get author(s): try each source listed in self.disp_culprit in
        # order, stopping at the first non-empty value.
        author = None
        for c in self.disp_culprit:
            author = {
                'starter': self.__find_first(entry,
                                             'actions.causes.userName'),
                'author': self.__find_first(entry, [
                    'changeSet.items.user',
                    'changeSet.items.author.fullName'
                ]),
                'authors': self.__find_all(entry, [
                    'changeSet.items.user',
                    'changeSet.items.author.fullName'
                ]),
                'culprit': self.__find_first(entry, 'culprits.fullName'),
                'culprits': self.__find_all(entry, 'culprits.fullName'),
            }.get(c)
            if author and not isinstance(author, basestring):
                # Multiple names: deduplicate and join.
                author = ', '.join(set(author))
            if author:
                author = unicode(author, cset)
                break

        # format response
        if result == 'IN-PROGRESS':
            comment = Markup(
                "%s since %s, duration %s%s"
                % (message, format_datetime(started),
                   pretty_timedelta(started, completed), changesets))
        else:
            comment = Markup(
                "%s at %s, duration %s%s"
                % (message, format_datetime(completed),
                   pretty_timedelta(started, completed), changesets))
        href = entry['url']
        title = 'Build "%s" (%s)' % \
                (unicode(entry['fullDisplayName'], cset), result.lower())
        yield kind, href, title, completed, author, comment
def _render_diff(self, req, db, page):
    """Populate the HDF with a diff between two wiki page versions.

    The older side is either the requested ``old_version`` or the
    version immediately preceding ``page.version`` in the history.
    """
    req.perm.assert_permission('WIKI_VIEW')
    if not page.exists:
        raise TracError, "Version %s of page %s does not exist" \
                         % (req.args.get('version'), page.name)

    add_stylesheet(req, 'common/css/diff.css')
    req.hdf['title'] = escape(page.name) + ' (diff)'
    # Ask web spiders to not index old versions
    req.hdf['html.norobots'] = 1

    old_version = req.args.get('old_version')
    if old_version:
        old_version = int(old_version)
        if old_version == page.version:
            old_version = None
        elif old_version > page.version:
            # Requested "old" side is newer: swap so `page` is always the
            # newer side of the diff.
            old_version, page = page.version, \
                               WikiPage(self.env, page.name, old_version)

    info = {
        'version': page.version,
        'history_href': escape(self.env.href.wiki(page.name,
                                                  action='history'))
    }

    num_changes = 0
    old_page = None
    for version, t, author, comment, ipnr in page.get_history():
        if version == page.version:
            # Metadata for the newer side of the diff.
            if t:
                info['time'] = format_datetime(t)
                info['time_delta'] = pretty_timedelta(t)
            info['author'] = escape(author or 'anonymous')
            info['comment'] = escape(comment or '--')
            info['ipnr'] = escape(ipnr or '')
        else:
            num_changes += 1
            if version < page.version:
                if (old_version and version == old_version) or \
                        not old_version:
                    old_page = WikiPage(self.env, page.name, version)
                    info['num_changes'] = num_changes
                    info['old_version'] = version
                    break
    req.hdf['wiki'] = info

    diff_style, diff_options = get_diff_options(req)
    oldtext = old_page and old_page.text.splitlines() or []
    newtext = page.text.splitlines()

    # A "-Un" option overrides the default context line count; a
    # negative value means unlimited context.
    context = 3
    for option in diff_options:
        if option.startswith('-U'):
            context = int(option[2:])
            break
    if context < 0:
        context = None
    changes = hdf_diff(oldtext, newtext, context=context,
                       ignore_blank_lines='-B' in diff_options,
                       ignore_case='-i' in diff_options,
                       ignore_space_changes='-b' in diff_options)
    req.hdf['wiki.diff'] = changes
def process_request(self, req):
    """Handle all watchlist requests: view, (un)watch, notify toggles, settings.

    Dispatches on the 'action' request argument:
      - watch / unwatch: add or remove a wiki page or ticket from the
        authenticated user's watchlist;
      - notifyon / notifyoff: toggle change notifications for a resource;
      - settings: store per-user display settings;
      - view (default): render the watchlist page.

    Returns a ('watchlist.html', data, content-type) triple for the view
    action; other actions either redirect back to the watched resource
    (raising RequestDone) or fall through to the view.

    Raises WatchlistError for anonymous users, missing/invalid arguments,
    unknown realms and unknown actions.

    Fix: the pattern-watch INSERT previously spliced the (quoted) user and
    realm *values* directly into the SQL string; they are now passed as
    bound parameters.  Only the whitelist-derived identifiers (table and
    column names) remain interpolated.
    """
    href = req.href
    user = to_unicode(req.authname)
    # The watchlist is per-user; anonymous visitors have nothing to watch.
    if not user or user == "anonymous":
        raise WatchlistError(
            tag("Please ", tag.a("log in", href=href("login")),
                " to view or change your watchlist!")
        )

    args = req.args
    wldict = args.copy()
    action = args.get("action", "view")
    redirectback = self.gredirectback
    ispattern = False
    # Disabled for now, not implemented fully
    # args.get('ispattern','0')

    # Heuristic: did the request originate from the watchlist page itself?
    onwatchlistpage = req.environ.get("HTTP_REFERER", "").find(href.watchlist()) != -1
    if ispattern or onwatchlistpage:
        # No resource page to go back to in these cases.
        redirectback = False

    if action in ("watch", "unwatch", "notifyon", "notifyoff"):
        try:
            realm = to_unicode(args["realm"])
            resid = to_unicode(args["resid"])
        except KeyError:
            raise WatchlistError("Realm and ResId needed for watch/unwatch action!")
        # Whitelist of realms; this also keeps `realm` safe to use as a
        # table name below.
        if realm not in ("wiki", "ticket"):
            raise WatchlistError("Only wikis and tickets can be watched/unwatched!")
        is_watching = self.is_watching(realm, resid, user)
        realm_perm = realm.upper() + "_VIEW" in req.perm
        if ispattern:
            pattern = self._convert_pattern(resid)
        else:
            reslink = href(realm, resid)
            res_exists = self.res_exists(realm, resid, user)
    else:
        is_watching = None

    wlhref = href("watchlist")
    add_ctxtnav(req, "Watched Wikis", href=wlhref + "#wikis")
    add_ctxtnav(req, "Watched Tickets", href=wlhref + "#tickets")
    # add_ctxtnav(req, "Settings", href=wlhref + '#settings')

    wiki_perm = "WIKI_VIEW" in req.perm
    ticket_perm = "TICKET_VIEW" in req.perm
    wldict["wiki_perm"] = wiki_perm
    wldict["ticket_perm"] = ticket_perm
    wldict["error"] = False
    gnotify = self.gnotify
    wldict["notify"] = gnotify and self.gnotifycolumn
    if onwatchlistpage:
        wldict["show_messages"] = self.gmsgwowlpage
    else:
        wldict["show_messages"] = self.gmsgwlpage
    msgrespage = self.gmsgrespage

    # DB look-up
    db = self.env.get_db_cnx()
    cursor = db.cursor()

    if action == "watch":
        lst = (user, realm, resid)
        if realm_perm and not is_watching:
            # Column holding the resource id differs per realm.
            col = realm == "wiki" and "name" or "id"
            if ispattern:
                # Check if wiki/ticket exists:
                # (realm/col are whitelist-derived identifiers; the pattern
                # itself is a bound parameter)
                cursor.execute("SELECT count(*) FROM %s WHERE %s LIKE %%s"
                               % (realm, col), (pattern,))
                count = cursor.fetchone()
                if not count or not count[0]:
                    raise WatchlistError(
                        "Selected pattern %s:%s (%s) doesn't match anything!"
                        % (realm, resid, pattern)
                    )
                # Watch every resource matching the pattern.  User and
                # realm are passed as bound parameters (previously they
                # were quoted into the SQL string by hand, which broke on
                # user names containing quotes and allowed injection).
                cursor.execute(
                    "INSERT INTO watchlist (wluser, realm, resid) "
                    "SELECT %%s,%%s,%s FROM %s WHERE %s LIKE %%s"
                    % (col, realm, col),
                    (user, realm, pattern),
                )
            else:
                if not res_exists:
                    wldict["error"] = True
                    if redirectback and not onwatchlistpage:
                        raise WatchlistError("Selected resource %s:%s doesn't exists!" % (realm, resid))
                    redirectback = False
                else:
                    cursor.execute("INSERT INTO watchlist (wluser, realm, resid) "
                                   "VALUES (%s,%s,%s);", lst)
            db.commit()
            if not onwatchlistpage and redirectback and msgrespage:
                req.session["watchlist_message"] = "This %s has been added to your watchlist." % realm
        # Optionally chain into enabling notification, otherwise go back
        # to the resource or fall through to the view.
        if self.gnotify and self.gnotifybydefault:
            action = "notifyon"
        else:
            if redirectback:
                req.redirect(reslink)
                raise RequestDone
            action = "view"

    elif action == "unwatch":
        lst = (user, realm, resid)
        if realm_perm:
            if ispattern:
                is_watching = True
                cursor.execute(
                    "DELETE FROM watchlist "
                    "WHERE wluser=%s AND realm=%s AND resid LIKE %s",
                    (user, realm, pattern),
                )
                db.commit()
            elif is_watching:
                cursor.execute("DELETE FROM watchlist "
                               "WHERE wluser=%s AND realm=%s AND resid=%s;", lst)
                db.commit()
            elif not res_exists:
                wldict["error"] = True
                if redirectback and not onwatchlistpage:
                    raise WatchlistError("Selected resource %s:%s doesn't exists!" % (realm, resid))
                redirectback = False
        if not onwatchlistpage and redirectback and msgrespage:
            req.session["watchlist_message"] = "This %s has been removed from your watchlist." % realm
        if self.gnotify and self.gnotifybydefault:
            action = "notifyoff"
        else:
            if redirectback:
                req.redirect(reslink)
                raise RequestDone
            action = "view"

    if action == "notifyon":
        if self.gnotify:
            self.set_notify(req.session.sid, True, realm, resid)
            db.commit()
        if redirectback:
            if msgrespage:
                req.session["watchlist_notify_message"] = (
                    "You are now receiving "
                    "change notifications about this resource."
                )
            req.redirect(reslink)
            raise RequestDone
        action = "view"
    elif action == "notifyoff":
        if self.gnotify:
            self.unset_notify(req.session.sid, True, realm, resid)
            db.commit()
        if redirectback:
            if msgrespage:
                req.session["watchlist_notify_message"] = (
                    "You are no longer receiving "
                    "change notifications about this resource."
                )
            req.redirect(reslink)
            raise RequestDone
        action = "view"

    if action == "settings":
        d = args.copy()
        del d["action"]
        self._save_user_settings(user, d)
        action = "view"
        wldict["user_settings"] = d
    else:
        wldict["user_settings"] = self._get_user_settings(user)
    wldict["is_watching"] = is_watching

    if action == "view":
        # Base URL for per-entry timeline deep links.
        timeline = href("timeline", precision="seconds") + "&from="

        def timeline_link(time):
            return timeline + quote_plus(format_datetime(time, "iso8601"))

        wikilist = []
        if wiki_perm:
            # Watched wikis which got deleted:
            cursor.execute(
                "SELECT resid FROM watchlist WHERE realm='wiki' AND wluser=%s "
                "AND resid NOT IN (SELECT DISTINCT name FROM wiki);",
                (user,),
            )
            for (name,) in cursor.fetchall():
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True, "wiki", name)
                wikilist.append(
                    {
                        "name": name,
                        "author": "?",
                        "datetime": "?",
                        "comment": tag.strong("DELETED!", class_="deleted"),
                        "notify": notify,
                        "deleted": True,
                    }
                )
            # Existing watched wikis (latest version of each):
            cursor.execute(
                "SELECT name,author,time,version,comment FROM wiki AS w1 WHERE name IN "
                "(SELECT resid FROM watchlist WHERE wluser=%s AND realm='wiki') "
                "AND version=(SELECT MAX(version) FROM wiki AS w2 WHERE w1.name=w2.name) "
                "ORDER BY time DESC;",
                (user,),
            )
            wikis = cursor.fetchall()
            for name, author, time, version, comment in wikis:
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True, "wiki", name)
                wikilist.append(
                    {
                        "name": name,
                        "author": author,
                        "version": version,
                        "datetime": format_datetime(from_utimestamp(time), "%F %T %Z"),
                        "timedelta": pretty_timedelta(from_utimestamp(time)),
                        "timeline_link": timeline_link(from_utimestamp(time)),
                        "comment": comment,
                        "notify": notify,
                    }
                )
            wldict["wikilist"] = wikilist

        if ticket_perm:
            ticketlist = []
            # resid is stored as text; CAST lets it match the numeric
            # ticket id column.
            cursor.execute(
                "SELECT id,type,time,changetime,summary,reporter FROM ticket WHERE id IN "
                "(SELECT CAST(resid AS decimal) FROM watchlist WHERE wluser=%s AND realm='ticket') "
                "GROUP BY id,type,time,changetime,summary,reporter "
                "ORDER BY changetime DESC;",
                (user,),
            )
            tickets = cursor.fetchall()
            for id, type, time, changetime, summary, reporter in tickets:
                # Instance attributes used as mutable out-params for the
                # nested format_change() closure.
                self.commentnum = 0
                self.comment = ""
                notify = False
                if gnotify:
                    notify = self.is_notify(req.session.sid, True, "ticket", id)
                # All field changes from the ticket's latest modification.
                cursor.execute(
                    "SELECT author,field,oldvalue,newvalue FROM ticket_change "
                    "WHERE ticket=%s and time=%s "
                    "ORDER BY field DESC;",
                    (id, changetime),
                )

                def format_change(field, oldvalue, newvalue):
                    """Formats tickets changes."""
                    fieldtag = tag.strong(field)
                    if field == "cc":
                        # Show cc additions/removals as a diff of the two
                        # comma-separated lists.
                        oldvalues = set(oldvalue and oldvalue.split(", ") or [])
                        newvalues = set(newvalue and newvalue.split(", ") or [])
                        added = newvalues.difference(oldvalues)
                        removed = oldvalues.difference(newvalues)
                        strng = fieldtag
                        if added:
                            strng += tag(" ", tag.em(", ".join(added)), " added")
                        if removed:
                            if added:
                                strng += tag(", ")
                            strng += tag(" ", tag.em(", ".join(removed)), " removed")
                        return strng
                    elif field == "description":
                        return fieldtag + tag(
                            " modified (",
                            tag.a("diff", href=href("ticket", id, action="diff", version=self.commentnum)),
                            ")",
                        )
                    elif field == "comment":
                        # Comment rows carry the comment number/text; they
                        # are captured here and rendered separately.
                        self.commentnum = oldvalue
                        self.comment = newvalue
                        return tag("")
                    elif not oldvalue:
                        return fieldtag + tag(" ", tag.em(newvalue), " added")
                    elif not newvalue:
                        return fieldtag + tag(" ", tag.em(oldvalue), " deleted")
                    else:
                        return fieldtag + tag(" changed from ", tag.em(oldvalue),
                                              " to ", tag.em(newvalue))

                changes = []
                author = reporter
                for author_, field, oldvalue, newvalue in cursor.fetchall():
                    author = author_
                    changes.extend([format_change(field, oldvalue, newvalue), tag("; ")])
                # changes holds list of formatted changes interleaved with
                # tag('; '). The first change is always the comment which
                # returns an empty tag, so we skip the first two elements
                # [tag(''), tag('; ')] and remove the last tag('; '):
                changes = changes and tag(changes[2:-1]) or tag()
                ticketlist.append(
                    {
                        "id": to_unicode(id),
                        "type": type,
                        "author": author,
                        "commentnum": to_unicode(self.commentnum),
                        # Truncate long comments for the overview table.
                        "comment": len(self.comment) <= 250 and self.comment or self.comment[:250] + "...",
                        "datetime": format_datetime(from_utimestamp(changetime), "%F %T %Z"),
                        "timedelta": pretty_timedelta(from_utimestamp(changetime)),
                        "timeline_link": timeline_link(from_utimestamp(changetime)),
                        "changes": changes,
                        "summary": summary,
                        "notify": notify,
                    }
                )
            wldict["ticketlist"] = ticketlist
        return ("watchlist.html", wldict, "text/html")
    else:
        raise WatchlistError("Invalid watchlist action '%s'!" % action)

    raise WatchlistError("Watchlist: Unsupported request!")
def _do_actions(self, req, actions):
    """Execute the screenshot actions requested for this request.

    `actions` is an iterable of action names ('get-file', 'add',
    'post-add', 'edit', ...); each is permission-checked and handled in
    turn.  Returns a (template, data, content-type) triple for actions
    that render a page; 'get-file' sends the image file directly.

    NOTE(review): the visible chunk ends inside the 'edit' branch — the
    remaining actions are handled past this excerpt.
    """
    # Initialize dictionary for data.
    data = {}

    # Get database access.
    db = self.env.get_db_cnx()
    cursor = db.cursor()

    # Get API component.
    api = self.env[ScreenshotsApi]

    for action in actions:
        if action == 'get-file':
            req.perm.assert_permission('SCREENSHOTS_VIEW')

            # Get request arguments.
            screenshot_id = int(req.args.get('id') or 0)
            format = req.args.get('format') or self.default_format
            width = int(req.args.get('width') or 0)
            height = int(req.args.get('height') or 0)

            # Check if requested format is allowed.
            if not format in self.formats:
                raise TracError('Requested screenshot format that is not allowed.',
                  'Requested format not allowed.')

            # Get screenshot.
            screenshot = api.get_screenshot(cursor, screenshot_id)

            if screenshot:
                # Set missing dimensions (default to the stored size).
                width = width or screenshot['width']
                height = height or screenshot['height']

                if format == 'html':
                    # Format screenshot attributes.
                    screenshot['time'] = pretty_timedelta(screenshot['time'])

                    # Prepare data dictionary.
                    data['screenshot'] = screenshot

                    # Return screenshot template and data.
                    return ('screenshot.cs', data, None)
                else:
                    # Prepare screenshot filename ('raw' keeps the stored
                    # extension, otherwise the requested format is used).
                    name, ext = os.path.splitext(screenshot['file'])
                    format = (format == 'raw') and ext or '.' + format
                    path = os.path.join(self.path, unicode(screenshot['id']))
                    filename = os.path.join(path, '%s-%sx%s%s' % (name,
                      width, height, format))
                    orig_name = os.path.join(path, '%s-%sx%s%s' % (name,
                      screenshot['width'], screenshot['height'], ext))
                    self.log.debug('filemame: %s' % (filename,))

                    # Send file to request.
                    # Scaled/converted variants are generated lazily on
                    # first request and cached on disk.
                    if not os.path.exists(filename):
                        self._create_image(orig_name, path, name, format,
                          width, height)
                    req.send_header('Content-Disposition',
                      'attachment;filename=%s' % (os.path.basename(filename)))
                    req.send_header('Content-Description',
                      screenshot['description'])
                    req.send_file(filename, mimetypes.guess_type(filename)[0])
            else:
                raise TracError('Screenshot not found.')

        elif action == 'add':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')

            # Fill data dictionary.
            data['index'] = req.args.get('index')
            data['versions'] = api.get_versions(cursor)
            data['components'] = api.get_components(cursor)

            # Return template with add screenshot form.
            return ('screenshot-add.cs', data, None)

        elif action == 'post-add':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')

            # Get image file from request.
            file, filename = self._get_file_from_req(req)
            name, ext = os.path.splitext(filename)
            filename = name + ext.lower()

            # Check correct file type (extension must be whitelisted).
            reg = re.compile(r'^(.*)[.](.*)$')
            result = reg.match(filename)
            if result:
                if not result.group(2).lower() in self.ext:
                    raise TracError('Unsupported uploaded file type.')
            else:
                raise TracError('Unsupported uploaded file type.')

            # Create image object (also yields the image dimensions).
            image = Image.open(file)

            # Construct screenshot dictionary from form values.
            screenshot = {'name' : req.args.get('name'),
              'description' : req.args.get('description'),
              'time' : int(time.time()),
              'author' : req.authname,
              'tags' : req.args.get('tags'),
              'file' : filename,
              'width' : image.size[0],
              'height' : image.size[1]}

            # Add new screenshot.
            api.add_screenshot(cursor, screenshot)

            # Get inserted screenshot to with new id.
            # (Looked up by insertion timestamp — presumably unique enough
            # here; verify against ScreenshotsApi.)
            screenshot = api.get_screenshot_by_time(cursor,
              screenshot['time'])

            # Add components to screenshot.
            # req.args yields a bare value for a single selection, a list
            # for multiple — normalize to a list.
            components = req.args.get('components') or []
            if not isinstance(components, list):
                components = [components]
            for component in components:
                component = {'screenshot' : screenshot['id'],
                  'component' : component}
                api.add_component(cursor, component)
            screenshot['components'] = components

            # Add versions to screenshots
            versions = req.args.get('versions') or []
            if not isinstance(versions, list):
                versions = [versions]
            for version in versions:
                version = {'screenshot' : screenshot['id'],
                  'version' : version}
                api.add_version(cursor, version)
            screenshot['versions'] = versions

            self.log.debug(screenshot)

            # Prepare file paths
            path = os.path.join(self.path, unicode(screenshot['id']))
            filepath = os.path.join(path, '%s-%ix%i.%s' % (result.group(1),
              screenshot['width'], screenshot['height'], result.group(2)))
            path = os.path.normpath(path)
            filepath = os.path.normpath(filepath)
            self.log.debug('path: %s' % (path,))
            self.log.debug('filepath: %s' % (filepath,))

            # Store uploaded image; on any failure roll back the DB row
            # and best-effort clean up the partly written files.
            try:
                os.mkdir(path)
                out_file = open(filepath, 'wb+')
                file.seek(0)
                shutil.copyfileobj(file, out_file)
                out_file.close()
            except Exception, error:
                api.delete_screenshot(cursor, screenshot['id'])
                try:
                    self.log.debug(error)
                except:
                    pass
                try:
                    os.remove(filename)
                except:
                    pass
                try:
                    os.rmdir(path)
                except:
                    pass
                raise TracError('Error storing file. Is directory' \
                  ' specified in path config option in [screenshots]' \
                  ' section of trac.ini existing? Original message was: %s' \
                  % (error,))

            # Notify change listeners.
            for listener in self.change_listeners:
                listener.screenshot_created(screenshot)

            # Clear id to prevent display of edit and delete button.
            req.args['id'] = None

        elif action == 'edit':
            req.perm.assert_permission('SCREENSHOTS_ADMIN')

            # Get request arguments.
            screenshot_id = req.args.get('id')

            # Prepare data dictionary.
            data['screenshot'] = api.get_screenshot(cursor, screenshot_id)
            self.log.debug(data['screenshot'])
def _insert_ticket_data(self, req, db, ticket, reporter_id):
    """Populate the HDF with everything needed to render `ticket`.

    Fills in the escaped field values, the field definitions, the
    formatted description, open/modify timestamps, the grouped change
    log, attachments and the workflow actions available to this user.
    """
    hdf = req.hdf

    # Escaped copy of every raw field value, keyed by field name.
    hdf['ticket'] = dict([(key, util.escape(val))
                          for key, val in ticket.values.items()])
    hdf['ticket.id'] = ticket.id
    hdf['ticket.href'] = self.env.href.ticket(ticket.id)

    # Field definitions drive the property editor in the template.
    for field in TicketSystem(self.env).get_ticket_fields():
        if field['type'] in ('radio', 'select'):
            current = ticket.values.get(field['name'])
            options = field['options']
            if current and not current in options:
                # Current ticket value must be visible even if its not in
                # the possible values
                options.append(current)
            field['options'] = [util.escape(option) for option in options]
        name = field.pop('name')
        if name in ('summary', 'reporter', 'description', 'type',
                    'status', 'resolution', 'owner'):
            # These are rendered elsewhere in the template.
            field['skip'] = True
        hdf['ticket.fields.' + name] = field

    hdf['ticket.reporter_id'] = util.escape(reporter_id)
    hdf['title'] = '#%d (%s)' % (ticket.id, util.escape(ticket['summary']))
    hdf['ticket.description.formatted'] = wiki_to_html(
        ticket['description'], self.env, req, db)

    hdf['ticket.opened'] = util.format_datetime(ticket.time_created)
    hdf['ticket.opened_delta'] = util.pretty_timedelta(ticket.time_created)
    if ticket.time_changed != ticket.time_created:
        hdf['ticket.lastmod'] = util.format_datetime(ticket.time_changed)
        hdf['ticket.lastmod_delta'] = util.pretty_timedelta(
            ticket.time_changed)

    # Group the change log into one entry per (date, author) pair.
    changes = []
    last_date, last_author = 0, None
    for date, author, field, old, new in ticket.get_changelog(db=db):
        if date != last_date or author != last_author:
            changes.append({'date': util.format_datetime(date),
                            'author': util.escape(author),
                            'fields': {}})
            last_date, last_author = date, author
        entry = changes[-1]
        if field == 'comment':
            entry['comment'] = wiki_to_html(new, self.env, req, db)
        elif field == 'description':
            entry['fields'][field] = ''
        else:
            entry['fields'][field] = {'old': util.escape(old),
                                      'new': util.escape(new)}
    hdf['ticket.changes'] = changes

    # List attached files
    for idx, attachment in util.enum(Attachment.select(self.env, 'ticket',
                                                       ticket.id)):
        hdf['ticket.attachments.%s' % idx] = attachment_to_hdf(
            self.env, db, req, attachment)
    if req.perm.has_permission('TICKET_APPEND'):
        hdf['ticket.attach_href'] = self.env.href.attachment('ticket',
                                                             ticket.id)

    # Add the possible actions to hdf
    for action in TicketSystem(self.env).get_available_actions(ticket,
                                                               req.perm):
        hdf['ticket.actions.' + action] = '1'