def _prepare_results(self, result_docs, hits):
    """Wrap the processed result docs in a Paginator, mark the current
    page, and register next/prev navigation links on the request."""
    processed = [self._process_doc(doc) for doc in result_docs]
    paginator = Paginator(processed, self.page - 1, self.pagelen, hits)
    self._prepare_shown_pages(paginator)
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    params = self.parameters
    if paginator.has_next_page:
        add_link(self.req, 'next',
                 params.create_href(page=params.page + 1), _('Next Page'))
    if paginator.has_previous_page:
        add_link(self.req, 'prev',
                 params.create_href(page=params.page - 1), _('Previous Page'))
    self.data[self.DATA_RESULTS] = paginator
    prevnext_nav(self.req, _('Previous'), _('Next'))
def get_paginator(self):
    """Build a Paginator over the current comments, attaching next/prev
    links and the shown-pages link metadata, and return it."""
    def page_href(num):
        # Same request args, with only the page number swapped out.
        query = copy.copy(self.req.args)
        query['page'] = num
        return self.req.href(self.href, query)

    total = Comments(self.req, self.env).count(self.args)
    paginator = Paginator(self.data['comments'], self.page - 1,
                          self.per_page, total)
    if paginator.has_next_page:
        add_link(self.req, 'next', page_href(self.page + 1), 'Next Page')
    if paginator.has_previous_page:
        add_link(self.req, 'prev', page_href(self.page - 1), 'Previous Page')
    paginator.shown_pages = [
        {'href': page_href(num), 'class': None, 'string': str(num),
         'title': 'Page %d' % num}
        for num in paginator.get_shown_pages(page_index_count=11)]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    return paginator
def process_request(self, req):
    """Handle /milestone requests: view, edit, delete or create a
    milestone, dispatching on the 'action' request argument."""
    milestone_id = req.args.get('id')
    req.perm('milestone', milestone_id).require('MILESTONE_VIEW')
    add_link(req, 'up', req.href.roadmap(), _('Roadmap'))
    db = self.env.get_db_cnx()  # TODO: db can be removed
    milestone = Milestone(self.env, milestone_id, db)
    action = req.args.get('action', 'view')
    if req.method == 'POST':
        if req.args.has_key('cancel'):
            # Cancel: back to the milestone if it exists, else the roadmap.
            if milestone.exists:
                req.redirect(req.href.milestone(milestone.name))
            else:
                req.redirect(req.href.roadmap())
        elif action == 'edit':
            return self._do_save(req, db, milestone)
        elif action == 'delete':
            self._do_delete(req, db, milestone)
    elif action in ('new', 'edit'):
        return self._render_editor(req, db, milestone)
    elif action == 'delete':
        return self._render_confirm(req, db, milestone)
    # Fall-through (e.g. after a delete): a nameless milestone means
    # there is nothing left to show, so go back to the roadmap.
    if not milestone.name:
        req.redirect(req.href.roadmap())
    return self._render_view(req, db, milestone)
def _paginate(self, req, results):
    """Paginate `results` using the `listtagged_page` and
    `listtagged_per_page` request arguments; add next/prev links and
    return the populated Paginator."""
    self.query = req.args.get('q', None)
    page = as_int(req.args.get('listtagged_page'), 1)
    per_page = as_int(req.args.get('listtagged_per_page'), None)
    if per_page is None:
        per_page = self.items_per_page
    paginator = Paginator(results, page - 1, per_page)
    keys = ['href', 'class', 'string', 'title']
    paginator.shown_pages = [
        dict(zip(keys, [self.get_href(req, per_page, num), None, str(num),
                        _("Page %(num)d", num=num)]))
        for num in paginator.get_shown_pages(21)]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    if paginator.has_next_page:
        add_link(req, 'next', self.get_href(req, per_page, page + 1),
                 _('Next Page'))
    if paginator.has_previous_page:
        add_link(req, 'prev', self.get_href(req, per_page, page - 1),
                 _('Previous Page'))
    return paginator
def post_process_request(self, req, template, data, content_type):
    """Inject the configured <link> tags, stylesheets and scripts into
    every rendered page, reading them from this component's config
    section."""
    get = self.env.config.get
    for link in self.links:
        rel = get(self.section, link + '.rel')
        href = get(self.section, link + '.href')
        title = get(self.section, link + '.title')
        mimetype = get(self.section, link + '.type')
        classname = get(self.section, link + '.class')
        # rel and href are mandatory; the rest default to None.
        if rel and href:
            add_link(req, rel, href, title or None, mimetype or None,
                     classname or None)
    for stylesheet in self.stylesheets:
        filename = get(self.section, stylesheet + '.filename',
                       unicode(self.default_style_base) + stylesheet + '.css')
        mimetype = get(self.section, stylesheet + '.mimetype', 'text/css')
        if filename:
            add_stylesheet(req, filename, mimetype)
    for script in self.scripts:
        filename = get(self.section, script + '.filename',
                       unicode(self.default_script_base) + script + '.js')
        mimetype = get(self.section, script + '.mimetype', 'text/javascript')
        if filename:
            add_script(req, filename, mimetype)
    return template, data, content_type
def post_process_request(self, req, template, data, content_type):
    """Publish the Google Map API key and pull in the map's stylesheet
    and script when a key is configured."""
    if self.api_key:
        # The key travels in the link's title attribute; href is unused.
        add_link(req, rel='google-key', href='', title=self.api_key,
                 classname='google-key')
        add_stylesheet(req, 'googlemap/tracgooglemap.css')
        add_script(req, 'googlemap/tracgooglemap.js')
    return template, data, content_type
def process_request(self, req):
    """Old-style (ClearSilver) milestone request handler: dispatch on
    the 'action' argument and render the milestone.cs template."""
    milestone_id = req.args.get('id')
    req.perm.assert_permission('MILESTONE_VIEW')
    add_link(req, 'up', req.href.roadmap(), 'Roadmap')
    db = self.env.get_db_cnx()
    milestone = Milestone(self.env, milestone_id, db)
    action = req.args.get('action', 'view')
    if req.method == 'POST':
        if req.args.has_key('cancel'):
            # Cancel: back to the milestone if it exists, else the roadmap.
            if milestone.exists:
                req.redirect(req.href.milestone(milestone.name))
            else:
                req.redirect(req.href.roadmap())
        elif action == 'edit':
            self._do_save(req, db, milestone)
        elif action == 'delete':
            self._do_delete(req, db, milestone)
    elif action in ('new', 'edit'):
        self._render_editor(req, db, milestone)
    elif action == 'delete':
        self._render_confirm(req, db, milestone)
    else:
        self._render_view(req, db, milestone)
    # A missing id is only acceptable when creating a new milestone.
    if not milestone_id and action != 'new':
        req.redirect(req.href.roadmap())
    add_stylesheet(req, 'common/css/roadmap.css')
    return 'milestone.cs', None
def process_request(self, req):
    """Handle a ticket-reminder request: check permissions, honour a
    cancel, set up navigation/chrome, then dispatch on the 'action'
    argument to the add or delete handler."""
    ticket_id = int(req.args.get('id'))
    req.perm('ticket', ticket_id).require('TICKET_VIEW')
    can_modify = ('TICKET_REMINDER_MODIFY' in req.perm
                  or 'TICKET_ADMIN' in req.perm)
    if not can_modify:
        raise PermissionError('TICKET_REMINDER_MODIFY', req.perm._resource,
                              self.env)
    ticket = Ticket(self.env, ticket_id)
    if 'cancel' in req.args:
        req.redirect(get_resource_url(self.env, ticket.resource, req.href))
    ticket_name = get_resource_name(self.env, ticket.resource)
    ticket_url = get_resource_url(self.env, ticket.resource, req.href)
    add_link(req, 'up', ticket_url, ticket_name)
    add_ctxtnav(req, _('Back to %(ticket)s', ticket=ticket_name), ticket_url)
    add_stylesheet(req, 'ticketreminder/css/ticketreminder.css')
    action = req.args['action']
    if action == "addreminder":
        return self._process_add(req, ticket)
    elif action == "deletereminder":
        return self._process_delete(req, ticket)
    raise ValueError('Unknown action "%s"' % (action,))
def _prepare_attrs(self, req, attr):
    """Paginate the account-attribute mapping for the admin panel and
    return the template data with navigation links attached."""
    page = int(req.args.get('page', '1'))
    per_page = as_int(req.args.get('max_per_page'), None)
    if per_page is None:
        per_page = self.ACCTS_PER_PAGE
    # Paginator can't deal with dict, so convert to list.
    attr = Paginator([(k, v) for k, v in attr.iteritems()], page - 1,
                     per_page)

    def admin_href(num):
        return req.href.admin('accounts', 'users', page=num,
                              max_per_page=per_page)

    fields = ['href', 'class', 'string', 'title']
    attr.shown_pages = [
        dict(zip(fields, [admin_href(num), None, str(num),
                          _("page %(num)s", num=str(num))]))
        for num in attr.get_shown_pages(21)]
    attr.current_page = {'href': None, 'class': 'current',
                         'string': str(attr.page + 1), 'title': None}
    if attr.has_next_page:
        add_link(req, 'next', admin_href(page + 1), _('Next Page'))
    if attr.has_previous_page:
        add_link(req, 'prev', admin_href(page - 1), _('Previous Page'))
    return {'attr': attr,
            'page_href': req.href.admin('accounts', 'cleanup')}
def pre_process_request(self, req, handler):
    """Advertise archive-download alternate links on browser pages.

    One 'alternate' link per supported format is added, pointing at
    /export/archive for the requested revision (or the repository's
    youngest one).

    :param Request req: Trac request
    :param object handler: existing handler
    :returns: Handler, modified or not
    """
    # Only decorate /browser and /browser/?rev= pages the user may view.
    if not (self.browser_regx.match(req.path_info)
            and 'BROWSER_VIEW' in req.perm
            and 'FILE_VIEW' in req.perm):
        return handler
    # Default repository and its backend type.
    rm = RepositoryManager(self.env)
    repo = rm.get_repository('')
    repo_type = rm.repository_type
    # Revision to export: explicit ?rev= or the youngest revision.
    latest_rev = plaintext(str(req.args.get('rev', repo.get_youngest_rev())))
    if repo_type == 'svn':
        # Trac core already provides the archive link for Subversion.
        return handler
    for fmt, info in self.formats.items():
        add_link(req, 'alternate',
                 req.href('export/archive', rev=latest_rev, format=fmt),
                 _(info['desc']), info['mime'], info['ext'])
    return handler
def post_process_request(self, req, template, data, content_type):
    """Register the configured favicon as both 'shortcut icon' and
    'icon' links when this component is acting as a filter."""
    if self.isfilter:
        icon_path = req.base_path + self.path
        for rel in ('shortcut icon', 'icon'):
            add_link(req, rel, icon_path, None, self.mimetype)
    return template, data, content_type
def test_add_link_advanced(self):
    """add_link() should record href, title, type and class in the HDF."""
    req = Mock(hdf=HDFWrapper(), href=Href('/trac.cgi'))
    add_link(req, 'start', '/trac/wiki', 'Start page', 'text/html', 'home')
    for key, value in [('href', '/trac/wiki'),
                       ('title', 'Start page'),
                       ('type', 'text/html'),
                       ('class', 'home')]:
        self.assertEqual(value, req.hdf['chrome.links.start.0.%s' % key])
def _alt_css(req, filename, title):
    """Register an alternate stylesheet link; the path is chrome-relative
    unless `filename` is absolute (starts with '/')."""
    base = req.href if filename.startswith("/") else req.href.chrome
    add_link(req, "alternate stylesheet", base(filename), title=title,
             mimetype="text/css")
def page_paginator(self, req, iids, page):
    """Build a Paginator over `iids` for the given 1-based `page`,
    attaching next/prev links and the shown-pages metadata.

    :param req: Trac request; falsy disables the navigation links and
                the page index
    :param iids: sequence of item ids to paginate
    :param page: 1-based page number (int or numeric string)
    :returns: the populated Paginator
    """
    results = Paginator(iids, int(page) - 1, self.items_per_page)
    apath = args_path(req.args)
    if req:
        if results.has_next_page:
            next_href = req.href(req.path_info, max=self.items_per_page,
                                 page=page + 1) + apath
            add_link(req, 'next', next_href, 'Next Page')
        if results.has_previous_page:
            prev_href = req.href(req.path_info, max=self.items_per_page,
                                 page=page - 1) + apath
            add_link(req, 'prev', prev_href, 'Previous Page')
    else:
        results.show_index = False
    pagedata = []
    for p in results.get_shown_pages(21):
        # BUG FIX: the title used to be built as 'Page ' + str(p) + 'd',
        # a leftover from a '%d' format that rendered e.g. "Page 3d".
        pagedata.append([req.href(req.path_info, page=p) + apath, None,
                         str(p), 'Page %d' % p])
    results.shown_pages = [dict(zip(['href', 'class', 'string', 'title'], p))
                           for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    return results
def _render_ui(self, req):
    """Set up the calendar view's stylesheets, scripts and iCalendar
    alternate link, then hand off to the template."""
    for css in ('itteco/css/common.css', 'itteco/css/calendar.css'):
        add_stylesheet(req, css)
    scripts = [
        'stuff/ui/ui.core.js',
        'stuff/ui/ui.draggable.js',
        'stuff/ui/ui.droppable.js',
        'stuff/ui/ui.resizable.js',
        'stuff/ui/ui.datepicker.js',
        'stuff/ui/ui.slider.js',
        'stuff/ui/plugins/fullcalendar.js',
        'stuff/ui/plugins/jquery.colorbox.js',
        'stuff/ui/plugins/timepicker.js',
        'stuff/plugins/jquery.rpc.js',
        'calendar.js',
    ]
    add_jscript(req, scripts, IttecoEvnSetup(self.env).debug)
    add_link(req, 'alternate', req.href.calendar(format='ics'),
             _('iCalendar'), 'text/calendar', 'ics')
    return 'itteco_calendar_view.html', {}, None
def process_request(self, req):
    """Old-style (ClearSilver) ticket view handler: save or preview a
    POSTed change, populate the HDF, and add first/prev/next/last links
    when the ticket is visited from a query result set."""
    req.perm.assert_permission('TICKET_VIEW')
    action = req.args.get('action', 'view')
    if not req.args.has_key('id'):
        req.redirect(self.env.href.wiki())
    db = self.env.get_db_cnx()
    id = int(req.args.get('id'))
    ticket = Ticket(self.env, id, db=db)
    reporter_id = util.get_reporter_id(req)
    if req.method == 'POST':
        if not req.args.has_key('preview'):
            self._do_save(req, db, ticket)
        else:
            # Use user supplied values
            ticket.populate(req.args)
            req.hdf['ticket.action'] = action
            req.hdf['ticket.ts'] = req.args.get('ts')
            req.hdf['ticket.reassign_owner'] = \
                req.args.get('reassign_owner') or req.authname
            req.hdf['ticket.resolve_resolution'] = \
                req.args.get('resolve_resolution')
            reporter_id = req.args.get('author')
            comment = req.args.get('comment')
            if comment:
                req.hdf['ticket.comment'] = comment
                # Wiki format a preview of comment
                req.hdf['ticket.comment_preview'] = \
                    wiki_to_html(comment, self.env, req, db)
    else:
        req.hdf['ticket.reassign_owner'] = req.authname
        # Store a timestamp in order to detect "mid air collisions"
        req.hdf['ticket.ts'] = ticket.time_changed
    self._insert_ticket_data(req, db, ticket, reporter_id)
    # If the ticket is being shown in the context of a query, add
    # links to help navigate in the query result set
    if 'query_tickets' in req.session:
        tickets = req.session['query_tickets'].split()
        if str(id) in tickets:
            idx = tickets.index(str(ticket.id))
            if idx > 0:
                add_link(req, 'first', self.env.href.ticket(tickets[0]),
                         'Ticket #%s' % tickets[0])
                add_link(req, 'prev', self.env.href.ticket(tickets[idx - 1]),
                         'Ticket #%s' % tickets[idx - 1])
            if idx < len(tickets) - 1:
                add_link(req, 'next', self.env.href.ticket(tickets[idx + 1]),
                         'Ticket #%s' % tickets[idx + 1])
                add_link(req, 'last', self.env.href.ticket(tickets[-1]),
                         'Ticket #%s' % tickets[-1])
            add_link(req, 'up', req.session['query_href'])
    add_stylesheet(req, 'common/css/ticket.css')
    return 'ticket.cs', None
def _render_monitoring_panel(self, req, cat, page):
    """Render one page of the spam-filter log for the admin panel.

    :param cat: admin category (URL component)
    :param page: admin panel name (URL component)
    :returns: template data dict with the log entries for the page
    """
    req.perm.assert_permission('SPAM_MONITOR')
    try:
        # pagenum is 0-based; the ?page= argument is 1-based.
        pagenum = int(req.args.get('page', 1)) - 1
    except ValueError:
        # BUG FIX: an unparsable ?page= used to fall back to 1, i.e. the
        # *second* page (pagenum is 0-based everywhere else in this
        # method); fall back to the first page instead.
        pagenum = 0
    total = LogEntry.count(self.env)
    offset = pagenum * self.MAX_PER_PAGE
    entries = list(LogEntry.select(self.env, limit=self.MAX_PER_PAGE,
                                   offset=offset))
    # Navigation links use 1-based numbers: prev = pagenum, next = pagenum+2.
    if pagenum > 0:
        add_link(req, 'prev', req.href.admin(cat, page, page=pagenum),
                 'Previous Page')
    if offset + self.MAX_PER_PAGE < total:
        add_link(req, 'next', req.href.admin(cat, page, page=pagenum + 2),
                 'Next Page')
    return {'enabled': FilterSystem(self.env).logging_enabled,
            'entries': entries,
            'offset': offset + 1,
            'page': pagenum + 1,
            'total': total}
def process_request(self, req):
    """Handle /milestone requests: view, edit, delete or create a
    milestone, dispatching on the 'action' request argument."""
    milestone_id = req.args.get('id')
    req.perm('milestone', milestone_id).require('MILESTONE_VIEW')
    add_link(req, 'up', req.href.roadmap(), _('Roadmap'))
    action = req.args.get('action', 'view')
    try:
        milestone = Milestone(self.env, milestone_id)
    except ResourceNotFound:
        # Unknown milestone: offer to create it, if the user is allowed.
        if 'MILESTONE_CREATE' not in req.perm('milestone', milestone_id):
            raise
        milestone = Milestone(self.env, None)
        milestone.name = milestone_id
        action = 'edit'  # rather than 'new' so that it works for POST/save
    if req.method == 'POST':
        if req.args.has_key('cancel'):
            # Cancel: back to the milestone if it exists, else the roadmap.
            if milestone.exists:
                req.redirect(req.href.milestone(milestone.name))
            else:
                req.redirect(req.href.roadmap())
        elif action == 'edit':
            return self._do_save(req, milestone)
        elif action == 'delete':
            self._do_delete(req, milestone)
    elif action in ('new', 'edit'):
        return self._render_editor(req, milestone)
    elif action == 'delete':
        return self._render_confirm(req, milestone)
    # Fall-through (e.g. after a delete): nothing left to show.
    if not milestone.name:
        req.redirect(req.href.roadmap())
    return self._render_view(req, milestone)
def process_request(self, req):
    """Handle /report requests: create/edit/delete/copy a report, list
    all reports, or render one, depending on 'action' and 'id'."""
    # did the user ask for any special report?
    id = int(req.args.get('id', -1))
    if id != -1:
        req.perm('report', id).require('REPORT_VIEW')
    else:
        req.perm.require('REPORT_VIEW')
    data = {}
    action = req.args.get('action', 'view')
    if req.method == 'POST':
        if action == 'new':
            self._do_create(req)
        elif action == 'delete':
            self._do_delete(req, id)
        elif action == 'edit':
            self._do_save(req, id)
    elif action in ('copy', 'edit', 'new'):
        template = 'report_edit.html'
        data = self._render_editor(req, id, action == 'copy')
        Chrome(self.env).add_wiki_toolbars(req)
    elif action == 'delete':
        template = 'report_delete.html'
        data = self._render_confirm_delete(req, id)
    elif id == -1:
        # No id given: show the list of available reports.
        template, data, content_type = self._render_list(req)
        if content_type:  # i.e. alternate format
            return template, data, content_type
        if action == 'clear':
            if 'query_href' in req.session:
                del req.session['query_href']
            if 'query_tickets' in req.session:
                del req.session['query_tickets']
    else:
        template, data, content_type = self._render_view(req, id)
        if content_type:  # i.e. alternate format
            return template, data, content_type
    from trac.ticket.query import QueryModule
    show_query_link = 'TICKET_VIEW' in req.perm and \
                      self.env.is_component_enabled(QueryModule)
    if id != -1 or action == 'new':
        add_ctxtnav(req, _('Available Reports'), href=req.href.report())
        add_link(req, 'up', req.href.report(), _('Available Reports'))
    elif show_query_link:
        add_ctxtnav(req, _('Available Reports'))
    # Kludge: only show link to custom query if the query module
    # is actually enabled
    if show_query_link:
        add_ctxtnav(req, _('Custom Query'), href=req.href.query())
        data['query_href'] = req.href.query()
        data['saved_query_href'] = req.session.get('query_href')
    else:
        data['query_href'] = None
    add_stylesheet(req, 'common/css/report.css')
    return template, data, None
def process_translations_request(self, req):
    """Render the translations browser: the project/catalog list, a
    catalog's locale list, or one page of a locale's messages, depending
    on how much of /translations/<catalog>/<locale>/<page> is given."""
    match = re.match(r'^/translations'
                     r'(?:/([0-9]+)?)?'        # catalog id
                     r'(?:/([A-Za-z\-_]+)?)?'  # locale name
                     r'(?:/([0-9]+)?)?',       # page
                     req.path_info)
    if not match:
        raise ResourceNotFound("Bad URL")
    catalog_id, locale_name, page = match.groups()
    Session = session(self.env)
    if not catalog_id:
        # List available catalogs
        data = {'projects': Session.query(Project).all()}
        return 'l10n_catalogs_list.html', data, None
    if not locale_name:
        # List available locales
        catalog = Session.query(Catalog).get(int(catalog_id))
        if not catalog:
            req.redirect(req.href.translations())
        data = {'catalog': catalog}
        return 'l10n_locales_list.html', data, None
    # List messages of specified locale
    catalog_id, page = int(catalog_id), int(page or 1)
    locale = Session.query(Locale).filter_by(locale=locale_name,
                                             catalog_id=catalog_id).first()
    if not locale:
        req.redirect(req.href.translations(catalog_id))
    data = {'locale': locale, 'catalog_id': catalog_id}
    # Five messages per page; Paginator pages are 0-based.
    paginator = Paginator(list(locale.catalog.messages), page-1, 5)
    data['messages'] = paginator
    shown_pages = paginator.get_shown_pages(25)
    pagedata = []
    for show_page in shown_pages:
        page_href = req.href.translations(catalog_id, locale_name,
                                          show_page)
        pagedata.append([page_href, None, str(show_page),
                         'page %s' % show_page])
    fields = ['href', 'class', 'string', 'title']
    paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    if paginator.has_next_page:
        add_link(req, 'next',
                 req.href.translations(catalog_id, locale_name, page+1),
                 _('Next Page'))
    if paginator.has_previous_page:
        add_link(req, 'prev',
                 req.href.translations(catalog_id, locale_name, page-1),
                 _('Previous Page'))
    return 'l10n_messages.html', data, None
def test_add_link_advanced(self):
    """add_link() should record href, title, type and class on the
    request's chrome link data."""
    req = Request(href=Href('/trac.cgi'))
    add_link(req, 'start', '/trac/wiki', 'Start page', 'text/html', 'home')
    link = req.chrome['links']['start'][0]
    for key, value in [('href', '/trac/wiki'),
                       ('title', 'Start page'),
                       ('type', 'text/html'),
                       ('class', 'home')]:
        self.assertEqual(value, link[key])
def process_request(self, req):
    """Old-style (ClearSilver) query module handler: build a Query from
    the request constraints, expose alternate-format links, mirror the
    constraints into the HDF, and render query.cs (or a converted
    format)."""
    req.perm.assert_permission('TICKET_VIEW')
    constraints = self._get_constraints(req)
    if not constraints and not req.args.has_key('order'):
        # avoid displaying all tickets when the query module is invoked
        # with no parameters. Instead show only open tickets, possibly
        # associated with the user
        constraints = {'status': ('new', 'assigned', 'reopened')}
        if req.authname and req.authname != 'anonymous':
            constraints['owner'] = (req.authname,)
        else:
            email = req.session.get('email')
            name = req.session.get('name')
            if email or name:
                # BUG FIX: this used to read '~%s' % email or name — the
                # '%' binds tighter than 'or', so with an empty email the
                # constraint degenerated to '~' and name was never used.
                constraints['cc'] = ('~%s' % (email or name),)
    query = Query(self.env, constraints, req.args.get('order'),
                  req.args.has_key('desc'), req.args.get('group'),
                  req.args.has_key('groupdesc'),
                  req.args.has_key('verbose'))
    if req.args.has_key('update'):
        # Reset session vars
        for var in ('query_constraints', 'query_time', 'query_tickets'):
            if req.session.has_key(var):
                del req.session[var]
        req.redirect(query.get_href(req))
    # Add registered converters
    for conversion in Mimeview(self.env).get_supported_conversions(
            'trac.ticket.Query'):
        add_link(req, 'alternate',
                 query.get_href(req, format=conversion[0]),
                 conversion[1], conversion[3])
    # Re-encode the constraints in the form the HDF/template expects:
    # per-field mode ('', '~', '^', '$', possibly '!'-negated) + values.
    constraints = {}
    for k, v in query.constraints.items():
        constraint = {'values': [], 'mode': ''}
        for val in v:
            neg = val.startswith('!')
            if neg:
                val = val[1:]
            mode = ''
            if val[:1] in ('~', '^', '$'):
                mode, val = val[:1], val[1:]
            constraint['mode'] = (neg and '!' or '') + mode
            constraint['values'].append(val)
        constraints[k] = constraint
    req.hdf['query.constraints'] = constraints
    format = req.args.get('format')
    if format:
        # send_converted() terminates the request itself.
        Mimeview(self.env).send_converted(req, 'trac.ticket.Query', query,
                                          format, 'query')
    self.display_html(req, query)
    return 'query.cs', None
def process_request(self, req):
    """Render the blog index: collect all 'Blog*' wiki pages, turn them
    into events (newest first, capped at 20) and render either the HTML
    view or the RSS feed."""
    req.hdf['trac.href.blog'] = req.href.blog()
    entries = []
    for page_name in WikiSystem(self.env).get_pages(prefix='Blog'):
        page = WikiPage(self.env, page_name)
        title = page_name
        text = page.text
        # An explicit title in the page overrides the page name.
        match = title_split_match(page.text)
        if match:
            title = match.group(1)
            text = match.group(2)
        # Comments are embedded as macros; count them, then cut the body
        # off at the first one.
        comments = text.count('[[SimpleBlogComment(')
        cutoff = text.find('[[SimpleBlogComment(')
        if cutoff >= 0:
            text = text[:cutoff].rstrip()
        description = wiki_to_html(text, self.env, req)
        original = self._get_original_post_info(page_name)
        event = {
            'href': self.env.href.wiki(page_name),
            'title': title,
            'description': description,
            'escaped': Markup.escape(unicode(description)),
            'date': format_datetime(original['time']),
            'rfcdate': http_date(original['time']),
            'author': original['author'],
            'comment': original['comment'],
            'comments': comments,
        }
        if page.version > 1:
            # The page has been edited since the original post.
            event['updated.version'] = page.version
            event['updated.date'] = format_datetime(page.time)
            event['updated.rfcdate'] = http_date(page.time)
            event['updated.author'] = page.author
            event['updated.comment'] = page.comment
        entries.append((original['time'], event))
    # Sort by original post time, newest first, and cap the list.
    entries.sort()
    entries.reverse()
    max_count = 20
    if len(entries) > max_count:
        entries = entries[:max_count]
    events = []
    for date, event in entries:
        events.append(event)
    req.hdf['blog.events'] = events
    format = req.args.get('format')
    if format == 'rss':
        return 'blog_rss.cs', 'application/rss+xml'
    add_link(req, 'alternate', self.env.href.blog(format='rss'),
             'RSS Feed', 'application/rss+xml', 'rss')
    return 'blog.cs', None
def _render_directory(self, req, repos, node, rev=None): req.perm.assert_permission('BROWSER_VIEW') # Entries metadata info = [] for entry in node.get_entries(): info.append({ 'name': entry.name, 'fullpath': entry.path, 'is_dir': entry.isdir, 'content_length': entry.content_length, 'size': pretty_size(entry.content_length), 'rev': entry.rev, 'log_href': req.href.log(entry.path, rev=rev), 'browser_href': req.href.browser(entry.path, rev=rev) }) changes = get_changes(self.env, repos, [i['rev'] for i in info]) # Ordering of entries order = req.args.get('order', 'name').lower() desc = req.args.has_key('desc') if order == 'date': def file_order(a): return changes[a['rev']]['date_seconds'] elif order == 'size': def file_order(a): return (a['content_length'], embedded_numbers(a['name'].lower())) else: def file_order(a): return embedded_numbers(a['name'].lower()) dir_order = desc and 1 or -1 def browse_order(a): return a['is_dir'] and dir_order or 0, file_order(a) info = sorted(info, key=browse_order, reverse=desc) switch_ordering_hrefs = {} for col in ('name', 'size', 'date'): switch_ordering_hrefs[col] = req.href.browser( node.path, rev=rev, order=col, desc=(col == order and not desc and 1 or None)) # ''Zip Archive'' alternate link patterns = self.downloadable_paths if node.path and patterns and \ filter(None, [fnmatchcase(node.path, p) for p in patterns]): zip_href = req.href.changeset(rev or repos.youngest_rev, node.path, old=rev, old_path='/', format='zip') add_link(req, 'alternate', zip_href, 'Zip Archive', 'application/zip', 'zip') req.hdf['browser'] = {'order': order, 'desc': desc and 1 or 0, 'items': info, 'changes': changes, 'order_href': switch_ordering_hrefs}
def process_request(self, req):
    """Render a single build's detail page: config info, per-step logs
    and reports, and changeset metadata; handle the 'invalidate' POST."""
    req.perm.require('BUILD_VIEW')
    db = self.env.get_db_cnx()
    build_id = int(req.args.get('id'))
    build = Build.fetch(self.env, build_id, db=db)
    assert build, 'Build %s does not exist' % build_id
    if req.method == 'POST':
        if req.args.get('action') == 'invalidate':
            self._do_invalidate(req, build, db)
        req.redirect(req.href.build(build.config, build.id))
    add_link(req, 'up', req.href.build(build.config),
             'Build Configuration')
    data = {'title': 'Build %s - %s' % (build_id,
                                        _status_title[build.status]),
            'page_mode': 'view_build',
            'build': {}}
    config = BuildConfig.fetch(self.env, build.config, db=db)
    data['build']['config'] = {
        'name': config.label,
        'href': req.href.build(config.name)
    }
    # Collect the pluggable log formatters and report summarizers.
    formatters = []
    for formatter in self.log_formatters:
        formatters.append(formatter.get_formatter(req, build))
    summarizers = {}  # keyed by report type
    for summarizer in self.report_summarizers:
        categories = summarizer.get_supported_categories()
        summarizers.update(dict([(cat, summarizer)
                                 for cat in categories]))
    data['build'].update(_get_build_data(self.env, req, build))
    steps = []
    for step in BuildStep.select(self.env, build=build.id, db=db):
        steps.append({
            'name': step.name,
            'description': step.description,
            'duration': pretty_timedelta(step.started, step.stopped),
            'failed': step.status == BuildStep.FAILURE,
            'errors': step.errors,
            'log': self._render_log(req, build, formatters, step),
            'reports': self._render_reports(req, config, build,
                                            summarizers, step)
        })
    data['build']['steps'] = steps
    data['build']['can_delete'] = ('BUILD_DELETE' in req.perm)
    # Changeset info for the revision the build was made from.
    repos = self.env.get_repository(req.authname)
    repos.authz.assert_permission(config.path)
    chgset = repos.get_changeset(build.rev)
    data['build']['chgset_author'] = chgset.author
    add_script(req, 'bitten/tabset.js')
    add_stylesheet(req, 'bitten/bitten.css')
    return 'bitten_build.html', data, None
def add_backlog_conversion_links(env, req, backlog, backlog_url):
    """Add one 'alternate' link per supported backlog conversion
    format."""
    # TODO: Move tests to new backlog
    for conversion in Mimeview(env).get_supported_conversions(
            BACKLOG_CONVERSION_KEY):
        fmt, title, mimetype = conversion[0], conversion[1], conversion[4]
        href = req.href(backlog_url, backlog.name, backlog.scope,
                        format=fmt)
        add_link(req, 'alternate', href, title, mimetype, fmt)
def merge_links(srcreq, dstreq, exclude=None):
    """Incorporate links in `srcreq` into `dstreq`.

    Link relations listed in `exclude` (default: ['alternate']) are
    skipped.
    """
    if exclude is None:
        exclude = ['alternate']
    if 'links' not in srcreq.chrome:
        return
    for rel, links in srcreq.chrome['links'].iteritems():
        if rel in exclude:
            continue
        for link in links:
            add_link(dstreq, rel, **link)
def process_request(self, req):
    """Dispatch /hours/user requests: the bare path lists users; a
    trailing username renders that user's report with a CSV alternate
    link."""
    if req.path_info.rstrip('/') == '/hours/user':
        return self.users(req)
    user = req.path_info.split('/hours/user/', 1)[-1]
    add_stylesheet(req, 'common/css/report.css')
    csv_href = req.href(req.path_info, format='csv')
    add_link(req, 'alternate', csv_href, 'CSV', 'text/csv', 'csv')
    return self.user(req, user)
def process_request(self, req): req.perm.require("REPORT_VIEW") # did the user ask for any special report? id = int(req.args.get("id", -1)) action = req.args.get("action", "view") data = {} if req.method == "POST": if action == "new": self._do_create(req) elif action == "delete": self._do_delete(req, id) elif action == "edit": self._do_save(req, id) elif action in ("copy", "edit", "new"): template = "report_edit.html" data = self._render_editor(req, id, action == "copy") Chrome(self.env).add_wiki_toolbars(req) elif action == "delete": template = "report_delete.html" data = self._render_confirm_delete(req, id) elif id == -1: template, data, content_type = self._render_list(req) if content_type: # i.e. alternate format return template, data, content_type if action == "clear": if "query_href" in req.session: del req.session["query_href"] if "query_tickets" in req.session: del req.session["query_tickets"] else: template, data, content_type = self._render_view(req, id) if content_type: # i.e. alternate format return template, data, content_type if id != -1 or action == "new": add_ctxtnav(req, _("Available Reports"), href=req.href.report()) add_link(req, "up", req.href.report(), _("Available Reports")) else: add_ctxtnav(req, _("Available Reports")) # Kludge: only show link to custom query if the query module # is actually enabled from trac.ticket.query import QueryModule if "TICKET_VIEW" in req.perm and self.env.is_component_enabled(QueryModule): add_ctxtnav(req, _("Custom Query"), href=req.href.query()) data["query_href"] = req.href.query() data["saved_query_href"] = req.session.get("query_href") else: data["query_href"] = None add_stylesheet(req, "common/css/report.css") return template, data, None
def pre_process_request(self, req, handler): rmodule = ReportModule(self.env) #report's match request. if it's gonna be true then we'll stick in our translator, #but only if there's a report id (i.e. it's actually a report page) if rmodule.match_request(req) and req.args.get('id', -1) != -1 and req.args.get('action', 'view') == 'view': href = '' params = rmodule.get_var_args(req) if params: href = '&' + unicode_urlencode(params) add_link(req, 'alternate', '?format=rss&changes=true' + href, _('Changes RSS Feed'), 'application/xhtml+xml', 'rss') return handler
class TimelineModule(Component):
    """Web UI for the event timeline: renders /timeline as HTML or RSS,
    aggregating events from all registered `ITimelineEventProvider`s."""

    implements(INavigationContributor, IPermissionRequestor,
               IRequestHandler, IRequestFilter, ITemplateProvider,
               IWikiSyntaxProvider)

    # All registered timeline event providers.
    event_providers = ExtensionPoint(ITimelineEventProvider)

    default_daysback = IntOption(
        'timeline', 'default_daysback', 30,
        """Default number of days displayed in the Timeline, in days.
        (''since 0.9.'')""")

    max_daysback = IntOption(
        'timeline', 'max_daysback', 90,
        """Maximum number of days (-1 for unlimited) displayable in the
        Timeline. (''since 0.11'')""")

    abbreviated_messages = BoolOption(
        'timeline', 'abbreviated_messages', True,
        """Whether wiki-formatted event messages should be truncated or not.

        This only affects the default rendering, and can be overriden by
        specific event providers, see their own documentation.
        (''Since 0.11'')""")

    # Tokenizes the author filter string: an optional leading '-'
    # (exclusion), then a double-quoted, single-quoted or bare word.
    _authors_pattern = re.compile(r'(-)?(?:"([^"]*)"|\'([^\']*)\'|([^\s]+))')

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'timeline'

    def get_navigation_items(self, req):
        if 'TIMELINE_VIEW' in req.perm:
            yield ('mainnav', 'timeline',
                   tag.a(_("Timeline"), href=req.href.timeline(),
                         accesskey=2))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['TIMELINE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/timeline'

    def process_request(self, req):
        """Render the timeline for the requested period and filters.

        Returns ``('timeline.rss', data, 'application/rss+xml')`` for
        ``format=rss``, otherwise ``('timeline.html', data, None)``.
        """
        req.perm.assert_permission('TIMELINE_VIEW')

        format = req.args.get('format')
        maxrows = int(req.args.get('max', 50 if format == 'rss' else 0))
        lastvisit = int(req.session.get('timeline.lastvisit', '0'))

        # indication of new events is unchanged when form is updated by user
        revisit = any(a in req.args for a in ['update', 'from', 'daysback',
                                              'author'])
        if revisit:
            lastvisit = int(
                req.session.get('timeline.nextlastvisit', lastvisit))

        # Parse the from date and adjust the timestamp to the last second of
        # the day
        fromdate = today = datetime.now(req.tz)
        yesterday = to_datetime(
            today.replace(tzinfo=None) - timedelta(days=1), req.tz)
        precisedate = precision = None
        if 'from' in req.args:
            # Acquire from date only from non-blank input
            reqfromdate = req.args['from'].strip()
            if reqfromdate:
                precisedate = user_time(req, parse_date, reqfromdate)
                fromdate = precisedate.astimezone(req.tz)
            # 'precision' controls how finely new-event highlighting cuts off.
            precision = req.args.get('precision', '')
            if precision.startswith('second'):
                precision = timedelta(seconds=1)
            elif precision.startswith('minute'):
                precision = timedelta(minutes=1)
            elif precision.startswith('hour'):
                precision = timedelta(hours=1)
            else:
                precision = None
        # Snap the period end to the last microsecond of that day.
        fromdate = to_datetime(
            datetime(fromdate.year, fromdate.month, fromdate.day,
                     23, 59, 59, 999999), req.tz)

        # Resolve 'daysback': request arg, then session, then config
        # default; clamped to [0, max_daysback] when a maximum is set.
        daysback = as_int(req.args.get('daysback'),
                          90 if format == 'rss' else None)
        if daysback is None:
            daysback = as_int(req.session.get('timeline.daysback'), None)
        if daysback is None:
            daysback = self.default_daysback
        daysback = max(0, daysback)
        if self.max_daysback >= 0:
            daysback = min(self.max_daysback, daysback)

        authors = req.args.get('authors')
        if authors is None and format != 'rss':
            authors = req.session.get('timeline.authors')
        authors = (authors or '').strip()

        data = {
            'fromdate': fromdate,
            'daysback': daysback,
            'authors': authors,
            'today': user_time(req, format_date, today),
            'yesterday': user_time(req, format_date, yesterday),
            'precisedate': precisedate,
            'precision': precision,
            'events': [],
            'filters': [],
            'abbreviated_messages': self.abbreviated_messages,
            'lastvisit': lastvisit
        }

        available_filters = []
        for event_provider in self.event_providers:
            available_filters += event_provider.get_timeline_filters(req) \
                                 or []

        # check the request or session for enabled filters, or use default
        filters = [f[0] for f in available_filters if f[0] in req.args]
        if not filters and format != 'rss':
            filters = [f[0] for f in available_filters
                       if req.session.get('timeline.filter.'
                                          + f[0]) == '1']
        if not filters:
            # Fall back to each filter's declared default (2-tuples
            # default to enabled; 3-tuples use their third element).
            filters = [f[0] for f in available_filters
                       if len(f) == 2 or f[2]]

        # save the results of submitting the timeline form to the session
        if 'update' in req.args:
            for filter in available_filters:
                key = 'timeline.filter.%s' % filter[0]
                if filter[0] in req.args:
                    req.session[key] = '1'
                elif key in req.session:
                    del req.session[key]

        stop = fromdate
        start = to_datetime(stop.replace(tzinfo=None) - \
                            timedelta(days=daysback + 1), req.tz)

        # create author include and exclude sets
        include = set()
        exclude = set()
        for match in self._authors_pattern.finditer(authors):
            name = (match.group(2) or match.group(3)
                    or match.group(4)).lower()
            if match.group(1):
                exclude.add(name)
            else:
                include.add(name)

        # gather all events for the given period of time
        events = []
        for provider in self.event_providers:
            try:
                for event in provider.get_timeline_events(
                        req, start, stop, filters) or []:
                    # Check for 0.10 events
                    # (old-style tuples keep the author at index 2,
                    # new-style 6+-tuples at index 4)
                    author = (event[2 if len(event) < 6 else 4]
                              or '').lower()
                    if (not include or author in include) \
                            and not author in exclude:
                        events.append(self._event_data(provider, event))
            except Exception, e:  # cope with a failure of that provider
                self._provider_failure(e, req, provider, filters,
                                       [f[0] for f in available_filters])

        # prepare sorted global list
        events = sorted(events, key=lambda e: e['date'], reverse=True)
        if maxrows:
            events = events[:maxrows]
        data['events'] = events

        if format == 'rss':
            data['email_map'] = Chrome(self.env).get_email_map()
            rss_context = web_context(req, absurls=True)
            rss_context.set_hints(wiki_flavor='html', shorten_lines=False)
            data['context'] = rss_context
            return 'timeline.rss', data, 'application/rss+xml'
        else:
            req.session.set('timeline.daysback', daysback,
                            self.default_daysback)
            req.session.set('timeline.authors', authors, '')
            # store lastvisit
            if events and not revisit:
                lastviewed = to_utimestamp(events[0]['date'])
                req.session['timeline.lastvisit'] = max(lastvisit,
                                                        lastviewed)
                req.session['timeline.nextlastvisit'] = lastvisit
            html_context = web_context(req)
            html_context.set_hints(wiki_flavor='oneliner',
                                   shorten_lines=self.abbreviated_messages)
            data['context'] = html_context

        add_stylesheet(req, 'common/css/timeline.css')
        rss_href = req.href.timeline([(f, 'on') for f in filters],
                                     daysback=90, max=50, authors=authors,
                                     format='rss')
        add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
                 'application/rss+xml', 'rss')
        Chrome(self.env).add_jquery_ui(req)

        for filter_ in available_filters:
            data['filters'].append({
                'name': filter_[0],
                'label': filter_[1],
                'enabled': filter_[0] in filters
            })

        # Navigation to the previous/next period of 'daysback' days
        previous_start = fromdate.replace(tzinfo=None) - \
                         timedelta(days=daysback + 1)
        previous_start = format_date(to_datetime(previous_start, req.tz),
                                     format='%Y-%m-%d', tzinfo=req.tz)
        add_link(
            req, 'prev',
            req.href.timeline(from_=previous_start, authors=authors,
                              daysback=daysback),
            _('Previous Period'))
        if today - fromdate > timedelta(days=0):
            next_start = fromdate.replace(tzinfo=None) + \
                         timedelta(days=daysback + 1)
            next_start = format_date(to_datetime(next_start, req.tz),
                                     format='%Y-%m-%d', tzinfo=req.tz)
            add_link(
                req, 'next',
                req.href.timeline(from_=next_start, authors=authors,
                                  daysback=daysback),
                _('Next Period'))
        prevnext_nav(req, _('Previous Period'), _('Next Period'))

        return 'timeline.html', data, None
def _render_overview(self, req):
    """Build the template data for the build-status overview page.

    Collects every (viewable) build configuration, its per-platform
    pending/in-progress counts and, for active configurations, the
    builds of the youngest changeset.  Returns the ``data`` dict for
    the overview template.
    """
    data = {'title': 'Build Status'}
    show_all = False
    if req.args.get('show') == 'all':
        show_all = True
    data['show_all'] = show_all

    configs = []
    for config in BuildConfig.select(self.env, include_inactive=show_all):
        repos_name, repos, repos_path = get_repos(self.env, config.path,
                                                  req.authname)
        rev = config.max_rev or repos.youngest_rev
        try:
            # Skip configurations the user may not see.
            if not _has_permission(req.perm, repos, repos_path, rev=rev):
                continue
        except NoSuchNode:
            add_warning(req, "Configuration '%s' points to non-existing "
                             "path '/%s' at revision '%s'. Configuration skipped." \
                             % (config.name, config.path, rev))
            continue

        description = config.description
        if description:
            description = wiki_to_html(description, self.env, req)

        # Per-platform queue counts for this configuration.
        platforms_data = []
        for platform in TargetPlatform.select(self.env,
                                              config=config.name):
            pd = {'name': platform.name, 'id': platform.id,
                  'builds_pending':
                      len(list(Build.select(self.env, config=config.name,
                                            status=Build.PENDING,
                                            platform=platform.id))),
                  'builds_inprogress':
                      len(list(Build.select(self.env, config=config.name,
                                            status=Build.IN_PROGRESS,
                                            platform=platform.id)))}
            platforms_data.append(pd)

        config_data = {
            'name': config.name,
            'label': config.label or config.name,
            'active': config.active,
            'path': config.path,
            'description': description,
            'builds_pending':
                len(list(Build.select(self.env, config=config.name,
                                      status=Build.PENDING))),
            'builds_inprogress':
                len(list(Build.select(self.env, config=config.name,
                                      status=Build.IN_PROGRESS))),
            'href': req.href.build(config.name),
            'builds': [],
            'platforms': platforms_data
        }
        configs.append(config_data)
        if not config.active:
            continue

        # Walk recent changes newest-first; only the youngest revision's
        # builds are collected (the inner `break` stops at the second
        # distinct revision).
        prev_rev = None
        for platform, rev, build in collect_changes(config, req.authname):
            if rev != prev_rev:
                if prev_rev is None:
                    chgset = repos.get_changeset(rev)
                    chgset_resource = get_chgset_resource(self.env,
                                                          repos_name, rev)
                    config_data['youngest_rev'] = {
                        'id': rev,
                        'href': get_resource_url(self.env,
                                                 chgset_resource,
                                                 req.href),
                        'display_rev': display_rev(repos, rev),
                        'author': chgset.author or 'anonymous',
                        'date': format_datetime(chgset.date),
                        'message': wiki_to_oneliner(
                            shorten_line(chgset.message), self.env,
                            req=req)
                    }
                else:
                    break
            prev_rev = rev
            if build:
                build_data = _get_build_data(self.env, req, build,
                                             repos_name)
                build_data['platform'] = platform.name
                config_data['builds'].append(build_data)
            else:
                # No build record yet for this platform.
                config_data['builds'].append({'platform': platform.name,
                                              'status': 'pending'})

    data['configs'] = sorted(configs, key=lambda x: x['label'].lower())
    data['page_mode'] = 'overview'

    in_progress_builds = Build.select(self.env,
                                      status=Build.IN_PROGRESS)
    pending_builds = Build.select(self.env, status=Build.PENDING)
    data['builds_pending'] = len(list(pending_builds))
    data['builds_inprogress'] = len(list(in_progress_builds))

    add_link(req, 'views', req.href.build(view='inprogress'),
             'In Progress Builds')
    add_ctxtnav(req, 'In Progress Builds',
                req.href.build(view='inprogress'))
    return data
def process_request(self, req):
    """The appropriate mode of operation is inferred from the request
    parameters:

     * If `new_path` and `old_path` are equal (or `old_path` is
       omitted) and `new` and `old` are equal (or `old` is omitted),
       then we're about to view a revision Changeset: `chgset` is
       True. Furthermore, if the path is not the root, the changeset
       is ''restricted'' to that path (only the changes affecting that
       path, its children or its ancestor directories will be shown).
     * In any other case, the set of changes corresponds to arbitrary
       differences between path@rev pairs. If `new_path` and
       `old_path` are equal, the ''restricted'' flag will also be set,
       meaning in this case that the differences between two revisions
       are restricted to those occurring on that path.

    In any case, either path@rev pairs must exist.
    """
    req.perm.assert_permission('CHANGESET_VIEW')

    # -- retrieve arguments
    new_path = req.args.get('new_path')
    new = req.args.get('new')
    old_path = req.args.get('old_path')
    old = req.args.get('old')

    # 'old'/'new' may be given in combined "path@rev" form.
    if old and '@' in old:
        old_path, old = unescape(old).split('@')
    if new and '@' in new:
        new_path, new = unescape(new).split('@')

    # -- normalize and check for special case
    repos = self.env.get_repository(req.authname)
    new_path = repos.normalize_path(new_path)
    new = repos.normalize_rev(new)
    repos.authz.assert_permission_for_changeset(new)

    old_path = repos.normalize_path(old_path or new_path)
    old = repos.normalize_rev(old or new)

    if old_path == new_path and old == new:  # revert to Changeset
        old_path = old = None

    diff_options = get_diff_options(req)

    # -- setup the `chgset` and `restricted` flags, see docstring above.
    chgset = not old and not old_path
    if chgset:
        restricted = new_path not in ('', '/')  # (subset or not)
    else:
        restricted = old_path == new_path  # (same path or not)

    # -- redirect if changing the diff options
    if req.args.has_key('update'):
        if chgset:
            if restricted:
                req.redirect(req.href.changeset(new, new_path))
            else:
                req.redirect(req.href.changeset(new))
        else:
            req.redirect(req.href.changeset(new, new_path, old=old,
                                            old_path=old_path))

    # -- preparing the diff arguments
    if chgset:
        # Diff against the previous version of the node, falling back
        # to the repository's previous revision.
        prev = repos.get_node(new_path, new).get_previous()
        if prev:
            prev_path, prev_rev = prev[:2]
        else:
            prev_path, prev_rev = new_path, repos.previous_rev(new)
        diff_args = DiffArgs(old_path=prev_path, old_rev=prev_rev,
                             new_path=new_path, new_rev=new)
    else:
        if not new:
            new = repos.youngest_rev
        elif not old:
            old = repos.youngest_rev
        if not old_path:
            old_path = new_path
        diff_args = DiffArgs(old_path=old_path, old_rev=old,
                             new_path=new_path, new_rev=new)

    if chgset:
        # From here on, `chgset` is the Changeset object (no longer a
        # boolean flag).
        chgset = repos.get_changeset(new)
        message = chgset.message or '--'
        if self.wiki_format_messages:
            message = wiki_to_html(message, self.env, req,
                                   escape_newlines=True)
        else:
            message = html.PRE(message)
        # Conditional-GET support: anything rendered into the page that
        # can change takes part in the ETag.
        req.check_modified(chgset.date, [
            diff_options[0],
            ''.join(diff_options[1]),
            repos.name,
            repos.rev_older_than(new, repos.youngest_rev),
            message,
            pretty_timedelta(chgset.date, None, 3600)])
    else:
        message = None  # FIXME: what date should we choose for a diff?

    req.hdf['changeset'] = diff_args

    format = req.args.get('format')

    if format in ['diff', 'zip']:
        req.perm.assert_permission('FILE_VIEW')
        # choosing an appropriate filename
        rpath = new_path.replace('/', '_')
        if chgset:
            if restricted:
                filename = 'changeset_%s_r%s' % (rpath, new)
            else:
                filename = 'changeset_r%s' % new
        else:
            if restricted:
                filename = 'diff-%s-from-r%s-to-r%s' \
                           % (rpath, old, new)
            elif old_path == '/':  # special case for download (#238)
                filename = '%s-r%s' % (rpath, old)
            else:
                filename = 'diff-from-%s-r%s-to-%s-r%s' \
                           % (old_path.replace('/', '_'), old, rpath, new)
        if format == 'diff':
            self._render_diff(req, filename, repos, diff_args,
                              diff_options)
            return
        elif format == 'zip':
            self._render_zip(req, filename, repos, diff_args)
            return

    # -- HTML format
    self._render_html(req, repos, chgset, restricted, message,
                      diff_args, diff_options)
    if chgset:
        diff_params = 'new=%s' % new
    else:
        diff_params = unicode_urlencode({'new_path': new_path,
                                         'new': new,
                                         'old_path': old_path,
                                         'old': old})
    add_link(req, 'alternate', '?format=diff&' + diff_params,
             u'Diff unifié', 'text/plain', 'diff')
    add_link(req, 'alternate', '?format=zip&' + diff_params,
             u'Archive Zip', 'application/zip', 'zip')
    add_stylesheet(req, 'common/css/changeset.css')
    add_stylesheet(req, 'common/css/diff.css')
    add_stylesheet(req, 'common/css/code.css')
    return 'changeset.cs', None
offset) results = [list(row) for row in results] numrows = len(results) except Exception, e: data['message'] = tag_( 'Report execution failed: %(error)s', error=tag.pre(exception_to_unicode(e, traceback=True))) return 'report_view.html', data, None paginator = None if limit > 0: paginator = Paginator(results, page - 1, limit, num_items) data['paginator'] = paginator if paginator.has_next_page: add_link(req, 'next', report_href(page=page + 1), _('Next Page')) if paginator.has_previous_page: add_link(req, 'prev', report_href(page=page - 1), _('Previous Page')) pagedata = [] shown_pages = paginator.get_shown_pages(21) for p in shown_pages: pagedata.append([ report_href(page=p), None, str(p), _('Page %(num)d', num=p) ]) fields = ['href', 'class', 'string', 'title'] paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata] paginator.current_page = {
def test_add_link_simple(self):
    """A single add_link call is recorded under its relation key."""
    request = Request()
    add_link(request, 'start', '/trac.cgi/wiki')
    first_start_link = request.chrome['links']['start'][0]
    self.assertEqual('/trac.cgi/wiki', first_start_link['href'])
def display_html(self, req, query):
    """Render the /hours view for *query*.

    Executes the ticket query (re-using the session's cached ticket
    list when the constraints are unchanged and less than an hour old),
    merges the matching hour records into the ticket groups, computes
    per-group worked/estimated totals, and returns the
    ``(template, data, content_type)`` tuple for Genshi — or the
    RSS/CSV export when ``format`` is requested.

    Fix: the 'Hours by User' context link previously passed
    ``to_day=to_date.year`` (the year where a day was expected); it now
    passes ``to_date.day``.
    """
    db = self.env.get_db_cnx()

    # The most recent query is stored in the user session;
    orig_list = None
    orig_time = datetime.now(utc)
    query_time = int(req.session.get('query_time', 0))
    query_time = datetime.fromtimestamp(query_time, utc)
    query_constraints = unicode(query.constraints)
    if query_constraints != req.session.get('query_constraints') \
            or query_time < orig_time - timedelta(hours=1):
        tickets = query.execute(req, db)
        # New or outdated query, (re-)initialize session vars
        req.session['query_constraints'] = query_constraints
        req.session['query_tickets'] = ' '.join([str(t['id'])
                                                 for t in tickets])
    else:
        orig_list = [int(id) for id
                     in req.session.get('query_tickets', '').split()]
        tickets = query.execute(req, db, orig_list)
        orig_time = query_time

    context = Context.from_request(req, 'query')
    ticket_data = query.template_data(context, tickets, orig_list,
                                      orig_time, req)

    # For clients without JavaScript, we add a new constraint here if
    # requested
    constraints = ticket_data['constraints']
    if 'add' in req.args:
        field = req.args.get('add_filter')
        if field:
            constraint = constraints.setdefault(field, {})
            constraint.setdefault('values', []).append('')
            # FIXME: '' not always correct (e.g. checkboxes)

    req.session['query_href'] = query.get_href(context.href)
    req.session['query_time'] = to_timestamp(orig_time)
    req.session['query_tickets'] = ' '.join([str(t['id'])
                                             for t in tickets])

    # data dictionary for genshi
    data = {}

    # get data for saved queries
    query_id = req.args.get('query_id')
    if query_id:
        try:
            query_id = int(query_id)
        except ValueError:
            add_warning(req, "query_id should be an integer, you put '%s'"
                             % query_id)
            query_id = None
    if query_id:
        data['query_id'] = query_id
        query_data = self.get_query(query_id)
        data['query_title'] = query_data['title']
        data['query_description'] = query_data['description']

    data.setdefault('report', None)
    data.setdefault('description', None)

    data['all_columns'] = query.get_all_columns() + self.get_columns()
    # Don't allow the user to remove the id column
    data['all_columns'].remove('id')
    data['all_textareas'] = query.get_all_textareas()

    # need to re-get the cols because query will remove our fields
    cols = req.args.get('col')
    if isinstance(cols, basestring):
        cols = [cols]
    if not cols:
        cols = query.get_columns() + self.get_default_columns()
    data['col'] = cols

    now = datetime.now()
    # get the date range for the query
    if 'from_year' in req.args:
        from_date = get_date(req.args['from_year'],
                             req.args.get('from_month'),
                             req.args.get('from_day'))
    else:
        from_date = datetime(now.year, now.month, now.day)
        from_date = from_date - timedelta(days=7)  # 1 week ago, by default
    if 'to_year' in req.args:
        to_date = get_date(req.args['to_year'],
                           req.args.get('to_month'),
                           req.args.get('to_day'),
                           end_of_day=True)
    else:
        to_date = now

    data['prev_week'] = from_date - timedelta(days=7)
    data['months'] = [(i, calendar.month_name[i]) for i in range(1, 13)]
    data['years'] = range(now.year, now.year - 10, -1)
    data['days'] = range(1, 32)
    data['users'] = get_all_users(self.env)
    data['cur_worker_filter'] = req.args.get('worker_filter', '*any')

    data['from_date'] = from_date
    data['to_date'] = to_date

    ticket_ids = [t['id'] for t in tickets]

    # generate data for ticket_times
    time_records = self.get_ticket_hours(
        ticket_ids, from_date=from_date, to_date=to_date,
        worker_filter=data['cur_worker_filter'])

    data['query'] = ticket_data['query']
    data['context'] = ticket_data['context']
    data['row'] = ticket_data['row']
    if 'comments' in req.args.get('row', []):
        data['row'].append('comments')
    data['constraints'] = ticket_data['constraints']

    # Merge our field labels over the query's.
    our_labels = dict([(f['name'], f['label']) for f in self.fields])
    labels = ticket_data['labels']
    labels.update(our_labels)
    data['labels'] = labels

    order = req.args.get('order')
    desc = bool(req.args.get('desc'))
    data['order'] = order
    data['desc'] = desc

    headers = [{'name': col,
                'label': labels.get(col),
                'href': self.get_href(query, req.args, context.href,
                                      order=col,
                                      desc=(col == order and not desc))}
               for col in cols]
    data['headers'] = headers

    data['fields'] = ticket_data['fields']
    data['modes'] = ticket_data['modes']

    # group time records by ticket id
    time_records_by_ticket = {}
    for record in time_records:
        id = record['ticket']
        if id not in time_records_by_ticket:
            time_records_by_ticket[id] = []
        time_records_by_ticket[id].append(record)

    data['extra_group_fields'] = dict(
        ticket=dict(name='ticket', type='select', label='Ticket'),
        worker=dict(name='worker', type='select', label='Worker'))

    num_items = 0
    data['groups'] = []

    # merge ticket data into ticket_time records
    for key, tickets in ticket_data['groups']:
        ticket_times = []
        total_time = 0
        total_estimated_time = 0
        for ticket in tickets:
            records = time_records_by_ticket.get(ticket['id'], [])
            for rec in records:
                rec.update(ticket)
            ticket_times += records

        # sort ticket_times, if needed
        if order in our_labels:
            ticket_times.sort(key=lambda x: x[order], reverse=desc)
        data['groups'].append((key, ticket_times))
        num_items += len(ticket_times)

    data['double_count_warning'] = ''

    # group by ticket id or other time_ticket fields if necessary
    if req.args.get('group') in data['extra_group_fields']:
        query.group = req.args.get('group')
        if not query.group == "id":
            data['double_count_warning'] = "Warning: estimated hours may be counted more than once if a ticket appears in multiple groups"

        tickets = data['groups'][0][1]
        groups = {}
        for time_rec in tickets:
            key = time_rec[query.group]
            if key not in groups:
                groups[key] = []
            groups[key].append(time_rec)
        data['groups'] = sorted(groups.items())

    total_times = dict(
        (k, self.format_hours(sum(rec['seconds_worked'] for rec in v)))
        for k, v in data['groups'])
    total_estimated_times = {}
    for key, records in data['groups']:
        seen_tickets = set()
        est = 0
        for record in records:
            # do not double-count tickets
            id = record['ticket']
            if id in seen_tickets:
                continue
            seen_tickets.add(id)
            estimatedhours = record.get('estimatedhours') or 0
            try:
                estimatedhours = float(estimatedhours)
            except ValueError:
                estimatedhours = 0
            est += estimatedhours * 3600
        total_estimated_times[key] = self.format_hours(est)

    data['total_times'] = total_times
    data['total_estimated_times'] = total_estimated_times

    # format records for display
    for record in time_records:
        if 'seconds_worked' in record:
            record['seconds_worked'] = self.format_hours(
                record['seconds_worked'])  # XXX misleading name
        if 'time_started' in record:
            record['time_started'] = self.format_date(
                record['time_started'])
        if 'time_submitted' in record:
            record['time_submitted'] = self.format_date(
                record['time_submitted'])

    data['query'].num_items = num_items
    data['labels'] = ticket_data['labels']
    data['labels'].update(labels)
    data['can_add_hours'] = req.perm.has_permission('TICKET_ADD_HOURS')
    data['multiproject'] = self.env.is_component_enabled(MultiprojectHours)

    from web_ui import TracUserHours
    data['user_hours'] = self.env.is_component_enabled(TracUserHours)

    # return the rss, if requested
    if req.args.get('format') == 'rss':
        return self.queryhours2rss(req, data)

    # return the csv, if requested
    # NOTE(review): no `return` here — presumably queryhours2csv ends
    # the request itself (e.g. raises RequestDone); confirm.
    if req.args.get('format') == 'csv':
        self.queryhours2csv(req, data)

    # add rss link
    rss_href = req.href(req.path_info, format='rss')
    add_link(req, 'alternate', rss_href, _('RSS Feed'),
             'application/rss+xml', 'rss')

    # add csv link
    add_link(req, 'alternate',
             req.href(req.path_info, format='csv', **req.args),
             'CSV', 'text/csv', 'csv')

    # add navigation of weeks
    prev_args = dict(req.args)
    next_args = dict(req.args)
    prev_args['from_year'] = (from_date - timedelta(days=7)).year
    prev_args['from_month'] = (from_date - timedelta(days=7)).month
    prev_args['from_day'] = (from_date - timedelta(days=7)).day
    prev_args['to_year'] = from_date.year
    prev_args['to_month'] = from_date.month
    prev_args['to_day'] = from_date.day

    next_args['from_year'] = to_date.year
    next_args['from_month'] = to_date.month
    next_args['from_day'] = to_date.day
    next_args['to_year'] = (to_date + timedelta(days=7)).year
    next_args['to_month'] = (to_date + timedelta(days=7)).month
    next_args['to_day'] = (to_date + timedelta(days=7)).day

    add_link(req, 'prev', self.get_href(query, prev_args, context.href),
             _('Prev Week'))
    add_link(req, 'next', self.get_href(query, next_args, context.href),
             _('Next Week'))
    prevnext_nav(req, _('Prev Week'), _('Next Week'))

    add_ctxtnav(req, 'Cross-Project Hours',
                req.href.hours('multiproject'))
    add_ctxtnav(req, 'Hours by User',
                req.href.hours('user',
                               from_day=from_date.day,
                               from_month=from_date.month,
                               from_year=from_date.year,
                               # BUG FIX: was to_date.year
                               to_day=to_date.day,
                               to_month=to_date.month,
                               to_year=to_date.year))
    add_ctxtnav(req, 'Saved Queries', req.href.hours('query/list'))

    add_stylesheet(req, 'common/css/report.css')
    add_script(req, 'common/js/query.js')

    return ('hours_timeline.html', data, 'text/html')
def _render_view(self, req, page):
    """Render a wiki page (possibly a missing one) for viewing.

    Returns ``('wiki_view.html', data)``.  For missing pages it
    collects candidate parent pages (``higher``) and similarly named
    pages (``related``); for versioned views it wires up
    previous/next/latest navigation links.
    """
    version = page.resource.version

    # Add registered converters
    if page.exists:
        for conversion in Mimeview(self.env) \
                          .get_supported_conversions('text/x-trac-wiki'):
            conversion_href = req.href.wiki(page.name, version=version,
                                            format=conversion.key)
            add_link(req, 'alternate', conversion_href, conversion.name,
                     conversion.in_mimetype)

    data = self._page_data(req, page)
    if page.name == self.START_PAGE:
        data['title'] = ''

    ws = WikiSystem(self.env)
    context = web_context(req, page.resource)
    higher, related = [], []
    if not page.exists:
        if 'WIKI_CREATE' not in req.perm(page.resource):
            raise ResourceNotFound(
                _("Page %(name)s not found", name=page.name))
        formatter = OneLinerFormatter(self.env, context)
        if '/' in page.name:
            # Suggest the missing ancestor pages of a hierarchical name.
            parts = page.name.split('/')
            for i in range(len(parts) - 2, -1, -1):
                name = '/'.join(parts[:i] + [parts[-1]])
                if not ws.has_page(name):
                    higher.append(
                        ws._format_link(formatter, 'wiki',
                                        '/' + name, name, False))
        else:
            name = page.name
        # Suggest viewable pages whose name contains this one.
        name = name.lower()
        related = [
            each for each in ws.pages
            if name in each.lower()
               and 'WIKI_VIEW' in req.perm(self.realm, each)
        ]
        related.sort()
        related = [
            ws._format_link(formatter, 'wiki', '/' + each, each, False)
            for each in related
        ]

    latest_page = WikiPage(self.env, page.name)

    prev_version = next_version = None
    if version:
        version = as_int(version, None)
        if version is not None:
            # History is iterated newest-first: versions above the
            # requested one keep overwriting next_version until the
            # first older version yields prev_version.
            for hist in latest_page.get_history():
                v = hist[0]
                if v != version:
                    if v < version:
                        if not prev_version:
                            prev_version = v
                        break
                    else:
                        next_version = v

    prefix = self.PAGE_TEMPLATES_PREFIX
    templates = [
        template[len(prefix):]
        for template in ws.get_pages(prefix)
        if 'WIKI_VIEW' in req.perm(self.realm, template)
    ]

    # -- prev/up/next links
    if prev_version:
        add_link(req, 'prev',
                 req.href.wiki(page.name, version=prev_version),
                 _("Version %(num)s", num=prev_version))

    parent = None
    if version:
        add_link(req, 'up', req.href.wiki(page.name, version=None),
                 _("View latest version"))
    elif '/' in page.name:
        parent = page.name[:page.name.rindex('/')]
        add_link(req, 'up', req.href.wiki(parent, version=None),
                 _("View parent page"))

    if next_version:
        add_link(req, 'next',
                 req.href.wiki(page.name, version=next_version),
                 _('Version %(num)s', num=next_version))

    # Add ctxtnav entries
    if version:
        prevnext_nav(req, _("Previous Version"), _("Next Version"),
                     _("View Latest Version"))
    else:
        if parent:
            add_ctxtnav(req, _('Up'), req.href.wiki(parent))
        self._wiki_ctxtnav(req, page)

    # Plugin content validation
    fields = {'text': page.text}
    for manipulator in self.page_manipulators:
        manipulator.prepare_wiki_page(req, page, fields)
    text = fields.get('text', '')

    data.update({
        'context': context,
        'text': text,
        'latest_version': latest_page.version,
        'attachments':
            AttachmentModule(self.env).attachment_data(context),
        'start_page': self.START_PAGE,
        'default_template': self.DEFAULT_PAGE_TEMPLATE,
        'templates': templates,
        'version': version,
        'higher': higher,
        'related': related,
        'resourcepath_template': 'wiki_page_path.html',
        'fullwidth': req.session.get('wiki_fullwidth'),
    })
    add_script(req, 'common/js/wiki.js')
    return 'wiki_view.html', data
def _render_dir(self, req, repos, node, rev, order, desc):
    """Build the template data for a repository directory listing.

    Entries are restricted to those viewable by the user, sorted by
    *order* ('date', 'size', 'author' or name) with directories
    grouped first, and annotated with an age-color time range when
    color scaling is enabled.
    """
    req.perm(node.resource).require('BROWSER_VIEW')
    download_href = self._get_download_href

    # Entries metadata
    # Lightweight snapshot of the node attributes the template needs,
    # plus the per-entry raw download link.
    class entry(object):
        _copy = 'name rev created_rev kind isdir path content_length' \
                .split()
        __slots__ = _copy + ['raw_href']

        def __init__(self, node):
            for f in entry._copy:
                setattr(self, f, getattr(node, f))
            self.raw_href = download_href(req.href, repos, node, rev)

    entries = [entry(n) for n in node.get_entries()
               if n.is_viewable(req.perm)]
    changes = get_changes(repos, [i.created_rev for i in entries],
                          self.log)

    if rev:
        newest = repos.get_changeset(rev).date
    else:
        newest = datetime.now(req.tz)

    # Color scale for the age column
    timerange = custom_colorizer = None
    if self.color_scale:
        timerange = TimeRange(newest)
        # Extend the range with the parent listing's bounds (passed as
        # request args) so colors stay consistent while drilling down.
        max_s = req.args.get('range_max_secs')
        min_s = req.args.get('range_min_secs')
        parent_range = [timerange.from_seconds(long(s))
                        for s in [max_s, min_s] if s]
        this_range = [c.date for c in changes.values() if c]
        for dt in this_range + parent_range:
            timerange.insert(dt)
        custom_colorizer = self.get_custom_colorizer()

    # Ordering of entries
    # Secondary key is always the natural-number-aware name sort.
    if order == 'date':
        def file_order(a):
            return (changes[a.created_rev].date,
                    embedded_numbers(a.name.lower()))
    elif order == 'size':
        def file_order(a):
            return (a.content_length,
                    embedded_numbers(a.name.lower()))
    elif order == 'author':
        def file_order(a):
            return (changes[a.created_rev].author.lower(),
                    embedded_numbers(a.name.lower()))
    else:
        def file_order(a):
            return embedded_numbers(a.name.lower())

    # Keep directories grouped before files regardless of sort
    # direction (the sign flips because `reverse=desc` is applied).
    dir_order = 1 if desc else -1

    def browse_order(a):
        return dir_order if a.isdir else 0, file_order(a)

    entries = sorted(entries, key=browse_order, reverse=desc)

    # ''Zip Archive'' alternate link
    zip_href = self._get_download_href(req.href, repos, node, rev)
    if zip_href:
        add_link(req, 'alternate', zip_href, _('Zip Archive'),
                 'application/zip', 'zip')

    return {
        'entries': entries,
        'changes': changes,
        'timerange': timerange,
        'colorize_age': custom_colorizer,
        'range_max_secs':
            (timerange and timerange.to_seconds(timerange.newest)),
        'range_min_secs':
            (timerange and timerange.to_seconds(timerange.oldest)),
    }
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file, either raw/plain-text (streamed
    directly to the client, ending the request) or as an HTML preview
    stored in ``req.hdf``.
    """
    req.perm.assert_permission('FILE_VIEW')

    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': util.format_datetime(changeset.date),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'

    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None

    format = req.args.get('format')
    if format in ['raw', 'txt']:
        # Stream the file content directly; RequestDone terminates the
        # request when the content is exhausted.
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()

        content = node.get_content()
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        content = node.get_content().read(mimeview.max_preview_size())
        if not is_binary(content):
            if mime_type != 'text/plain':
                # Offer a plain-text alternate for non-plain text files.
                plain_href = self.env.href.peerReviewBrowser(
                    node.path, rev=rev and node.rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        req.hdf['file'] = mimeview.preview_to_hdf(
            req, mime_type, charset, content, node.name, node.rev,
            annotations=['addFileNums'])

        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        req.hdf['file.raw_href'] = util.escape(raw_href)
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)

        add_stylesheet(req, 'common/css/code.css')
def add_milestone_link(rel, milestone):
    """Register a link with relation *rel* pointing at *milestone*,
    preserving the current 'by' grouping argument (closes over ``req``)."""
    link_title = _('Milestone "%(name)s"', name=milestone.name)
    target = req.href.milestone(milestone.name, by=req.args.get('by'))
    add_link(req, rel, target, link_title)
pagedata = [] shown_pages = result.get_shown_pages(21) for page in shown_pages: page_href = self.get_href(req, realms, query, items_per_page, page) pagedata.append( [page_href, None, str(page), _("Page %(num)d", num=page)]) attributes = ['href', 'class', 'string', 'title'] result.shown_pages = [dict(zip(attributes, p)) for p in pagedata] result.current_page = { 'href': None, 'class': 'current', 'string': str(result.page + 1), 'title': None } if result.has_next_page: next_href = self.get_href(req, realms, query, items_per_page, current_page + 1) add_link(req, 'next', next_href, _('Next Page')) if result.has_previous_page: prev_href = self.get_href(req, realms, query, items_per_page, current_page - 1) add_link(req, 'prev', prev_href, _('Previous Page')) return result
class TimelineModule(Component):
    """Trac timeline module (ClearSilver-era API, Python 2).

    Localized variant: main-navigation label and filter labels are rendered
    in Chinese (the strings themselves are runtime data and kept verbatim).
    """

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)

    # Providers contributing events (tickets, changesets, milestones, ...).
    event_providers = ExtensionPoint(ITimelineEventProvider)

    default_daysback = IntOption('timeline', 'default_daysback', 30,
        """Default number of days displayed in the Timeline, in days. (''since 0.9.'')""")

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'timeline'

    def get_navigation_items(self, req):
        # Hide the entry entirely from users without TIMELINE_VIEW.
        if not req.perm.has_permission('TIMELINE_VIEW'):
            return
        yield ('mainnav', 'timeline',
               html.A(u'时间线', href=req.href.timeline(), accesskey=2))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['TIMELINE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        return re.match(r'/timeline/?', req.path_info) is not None

    def process_request(self, req):
        """Build the timeline page (or its RSS feed) from all event providers."""
        req.perm.assert_permission('TIMELINE_VIEW')

        format = req.args.get('format')
        maxrows = int(req.args.get('max', 0))

        # Parse the from date and adjust the timestamp to the last second of
        # the day
        t = time.localtime()
        if req.args.has_key('from'):
            try:
                # '%x' is the locale-dependent date representation.
                t = time.strptime(req.args.get('from'), '%x')
            except:
                # Unparseable dates silently fall back to "today".
                pass

        fromdate = time.mktime((t[0], t[1], t[2], 23, 59, 59,
                                t[6], t[7], t[8]))
        try:
            daysback = max(0, int(req.args.get('daysback', '')))
        except ValueError:
            daysback = self.default_daysback
        req.hdf['timeline.from'] = format_date(fromdate)
        req.hdf['timeline.daysback'] = daysback

        available_filters = []
        for event_provider in self.event_providers:
            available_filters += event_provider.get_timeline_filters(req)

        filters = []
        # check the request or session for enabled filters, or use default
        # (first predicate that yields a non-empty selection wins)
        for test in (lambda f: req.args.has_key(f[0]),
                     lambda f: req.session.get('timeline.filter.%s' % f[0],
                                               '') == '1',
                     lambda f: len(f) == 2 or f[2]):
            if filters:
                break
            filters = [f[0] for f in available_filters if test(f)]

        # save the results of submitting the timeline form to the session
        if req.args.has_key('update'):
            for filter in available_filters:
                key = 'timeline.filter.%s' % filter[0]
                if req.args.has_key(filter[0]):
                    req.session[key] = '1'
                elif req.session.has_key(key):
                    del req.session[key]

        stop = fromdate
        start = stop - (daysback + 1) * 86400

        events = []
        for event_provider in self.event_providers:
            try:
                events += event_provider.get_timeline_events(req, start, stop,
                                                             filters)
            except Exception, e:
                # cope with a failure of that provider
                self._provider_failure(e, req, event_provider, filters,
                                       [f[0] for f in available_filters])

        # Newest first (events are (kind, href, title, date, author, msg)).
        events.sort(lambda x, y: cmp(y[3], x[3]))
        if maxrows and len(events) > maxrows:
            del events[maxrows:]

        # Page title
        req.hdf['title'] = 'Timeline'

        # Get the email addresses of all known users
        email_map = {}
        for username, name, email in self.env.get_known_users():
            if email:
                email_map[username] = email

        idx = 0
        for kind, href, title, date, author, message in events:
            event = {'kind': kind, 'title': title, 'href': href,
                     'author': author or 'anonymous',
                     'date': format_date(date),
                     'time': format_time(date, '%H:%M'),
                     'dateuid': int(date),
                     'message': message}

            if format == 'rss':
                # Strip/escape HTML markup
                if isinstance(title, Markup):
                    title = title.plaintext(keeplinebreaks=False)
                event['title'] = title
                event['message'] = to_unicode(message)

                if author:
                    # For RSS, author must be an email address
                    if author.find('@') != -1:
                        event['author.email'] = author
                    elif email_map.has_key(author):
                        event['author.email'] = email_map[author]
                event['date'] = http_date(date)

            req.hdf['timeline.events.%s' % idx] = event
            idx += 1

        if format == 'rss':
            return 'timeline_rss.cs', 'application/rss+xml'

        add_stylesheet(req, 'common/css/timeline.css')
        rss_href = req.href.timeline([(f, 'on') for f in filters],
                                     daysback=90, max=50, format='rss')
        add_link(req, 'alternate', rss_href, 'RSS Feed',
                 'application/rss+xml', 'rss')

        # Chinese display labels for the built-in filters; unknown filters
        # fall back to the provider-supplied label.
        ZhAvailable_filters = {'milestone': u'里程碑更新',
                               'ticket': u'传票更新',
                               'changeset': u'SVN仓库更新'}
        for idx, fltr in enumerate(available_filters):
            req.hdf['timeline.filters.%d' % idx] = {
                'name': fltr[0],
                'label': ZhAvailable_filters.get(fltr[0], fltr[1]),
                'enabled': int(fltr[0] in filters)}

        return 'timeline.cs', None
class BrowserModule(Component):
    """Repository browser: directory listings and file previews (Trac 0.12-era)."""

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IWikiSyntaxProvider, IHTMLPreviewAnnotator, IWikiMacroProvider)

    property_renderers = ExtensionPoint(IPropertyRenderer)

    downloadable_paths = ListOption('browser', 'downloadable_paths',
                                    '/trunk, /branches/*, /tags/*',
        doc="""List of repository paths that can be downloaded. Leave the option empty if you want to disable all downloads, otherwise set it to a comma-separated list of authorized paths (those paths are glob patterns, i.e. "*" can be used as a wild card) (''since 0.10'')""")

    color_scale = BoolOption('browser', 'color_scale', True,
        doc="""Enable colorization of the ''age'' column. This uses the same color scale as the source code annotation: blue is older, red is newer. (''since 0.11'')""")

    NEWEST_COLOR = (255, 136, 136)

    newest_color = Option('browser', 'newest_color', repr(NEWEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding to the newest color, for the color scale used in ''blame'' or the browser ''age'' column if `color_scale` is enabled. (''since 0.11'')""")

    OLDEST_COLOR = (136, 136, 255)

    oldest_color = Option('browser', 'oldest_color', repr(OLDEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding to the oldest color, for the color scale used in ''blame'' or the browser ''age'' column if `color_scale` is enabled. (''since 0.11'')""")

    intermediate_point = Option('browser', 'intermediate_point', '',
        doc="""If set to a value between 0 and 1 (exclusive), this will be the point chosen to set the `intermediate_color` for interpolating the color value. (''since 0.11'')""")

    intermediate_color = Option('browser', 'intermediate_color', '',
        doc="""(r,g,b) color triple to use for the color corresponding to the intermediate color, if two linear interpolations are used for the color scale (see `intermediate_point`). If not set, the intermediate color between `oldest_color` and `newest_color` will be used. (''since 0.11'')""")

    render_unsafe_content = BoolOption('browser', 'render_unsafe_content',
                                       'false',
        """Whether raw files should be rendered in the browser, or only made downloadable. Pretty much any file may be interpreted as HTML by the browser, which allows a malicious user to create a file containing cross-site scripting attacks. For open repositories where anyone can check-in a file, it is recommended to leave this option disabled (which is the default).""")

    hidden_properties = ListOption('browser', 'hide_properties', 'svk:merge',
        doc="""Comma-separated list of version control properties to hide from the repository browser. (''since 0.9'')""")

    # public methods

    def get_custom_colorizer(self):
        """Returns a converter for values from [0.0, 1.0] to a RGB triple."""

        def interpolate(old, new, value):
            # Provides a linearly interpolated color triple for `value`
            # which must be a floating point value between 0.0 and 1.0
            return tuple([int(b + (a - b) * value)
                          for a, b in zip(new, old)])

        def parse_color(rgb, default):
            # Get three ints out of a `rgb` string or return `default`
            try:
                t = tuple([int(v) for v in re.split(r'(\d+)', rgb)[1::2]])
                return t if len(t) == 3 else default
            except ValueError:
                return default

        newest_color = parse_color(self.newest_color, self.NEWEST_COLOR)
        oldest_color = parse_color(self.oldest_color, self.OLDEST_COLOR)
        try:
            intermediate = float(self.intermediate_point)
        except ValueError:
            intermediate = None

        if intermediate:
            intermediate_color = parse_color(self.intermediate_color, None)
            if not intermediate_color:
                # Default to the component-wise midpoint.
                intermediate_color = tuple([(a + b) / 2 for a, b in
                                            zip(newest_color, oldest_color)])

            def colorizer(value):
                # Two-segment interpolation split at `intermediate`.
                if value <= intermediate:
                    value = value / intermediate
                    return interpolate(oldest_color, intermediate_color, value)
                else:
                    value = (value - intermediate) / (1.0 - intermediate)
                    return interpolate(intermediate_color, newest_color, value)
        else:
            def colorizer(value):
                # Single linear interpolation oldest -> newest.
                return interpolate(oldest_color, newest_color, value)
        return colorizer

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'browser'

    def get_navigation_items(self, req):
        rm = RepositoryManager(self.env)
        # Only shown when at least one real repository is configured.
        if 'BROWSER_VIEW' in req.perm and rm.get_real_repositories():
            yield ('mainnav', 'browser',
                   tag.a(_('Browse Source'), href=req.href.browser()))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['BROWSER_VIEW', 'FILE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        """Match /export, /browser and (legacy, redirected) /file URLs."""
        match = re.match(r'/(export|browser|file)(/.*)?$', req.path_info)
        if match:
            mode, path = match.groups()
            if mode == 'export':
                if path and '/' in path:
                    # /export/<rev>/<path>
                    path_elts = path.split('/', 2)
                    if len(path_elts) != 3:
                        return False
                    path = path_elts[2]
                    req.args['rev'] = path_elts[1]
                    req.args['format'] = 'raw'
            elif mode == 'file':
                # Legacy URL: permanently redirect to /browser.
                req.redirect(req.href.browser(path, rev=req.args.get('rev'),
                                              format=req.args.get('format')),
                             permanent=True)
            req.args['path'] = path or '/'
            return True

    def process_request(self, req):
        """Render the repository index, a directory listing or a file view."""
        req.perm.require('BROWSER_VIEW')

        presel = req.args.get('preselected')
        if presel and (presel + '/').startswith(req.href.browser() + '/'):
            req.redirect(presel)

        path = req.args.get('path', '/')
        rev = req.args.get('rev', '')
        if rev.lower() in ('', 'head'):
            rev = None  # `None` means "latest revision" below
        order = req.args.get('order', 'name').lower()
        desc = req.args.has_key('desc')
        xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'

        rm = RepositoryManager(self.env)
        all_repositories = rm.get_all_repositories()
        reponame, repos, path = rm.get_repository_by_path(path)

        # Repository index
        show_index = not reponame and path == '/'
        if show_index:
            if repos and (as_bool(all_repositories[''].get('hidden'))
                          or not repos.is_viewable(req.perm)):
                repos = None
        if not repos and reponame:
            raise ResourceNotFound(_("Repository '%(repo)s' not found",
                                     repo=reponame))
        if reponame and reponame != repos.reponame:  # Redirect alias
            qs = req.query_string
            req.redirect(req.href.browser(repos.reponame or None, path)
                         + ('?' + qs if qs else ''))
        reponame = repos.reponame if repos else None

        # Find node for the requested path/rev
        context = web_context(req)
        node = None
        display_rev = lambda rev: rev
        if repos:
            try:
                if rev:
                    rev = repos.normalize_rev(rev)
                # If `rev` is `None`, we'll try to reuse `None` consistently,
                # as a special shortcut to the latest revision.
                rev_or_latest = rev or repos.youngest_rev
                node = get_existing_node(req, repos, path, rev_or_latest)
            except NoSuchChangeset, e:
                raise ResourceNotFound(e.message,
                                       _('Invalid changeset number'))
            context = context.child(repos.resource.child('source', path,
                                                         version=rev_or_latest))
            display_rev = repos.display_rev

        # Prepare template data
        path_links = get_path_links(req.href, reponame, path, rev,
                                    order, desc)

        repo_data = dir_data = file_data = None
        if show_index:
            repo_data = self._render_repository_index(context,
                                                      all_repositories,
                                                      order, desc)
        if node:
            if node.isdir:
                dir_data = self._render_dir(req, repos, node, rev, order, desc)
            elif node.isfile:
                file_data = self._render_file(req, context, repos, node, rev)

        if not repos and not (repo_data and repo_data['repositories']):
            raise ResourceNotFound(_("No node %(path)s", path=path))

        quickjump_data = properties_data = None
        if node and not xhr:
            properties_data = self.render_properties('browser', context,
                                                     node.get_properties())
            quickjump_data = list(repos.get_quickjump_entries(rev))

        data = {
            'context': context, 'reponame': reponame, 'repos': repos,
            'repoinfo': all_repositories.get(reponame or ''),
            'path': path, 'rev': node and node.rev, 'stickyrev': rev,
            'display_rev': display_rev,
            'created_path': node and node.created_path,
            'created_rev': node and node.created_rev,
            'properties': properties_data,
            'path_links': path_links,
            'order': order, 'desc': 1 if desc else None,
            'repo': repo_data, 'dir': dir_data, 'file': file_data,
            'quickjump_entries': quickjump_data,
            'wiki_format_messages':
                self.config['changeset'].getbool('wiki_format_messages'),
            'xhr': xhr,
        }
        if xhr:  # render and return the content only
            return 'dir_entries.html', data, None

        if dir_data or repo_data:
            add_script(req, 'common/js/expand_dir.js')
            add_script(req, 'common/js/keyboard_nav.js')

        # Links for contextual navigation
        if node:
            if node.isfile:
                prev_rev = repos.previous_rev(rev=node.created_rev,
                                              path=node.created_path)
                if prev_rev:
                    href = req.href.browser(reponame, node.created_path,
                                            rev=prev_rev)
                    add_link(req, 'prev', href,
                             _('Revision %(num)s', num=display_rev(prev_rev)))
                if rev is not None:
                    add_link(req, 'up',
                             req.href.browser(reponame, node.created_path))
                next_rev = repos.next_rev(rev=node.created_rev,
                                          path=node.created_path)
                if next_rev:
                    href = req.href.browser(reponame, node.created_path,
                                            rev=next_rev)
                    add_link(req, 'next', href,
                             _('Revision %(num)s', num=display_rev(next_rev)))
                prevnext_nav(req, _('Previous Revision'), _('Next Revision'),
                             _('Latest Revision'))
            else:
                if path != '/':
                    add_link(req, 'up', path_links[-2]['href'],
                             _('Parent directory'))
                add_ctxtnav(req, tag.a(_('Last Change'),
                            href=req.href.changeset(node.created_rev,
                                                    reponame,
                                                    node.created_path)))
            if node.isfile:
                annotate = data['file']['annotate']
                if annotate:
                    add_ctxtnav(req, _('Normal'),
                                title=_('View file without annotations'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev))
                if annotate != 'blame':
                    add_ctxtnav(req, _('Blame'),
                                title=_('Annotate each line with the last '
                                        'changed revision '
                                        '(this can be time consuming...)'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev,
                                                      annotate='blame'))
            add_ctxtnav(req, _('Revision Log'),
                        href=req.href.log(reponame, path, rev=rev))
            path_url = repos.get_path_url(path, rev)
            if path_url:
                if path_url.startswith('//'):
                    # Protocol-relative URL: prepend the request scheme.
                    path_url = req.scheme + ':' + path_url
                add_ctxtnav(req, _('Repository URL'), href=path_url)

        add_stylesheet(req, 'common/css/browser.css')
        return 'browser.html', data, None
def process_request(self, req):
    """Render the peer-review 'perform review' page for one file.

    ClearSilver-era handler: populates `req.hdf` and returns a template
    name; errors are reported via the 'error.cs' template instead of
    raising.
    """
    if req.perm.has_permission('CODE_REVIEW_MGR'):
        req.hdf['manager'] = 1
    else:
        # Developers may view but the template hides manager-only actions.
        req.perm.assert_permission('CODE_REVIEW_DEV')
        req.hdf['manager'] = 0

    #get some link locations for the template
    req.hdf['trac.href.peerReviewMain'] = self.env.href.peerReviewMain()
    req.hdf['trac.href.peerReviewNew'] = self.env.href.peerReviewNew()
    req.hdf['trac.href.peerReviewSearch'] = self.env.href.peerReviewSearch()
    req.hdf['trac.href.peerReviewOptions'] = self.env.href.peerReviewOptions()

    #for top-right navigation links
    req.hdf['main'] = "no"
    req.hdf['create'] = "no"
    req.hdf['search'] = "no"
    req.hdf['options'] = "no"

    #get the fileID from the request arguments
    idFile = req.args.get('IDFile')
    self.fileID = idFile

    #if the file id is not set - display an error message
    if idFile == None:
        req.hdf['error.type'] = "TracError"
        req.hdf['error.title'] = "File ID Error"
        req.hdf['error.message'] = "No file ID given - unable to load page."
        return 'error.cs', None

    #get the database
    db = self.env.get_db_cnx()
    dbBack = dbBackend(db)
    #get all the comments for this file
    self.comments = dbBack.getCommentDictForFile(idFile)
    #get the file properties from the database
    resultFile = dbBack.getReviewFile(idFile)

    #make the thumbtac image global so the line annotator has access to it
    self.imagePath = self.env.href.chrome() + '/hw/images/thumbtac11x11.gif'
    #get image and link locations
    req.hdf['trac.href.peerReviewCommentCallback'] = self.env.href.peerReviewCommentCallback()
    req.hdf['trac.href.peerReviewView'] = self.env.href.peerReviewView()
    req.hdf['trac.htdocs.thumbtac'] = self.imagePath
    req.hdf['trac.htdocs.plus'] = self.env.href.chrome() + '/hw/images/plus.gif'
    req.hdf['trac.htdocs.minus'] = self.env.href.chrome() + '/hw/images/minus.gif'

    #if the file is not found in the database - display an error message
    if resultFile == None:
        req.hdf['error.type'] = "TracError"
        req.hdf['error.title'] = "File ID Error"
        req.hdf['error.message'] = "Unable to locate given file ID in database."
        return 'error.cs', None

    #get the respository
    repos = self.env.get_repository(req.authname)

    #get the file attributes
    req.hdf['review.path'] = resultFile.Path
    req.hdf['review.version'] = resultFile.Version
    req.hdf['review.lineStart'] = resultFile.LineStart
    req.hdf['review.lineEnd'] = resultFile.LineEnd
    req.hdf['review.reviewID'] = resultFile.IDReview

    #make these global for the line annotator
    self.lineEnd = string.atoi(resultFile.LineEnd)
    self.lineStart = string.atoi(resultFile.LineStart)

    #if the repository can't be found - display an error message
    if(repos == None):
        req.hdf['error.type'] = "TracError"
        req.hdf['error.title'] = "Subversion Repository Error"
        req.hdf['error.message'] = "Unable to acquire subversion repository."
        return 'error.cs', None

    #get the correct location - using revision number and repository path
    node = get_existing_node(self.env, repos, resultFile.Path,
                             resultFile.Version)

    #if the node can't be found - display error message
    if(node == None):
        req.hdf['error.type'] = "TracError"
        req.hdf['error.title'] = "Subversion Node Error"
        req.hdf['error.message'] = "Unable to locate subversion node for this file."
        return 'error.cs', None

    # Generate HTML preview - this code take from Trac - refer to their documentation
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # Extract an explicit charset from the svn:mime-type property, if any.
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    mimeview = Mimeview(self.env)
    rev = None
    content = node.get_content().read(mimeview.max_preview_size())
    if not is_binary(content):
        if mime_type != 'text/plain':
            plain_href = self.env.href.peerReviewBrowser(
                node.path, rev=rev and node.rev, format='txt')
            add_link(req, 'alternate', plain_href, 'Plain Text', 'text/plain')
    #assign the preview to a variable for clearsilver
    req.hdf['file'] = mimeview.preview_to_hdf(
        req, mime_type, charset, content, node.name, node.rev,
        annotations=['performCodeReview'])
    add_stylesheet(req, 'common/css/code.css')
    add_stylesheet(req, 'common/css/browser.css')
    return 'peerReviewPerform.cs', None
def _render_diff(self, req, page):
    """Render the diff between two versions of a wiki page.

    `old_version` comes from the request; when absent, the previous
    version found in the page history is used.
    """
    if not page.exists:
        raise TracError(_("Version %(num)s of page \"%(name)s\" does not "
                          "exist", num=req.args.get('version'),
                          name=page.name))

    old_version = req.args.getint('old_version')
    if old_version:
        if old_version == page.version:
            old_version = None
        elif old_version > page.version:
            # FIXME: what about reverse diffs?
            # Swap so that `page` is always the newer side.
            old_version = page.resource.version
            page = WikiPage(self.env, page.name, old_version)
            req.perm(page.resource).require('WIKI_VIEW')
    latest_page = WikiPage(self.env, page.name)
    req.perm(latest_page.resource).require('WIKI_VIEW')
    new_version = page.version

    date = author = comment = None
    num_changes = 0
    prev_version = next_version = None
    # Walk history (newest first) to find metadata of the shown version
    # and its neighbors for prev/next navigation.
    for version, t, a, c in latest_page.get_history():
        if version == new_version:
            date = t
            author = a or 'anonymous'
            comment = c or '--'
        else:
            if version < new_version:
                num_changes += 1
                if not prev_version:
                    prev_version = version
                if old_version is None or version == old_version:
                    old_version = version
                    break
            else:
                next_version = version
    if not old_version:
        old_version = 0
    old_page = WikiPage(self.env, page.name, old_version)
    req.perm(old_page.resource).require('WIKI_VIEW')

    # -- text diffs
    old_text = old_page.text.splitlines()
    new_text = page.text.splitlines()
    diff_data, changes = self._prepare_diff(req, page, old_text, new_text,
                                            old_version, new_version)

    # -- prev/up/next links
    if prev_version:
        add_link(req, 'prev',
                 req.href.wiki(page.name, action='diff',
                               version=prev_version),
                 _("Version %(num)s", num=prev_version))
    add_link(req, 'up', req.href.wiki(page.name, action='history'),
             _('Page history'))
    if next_version:
        add_link(req, 'next',
                 req.href.wiki(page.name, action='diff',
                               version=next_version),
                 _("Version %(num)s", num=next_version))

    data = self._page_data(req, page, 'diff')
    data.update({
        'change': {'date': date, 'author': author, 'comment': comment},
        'new_version': new_version, 'old_version': old_version,
        'latest_version': latest_page.version,
        'num_changes': num_changes,
        'longcol': 'Version', 'shortcol': 'v',
        'changes': changes,
        'diff': diff_data,
        'can_edit_comment': 'WIKI_ADMIN' in req.perm(page.resource),
    })
    prevnext_nav(req, _("Previous Change"), _("Next Change"),
                 _("Wiki History"))
    return 'wiki_diff.html', data
def process_request(self, req):
    """Render the revision log for a path (HTML, RSS or ChangeLog format).

    Modes: 'stop_on_copy' (default), 'follow_copy', 'path_history'.
    Explicit revision ranges may be given via the `revs` argument.
    """
    req.perm.require('LOG_VIEW')

    mode = req.args.get('mode', 'stop_on_copy')
    path = req.args.get('path', '/')
    rev = req.args.get('rev')
    stop_rev = req.args.get('stop_rev')
    revs = req.args.get('revs')
    format = req.args.get('format')
    verbose = req.args.get('verbose')
    limit = req.args.getint('limit', self.default_log_limit)

    rm = RepositoryManager(self.env)
    reponame, repos, path = rm.get_repository_by_path(path)

    if not repos:
        if path == '/':
            raise TracError(_("No repository specified and no default"
                              " repository configured."))
        else:
            raise ResourceNotFound(_("Repository '%(repo)s' not found",
                                     repo=reponame or path.strip('/')))
    if reponame != repos.reponame:  # Redirect alias
        qs = req.query_string
        req.redirect(req.href.log(repos.reponame or None, path)
                     + ('?' + qs if qs else ''))
    normpath = repos.normalize_path(path)
    # if `revs` parameter is given, then we're restricted to the
    # corresponding revision ranges.
    # If not, then we're considering all revisions since `rev`,
    # on that path, in which case `revranges` will be None.
    if revs:
        revranges = RevRanges(repos, revs, resolve=True)
        rev = revranges.b
    else:
        revranges = None
        rev = repos.normalize_rev(rev)

    # The `history()` method depends on the mode:
    # * for ''stop on copy'' and ''follow copies'', it's `Node.history()`
    #   unless explicit ranges have been specified
    # * for ''show only add, delete'' we're using
    #   `Repository.get_path_history()`
    cset_resource = repos.resource.child(self.realm)
    show_graph = False
    curr_revrange = []
    if mode == 'path_history':
        def history():
            for h in repos.get_path_history(path, rev):
                if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
                    yield h
    elif revranges:
        show_graph = path == '/' and not verbose \
                     and not repos.has_linear_changesets \
                     and len(revranges) == 1

        def history():
            separator = False
            for a, b in reversed(revranges.pairs):
                # Expose the range currently being walked to the caller.
                curr_revrange[:] = (a, b)
                node = get_existing_node(req, repos, path, b)
                for p, rev, chg in node.get_history():
                    if repos.rev_older_than(rev, a):
                        break
                    if 'CHANGESET_VIEW' in req.perm(cset_resource(id=rev)):
                        separator = True
                        yield p, rev, chg
                else:
                    # Range exhausted without break: no separator needed.
                    separator = False
            if separator:
                yield p, rev, None
    else:
        show_graph = path == '/' and not verbose \
                     and not repos.has_linear_changesets

        def history():
            node = get_existing_node(req, repos, path, rev)
            for h in node.get_history():
                if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
                    yield h

    # -- retrieve history, asking for limit+1 results
    info = []
    depth = 1
    previous_path = normpath
    count = 0
    history_remaining = True
    for old_path, old_rev, old_chg in history():
        if stop_rev and repos.rev_older_than(old_rev, stop_rev):
            break
        old_path = repos.normalize_path(old_path)
        item = {
            'path': old_path, 'rev': old_rev, 'existing_rev': old_rev,
            'change': old_chg, 'depth': depth,
        }
        if old_chg == Changeset.DELETE:
            item['existing_rev'] = repos.previous_rev(old_rev, old_path)
        if not (mode == 'path_history' and old_chg == Changeset.EDIT):
            info.append(item)
        if old_path and old_path != previous_path and \
                not (mode == 'path_history' and old_path == normpath):
            # A copy/rename boundary was crossed.
            depth += 1
            item['depth'] = depth
            item['copyfrom_path'] = old_path
            if mode == 'stop_on_copy':
                break
            elif mode == 'path_history':
                depth -= 1
        if old_chg is None:  # separator entry
            stop_limit = limit
        else:
            count += 1
            stop_limit = limit + 1
        if count >= stop_limit:
            break
        previous_path = old_path
    else:
        # Loop ran to completion: no more history beyond what we have.
        history_remaining = False

    if not info:
        node = get_existing_node(req, repos, path, rev)
        if repos.rev_older_than(stop_rev, node.created_rev):
            # FIXME: we should send a 404 error here
            raise TracError(_("The file or directory '%(path)s' doesn't "
                              "exist at revision %(rev)s or at any "
                              "previous revision.", path=path,
                              rev=repos.display_rev(rev)),
                            _('Nonexistent path'))

    # Generate graph data
    graph = {}
    if show_graph:
        threads, vertices, columns = \
            make_log_graph(repos, (item['rev'] for item in info))
        graph.update(threads=threads, vertices=vertices, columns=columns,
                     colors=self.graph_colors, line_width=0.04,
                     dot_radius=0.1)
        add_script(req, 'common/js/excanvas.js', ie_if='IE')
        add_script(req, 'common/js/log_graph.js')
        add_script_data(req, graph=graph)

    def make_log_href(path, **args):
        # Build a log link preserving rev/mode/limit/verbose state.
        link_rev = rev
        if rev == str(repos.youngest_rev):
            link_rev = None
        params = {'rev': link_rev, 'mode': mode, 'limit': limit}
        params.update(args)
        if verbose:
            params['verbose'] = verbose
        return req.href.log(repos.reponame or None, path, **params)

    if format in ('rss', 'changelog'):
        info = [i for i in info if i['change']]  # drop separators
        if info and count > limit:
            del info[-1]
    elif info and history_remaining and count >= limit:
        # stop_limit reached, there _might_ be some more
        next_rev = info[-1]['rev']
        next_path = info[-1]['path']
        next_revranges = None
        if curr_revrange:
            new_revrange = (curr_revrange[0], next_rev) \
                           if info[-1]['change'] else None
            next_revranges = revranges.truncate(curr_revrange, new_revrange)
            next_revranges = unicode(next_revranges) or None
        if next_revranges or not revranges:
            older_revisions_href = make_log_href(next_path, rev=next_rev,
                                                 revs=next_revranges)
            add_link(req, 'next', older_revisions_href,
                     _('Revision Log (restarting at %(path)s, rev. '
                       '%(rev)s)', path=next_path,
                       rev=repos.display_rev(next_rev)))
        # only show fully 'limit' results, use `change == None` as a marker
        info[-1]['change'] = None

    revisions = [i['rev'] for i in info]
    changes = get_changes(repos, revisions, self.log)
    extra_changes = {}

    if format == 'changelog':
        for rev in revisions:
            changeset = changes[rev]
            cs = {}
            cs['message'] = wrap(changeset.message, 70,
                                 initial_indent='\t',
                                 subsequent_indent='\t')
            files = []
            actions = []
            for cpath, kind, chg, bpath, brev in changeset.get_changes():
                files.append(bpath if chg == Changeset.DELETE else cpath)
                actions.append(chg)
            cs['files'] = files
            cs['actions'] = actions
            extra_changes[rev] = cs

    data = {
        'context': web_context(req, 'source', path, parent=repos.resource),
        'reponame': repos.reponame or None, 'repos': repos,
        'path': path, 'rev': rev, 'stop_rev': stop_rev,
        'display_rev': repos.display_rev, 'revranges': revranges,
        'mode': mode, 'verbose': verbose, 'limit': limit,
        'items': info, 'changes': changes, 'extra_changes': extra_changes,
        'graph': graph,
        'wiki_format_messages':
            self.config['changeset'].getbool('wiki_format_messages')
    }

    if format == 'changelog':
        return 'revisionlog.txt', data, {'content_type': 'text/plain'}
    elif format == 'rss':
        data['context'] = web_context(req, 'source', path,
                                      parent=repos.resource, absurls=True)
        return ('revisionlog.rss', data,
                {'content_type': 'application/rss+xml'})

    # Group items into ranges separated by the `change is None` markers.
    item_ranges = []
    range = []
    for item in info:
        if item['change'] is None:  # separator
            if range:  # start new range
                range.append(item)
                item_ranges.append(range)
                range = []
        else:
            range.append(item)
    if range:
        item_ranges.append(range)
    data['item_ranges'] = item_ranges

    add_stylesheet(req, 'common/css/diff.css')
    add_stylesheet(req, 'common/css/browser.css')

    path_links = get_path_links(req.href, repos.reponame, path, rev)
    if path_links:
        data['path_links'] = path_links
    if path != '/':
        add_link(req, 'up', path_links[-2]['href'], _('Parent directory'))

    rss_href = make_log_href(path, format='rss', revs=revs,
                             stop_rev=stop_rev)
    add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
             'application/rss+xml', 'rss')
    changelog_href = make_log_href(path, format='changelog', revs=revs,
                                   stop_rev=stop_rev)
    add_link(req, 'alternate', changelog_href, _('ChangeLog'), 'text/plain')

    add_ctxtnav(req, _('View Latest Revision'),
                href=req.href.browser(repos.reponame or None, path))
    if 'next' in req.chrome['links']:
        next = req.chrome['links']['next'][0]
        add_ctxtnav(req,
                    tag.span(tag.a(_('Older Revisions'), href=next['href']),
                             Markup(' →')))

    return 'revisionlog.html', data
def _render_file(self, req, context, repos, node, rev=None):
    """Render a file node: stream it raw/plain-text, or build preview data.

    Returns a template-data dict for the HTML preview case; raises
    `RequestDone` after streaming when `format` is 'raw' or 'txt'.
    """
    req.perm(node.resource).require('FILE_VIEW')

    mimeview = Mimeview(self.env)

    # MIME type detection
    content = node.get_content()
    chunk = content.read(CHUNK_SIZE)
    mime_type = node.content_type
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = mimeview.get_mimetype(node.name, chunk) or \
                    mime_type or 'text/plain'

    # Eventually send the file directly
    format = req.args.get('format')
    if format in ('raw', 'txt'):
        req.send_response(200)
        req.send_header('Content-Type',
                        'text/plain' if format == 'txt' else mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified', http_date(node.last_modified))
        if rev is None:
            # Content at HEAD may change; prevent caching.
            req.send_header('Pragma', 'no-cache')
            req.send_header('Cache-Control', 'no-cache')
            req.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
        if not self.render_unsafe_content:
            # Force browser to download files instead of rendering
            # them, since they might contain malicious code enabling
            # XSS attacks
            req.send_header('Content-Disposition', 'attachment')
        req.end_headers()

        # Stream chunk-by-chunk; first chunk was already read above.
        while 1:
            if not chunk:
                raise RequestDone
            req.write(chunk)
            chunk = content.read(CHUNK_SIZE)
    else:
        # The changeset corresponding to the last change on `node`
        # is more interesting than the `rev` changeset.
        changeset = repos.get_changeset(node.created_rev)

        # add ''Plain Text'' alternate link if needed
        if not is_binary(chunk) and mime_type != 'text/plain':
            plain_href = req.href.browser(repos.reponame or None,
                                          node.path, rev=rev, format='txt')
            add_link(req, 'alternate', plain_href, _('Plain Text'),
                     'text/plain')

        # add ''Original Format'' alternate link (always)
        raw_href = req.href.export(rev or repos.youngest_rev,
                                   repos.reponame or None, node.path)
        add_link(req, 'alternate', raw_href, _('Original Format'),
                 mime_type)

        self.log.debug("Rendering preview of node %s@%s with mime-type %s"
                       % (node.name, str(rev), mime_type))

        del content  # the remainder of that content is not needed

        add_stylesheet(req, 'common/css/code.css')

        annotations = ['lineno']
        annotate = req.args.get('annotate')
        if annotate:
            annotations.insert(0, annotate)
        preview_data = mimeview.preview_data(context, node.get_content(),
                                             node.get_content_length(),
                                             mime_type, node.created_path,
                                             raw_href,
                                             annotations=annotations,
                                             force_source=bool(annotate))
        return {
            'changeset': changeset,
            'size': node.content_length,
            'preview': preview_data,
            'annotate': annotate,
        }
def _render_view(self, req, id):
    """Retrieve the report results and pre-process them for rendering.

    Handles the whole report-view pipeline: redirecting saved custom
    queries to the query module, executing the (possibly paginated)
    report SQL, grouping columns/rows per the ``_col_`` naming
    conventions, permission-filtering rows, and emitting the
    format-specific response (rss/csv/tab/html).

    :param req: the HTTP request
    :param id: the report number
    :return: ``(template, data, content_type)`` triple, or never returns
        when a redirect or direct send (sql/csv/tab) short-circuits.
    """
    r = Report(self.env, id)
    title, description, sql = r.title, r.description, r.query

    # If this is a saved custom query, redirect to the query module
    #
    # A saved query is either an URL query (?... or query:?...),
    # or a query language expression (query:...).
    #
    # It may eventually contain newlines, for increased clarity.
    #
    query = ''.join(line.strip() for line in sql.splitlines())
    if query and (query[0] == '?' or query.startswith('query:?')):
        query = query if query[0] == '?' else query[6:]
        report_id = 'report=%s' % id
        if 'report=' in query:
            if report_id not in query:
                err = _('When specified, the report number should be '
                        '"%(num)s".', num=id)
                req.redirect(req.href.report(id, action='edit',
                                             error=err))
        else:
            if query[-1] != '?':
                query += '&'
            query += report_id
        req.redirect(req.href.query() + quote_query_string(query))
    elif query.startswith('query:'):
        from trac.ticket.query import Query, QuerySyntaxError
        try:
            query = Query.from_string(self.env, query[6:], report=id)
        except QuerySyntaxError as e:
            req.redirect(req.href.report(id, action='edit',
                                         error=to_unicode(e)))
        else:
            req.redirect(query.get_href(req.href))

    format = req.args.get('format')
    if format == 'sql':
        self._send_sql(req, id, title, description, sql)

    title = '{%i} %s' % (id, title)

    report_resource = Resource(self.realm, id)
    req.perm(report_resource).require('REPORT_VIEW')
    context = web_context(req, report_resource)

    # Pagination setup: csv/tab exports are unlimited (0), rss has its
    # own page size, html uses the configured items_per_page.
    page = req.args.getint('page', 1)
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0, 'tab': 0}.get(format, self.items_per_page)
    max = req.args.getint('max')
    limit = as_int(max, default_max, min=0)  # explicit max takes precedence
    offset = (page - 1) * limit
    sort_col = req.args.get('sort', '')
    asc = req.args.getint('asc', 0, min=0, max=1)
    args = {}

    def report_href(**kwargs):
        """Generate links to this report preserving user variables,
        and sorting and paging variables.
        """
        params = args.copy()
        if sort_col:
            params['sort'] = sort_col
        if page != 1:
            params['page'] = page
        if max != default_max:
            params['max'] = max
        params.update(kwargs)
        params['asc'] = 1 if params.get('asc', asc) else None
        return req.href.report(id, params)

    data = {'action': 'view',
            'report': {'id': id, 'resource': report_resource},
            'context': context, 'title': title,
            'description': description,
            'max': limit, 'args': args, 'show_args_form': False,
            'message': None, 'paginator': None,
            'report_href': report_href}

    try:
        args = self.get_var_args(req)
        sql = self.get_default_var_args(args, sql)
    except ValueError as e:
        data['message'] = _("Report failed: %(error)s", error=e)
        return 'report_view.html', data, None
    data.update({'args': args, 'title': sub_vars(title, args),
                 'description': sub_vars(description or '', args)})

    try:
        res = self.execute_paginated_report(req, id, sql, args, limit,
                                            offset)
    except TracError as e:
        data['message'] = _("Report failed: %(error)s", error=e)
    else:
        # A 2-tuple result signals an execution failure: (exception, sql)
        if len(res) == 2:
            e, sql = res
            data['message'] = \
                tag_("Report execution failed: %(error)s %(sql)s",
                     error=tag.pre(exception_to_unicode(e)),
                     sql=tag(tag.hr(),
                             tag.pre(sql, style="white-space: pre")))
    if data['message']:
        return 'report_view.html', data, None

    cols, results, num_items, missing_args, limit_offset = res
    need_paginator = limit > 0 and limit_offset
    # limit_offset is None when the backend could not paginate in SQL,
    # so sorting must be redone in Python below.
    need_reorder = limit_offset is None
    results = [list(row) for row in results]
    numrows = len(results)

    paginator = None
    if need_paginator:
        paginator = Paginator(results, page - 1, limit, num_items)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', report_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', report_href(page=page - 1),
                     _('Previous Page'))

        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([report_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items

    # Place retrieved columns in groups, according to naming conventions
    # * _col_ means fullrow, i.e. a group with one header
    # * col_ means finish the current group and start a new one
    field_labels = TicketSystem(self.env).get_ticket_field_labels()
    header_groups = [[]]
    for idx, col in enumerate(cols):
        if col in field_labels:
            title = field_labels[col]
        else:
            title = col.strip('_').capitalize()
        header = {
            'col': col,
            'title': title,
            'hidden': False,
            'asc': None,
        }

        if col == sort_col:
            if asc:
                data['asc'] = asc
            data['sort'] = sort_col
            header['asc'] = bool(asc)
            if not paginator and need_reorder:
                # this dict will have enum values for sorting
                # and will be used in sortkey(), if non-empty:
                sort_values = {}
                if sort_col in ('status', 'resolution', 'priority',
                                'severity'):
                    # must fetch sort values for that columns
                    # instead of comparing them as strings
                    with self.env.db_query as db:
                        for name, value in db(
                                "SELECT name, %s FROM enum WHERE type=%%s"
                                % db.cast('value', 'int'), (sort_col, )):
                            sort_values[name] = value

                def sortkey(row):
                    val = row[idx]
                    # check if we have sort_values, then use them as keys.
                    if sort_values:
                        return sort_values.get(val)
                    # otherwise, continue with string comparison:
                    if isinstance(val, str):
                        val = val.lower()
                    return val

                results = sorted(results, key=sortkey, reverse=not asc)

        header_group = header_groups[-1]

        if col.startswith('__') and col.endswith('__'):  # __col__
            header['hidden'] = True
        elif col[0] == '_' and col[-1] == '_':  # _col_
            header_group = []
            header_groups.append(header_group)
            header_groups.append([])
        elif col[0] == '_':  # _col
            header['hidden'] = True
        elif col[-1] == '_':  # col_
            header_groups.append([])
        header_group.append(header)

    # Structure the rows and cells:
    # - group rows according to __group__ value, if defined
    # - group cells the same way headers are grouped
    chrome = Chrome(self.env)
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, result in enumerate(results):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        realm = TicketSystem.realm
        parent_realm = ''
        parent_id = ''
        email_cells = []
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                value = cell_value(result[col_idx])
                cell = {'value': value, 'header': header,
                        'index': col_idx}
                col = header['col']
                col_idx += 1
                # Detect and create new group
                if col == '__group__' and value != prev_group_value:
                    prev_group_value = value
                    # Brute force handling of email in group by header
                    row_groups.append(
                        (value and chrome.format_author(req, value), []))
                # Other row properties
                row['__idx__'] = row_idx
                if col in self._html_cols:
                    row[col] = value
                if col in ('report', 'ticket', 'id', '_id'):
                    row['id'] = value
                # Special casing based on column name
                col = col.strip('_')
                if col in ('reporter', 'cc', 'owner'):
                    email_cells.append(cell)
                elif col == 'realm':
                    realm = value
                elif col == 'parent_realm':
                    parent_realm = value
                elif col == 'parent_id':
                    parent_id = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        if parent_realm:
            resource = Resource(realm, row.get('id'),
                                parent=Resource(parent_realm,
                                                parent_id))
        else:
            resource = Resource(realm, row.get('id'))
        # FIXME: for now, we still need to hardcode the realm in the
        # action
        if resource.realm.upper() + '_VIEW' not in req.perm(resource):
            continue
        authorized_results.append(result)
        if email_cells:
            for cell in email_cells:
                emails = chrome.format_emails(context.child(resource),
                                              cell['value'])
                result[cell['index']] = cell['value'] = emails
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)

    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows})

    if format == 'rss':
        data['context'] = web_context(req, report_resource,
                                      absurls=True)
        return 'report.rss', data, 'application/rss+xml'
    elif format == 'csv':
        filename = 'report_%s.csv' % id if id else 'report.csv'
        self._send_csv(req, cols, authorized_results,
                       mimetype='text/csv', filename=filename)
    elif format == 'tab':
        filename = 'report_%s.tsv' % id if id else 'report.tsv'
        self._send_csv(req, cols, authorized_results, '\t',
                       mimetype='text/tab-separated-values',
                       filename=filename)
    else:
        p = page if max is not None else None
        add_link(req, 'alternate',
                 auth_link(req, report_href(format='rss', page=None)),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        add_link(req, 'alternate', report_href(format='csv', page=p),
                 _('Comma-delimited Text'), 'text/plain')
        add_link(req, 'alternate', report_href(format='tab', page=p),
                 _('Tab-delimited Text'), 'text/plain')
        if 'REPORT_SQL_VIEW' in req.perm(self.realm, id):
            add_link(req, 'alternate',
                     req.href.report(id=id, format='sql'),
                     _('SQL Query'), 'text/plain')

        # reuse the session vars of the query module so that
        # the query navigation links on the ticket can be used to
        # navigate report results as well
        try:
            req.session['query_tickets'] = \
                ' '.join(str(int(row['id']))
                         for rg in row_groups for row in rg[1])
            # NOTE(review): the assignment below is duplicated — the
            # target is written twice with the same value.  Harmless,
            # but likely a copy/paste typo; a single assignment would do.
            req.session['query_href'] = \
                req.session['query_href'] = report_href()
            # Kludge: we have to clear the other query session
            # variables, but only if the above succeeded
            for var in ('query_constraints', 'query_time'):
                if var in req.session:
                    del req.session[var]
        except (ValueError, KeyError):
            pass
        if set(data['args']) - {'USER'}:
            data['show_args_form'] = True
        # Add values of all select-type ticket fields for autocomplete.
        fields = TicketSystem(self.env).get_ticket_fields()
        arg_values = {}
        for arg in set(data['args']) - {'USER'}:
            attrs = fields.by_name(arg.lower())
            if attrs and 'options' in attrs:
                arg_values[attrs['name']] = attrs['options']
        if arg_values:
            add_script_data(req, arg_values=arg_values)
            Chrome(self.env).add_jquery_ui(req)
    if missing_args:
        add_warning(req,
                    _('The following arguments are missing: %(args)s',
                      args=", ".join(missing_args)))
    return 'report_view.html', data, None
def _prepare_results(self, req, filters, results):
    """Paginate repository-search results and decorate them for display.

    Wraps the raw result tuples in a ``Paginator`` (100 items per page),
    converts each result into a template-friendly dict, attaches shown
    page links / current page info to the paginator, and registers
    prev/next navigation links on the request.

    :param req: the HTTP request; pagination state comes from the
        ``page`` argument and prev/next links are added to it
    :param filters: iterable of enabled search filter names, re-encoded
        into every generated link as ``<filter>=on`` pairs
    :param results: sequence of ``(href, title, date, author, excerpt)``
        tuples
    :return: dict with the decorated ``Paginator`` under ``'results'``
        and the filter-preserving base URL under ``'page_href'``
    """
    page = int(req.args.get('page', '1'))
    results = Paginator(results, page - 1, 100)
    for idx, result in enumerate(results):
        results[idx] = {'href': result[0],
                        'title': result[1],
                        'date': format_datetime(result[2]),
                        'author': result[3],
                        'excerpt': result[4]}

    def search_href(**extra):
        # Single place that rebuilds the reposearch URL while carrying
        # over every filter and query parameter of the current search.
        # (The original repeated this seven-keyword call six times.)
        return req.href.reposearch([(f, 'on') for f in filters],
                                   q=req.args.get('q'),
                                   p=req.args.get('p'),
                                   f=req.args.get('f'),
                                   o=req.args.get('o'),
                                   r=req.args.get('r'),
                                   c=req.args.get('c'),
                                   **extra)

    # Build the list of clickable page links surrounding the current one.
    pagedata = []
    for shown_page in results.get_shown_pages(21):
        pagedata.append([search_href(page=shown_page, noquickjump=1),
                         None, str(shown_page),
                         'page ' + str(shown_page)])
    fields = ['href', 'class', 'string', 'title']
    results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1),
                            'title': None}

    if results.has_next_page:
        add_link(req, 'next', search_href(page=page + 1, noquickjump=1),
                 'Next Page')
    if results.has_previous_page:
        add_link(req, 'prev', search_href(page=page - 1, noquickjump=1),
                 'Previous Page')

    return {'results': results,
            'page_href': search_href(noquickjump=1)}
def _render_config(self, req, config_name):
    """Render the detail page of a single Bitten build configuration.

    Gathers configuration metadata, attachments, per-platform build
    queue counts, optional report charts, and a paginated table of
    builds grouped by changeset.

    :param req: the HTTP request
    :param config_name: name of the build configuration to display
    :return: data dict for the ``view_config`` page mode
    :raises HTTPNotFound: if no such configuration exists
    :raises TracError: if repository permission checking fails
    """
    db = self.env.get_db_cnx()
    config = BuildConfig.fetch(self.env, config_name, db=db)
    if not config:
        raise HTTPNotFound("Build configuration '%s' does not exist." \
                           % config_name)

    repos_name, repos, repos_path = get_repos(self.env, config.path,
                                              req.authname)

    rev = config.max_rev or repos.youngest_rev
    try:
        _has_permission(req.perm, repos, repos_path, rev=rev,
                        raise_error=True)
    except NoSuchNode:
        raise TracError("Permission checking against repository path %s "
                        "at revision %s failed." % (config.path, rev))

    # BUGFIX: '%' binds tighter than 'or', so the original
    #   'Build Configuration "%s"' % config.label or config.name
    # always used config.label (the formatted string is never falsy)
    # and the fallback to config.name was dead code.
    data = {'title': 'Build Configuration "%s"'
                     % (config.label or config.name),
            'page_mode': 'view_config'}
    add_link(req, 'up', req.href.build(), 'Build Status')

    description = config.description
    if description:
        description = wiki_to_html(description, self.env, req)

    pending_builds = list(Build.select(self.env, config=config.name,
                                       status=Build.PENDING))
    inprogress_builds = list(Build.select(self.env, config=config.name,
                                          status=Build.IN_PROGRESS))

    # BUGFIX: the original ended both get_resource_url() assignments
    # with a stray trailing comma, turning the URLs into 1-tuples.
    min_chgset_url = ''
    if config.min_rev:
        min_chgset_resource = get_chgset_resource(self.env, repos_name,
                                                  config.min_rev)
        min_chgset_url = get_resource_url(self.env, min_chgset_resource,
                                          req.href)
    max_chgset_url = ''
    if config.max_rev:
        max_chgset_resource = get_chgset_resource(self.env, repos_name,
                                                  config.max_rev)
        max_chgset_url = get_resource_url(self.env, max_chgset_resource,
                                          req.href)

    data['config'] = {
        'name': config.name, 'label': config.label, 'path': config.path,
        'min_rev': config.min_rev, 'min_rev_href': min_chgset_url,
        'max_rev': config.max_rev, 'max_rev_href': max_chgset_url,
        'active': config.active, 'description': description,
        'browser_href': req.href.browser(config.path),
        'builds_pending': len(pending_builds),
        'builds_inprogress': len(inprogress_builds)
    }

    context = Context.from_request(req, config.resource)
    data['context'] = context
    data['config']['attachments'] = \
        AttachmentModule(self.env).attachment_data(context)

    platforms = list(TargetPlatform.select(self.env, config=config_name,
                                           db=db))
    data['config']['platforms'] = [
        {'name': platform.name,
         'id': platform.id,
         'builds_pending':
             len(list(Build.select(self.env, config=config.name,
                                   status=Build.PENDING,
                                   platform=platform.id))),
         'builds_inprogress':
             len(list(Build.select(self.env, config=config.name,
                                   status=Build.IN_PROGRESS,
                                   platform=platform.id)))}
        for platform in platforms
    ]

    # any() replaces the original manual for/break existence probe.
    has_reports = any(True for _report in
                      Report.select(self.env, config=config.name, db=db))
    if has_reports:
        chart_generators = []
        report_categories = \
            list(self._report_categories_for_config(config))
        for generator in ReportChartController(self.env).generators:
            for category in generator.get_supported_categories():
                if category in report_categories:
                    chart_generators.append({
                        'href': req.href.build(config.name,
                                               'chart/' + category),
                        'category': category,
                        'style': self.config.get('bitten',
                                                 'chart_style'),
                    })
        data['config']['charts'] = chart_generators

    page = max(1, int(req.args.get('page', 1)))
    more = False
    data['page_number'] = page

    # One page shows up to 12 changesets worth of builds per platform;
    # only the slice belonging to the current page is materialized.
    builds_per_page = 12 * len(platforms)
    idx = 0
    builds = {}
    revisions = []
    build_order = []
    for platform, rev, build in collect_changes(config,
                                                authname=req.authname):
        if idx >= page * builds_per_page:
            more = True
            break
        elif idx >= (page - 1) * builds_per_page:
            if rev not in builds:
                revisions.append(rev)
            builds.setdefault(rev, {})
            chgset_resource = get_chgset_resource(self.env, repos_name,
                                                  rev)
            builds[rev].setdefault('href',
                                   get_resource_url(self.env,
                                                    chgset_resource,
                                                    req.href))
            build_order.append((rev, repos.get_changeset(rev).date))
            builds[rev].setdefault('display_rev',
                                   display_rev(repos, rev))
            if build and build.status != Build.PENDING:
                build_data = _get_build_data(self.env, req, build)
                build_data['steps'] = []
                for step in BuildStep.select(self.env, build=build.id,
                                             db=db):
                    build_data['steps'].append({
                        'name': step.name,
                        'description': step.description,
                        'duration':
                            to_datetime(step.stopped or
                                        int(time.time()), utc) -
                            to_datetime(step.started, utc),
                        'status': _step_status_label[step.status],
                        'cls': _step_status_label[step.status]
                                   .replace(' ', '-'),
                        'errors': step.errors,
                        'href': build_data['href'] + '#step_' + step.name
                    })
                builds[rev][platform.id] = build_data
        idx += 1

    # Newest changesets first.
    data['config']['build_order'] = \
        [r[0] for r in sorted(build_order, key=lambda x: x[1],
                              reverse=True)]
    data['config']['builds'] = builds
    data['config']['revisions'] = revisions

    if page > 1:
        if page == 2:
            prev_href = req.href.build(config.name)
        else:
            prev_href = req.href.build(config.name, page=page - 1)
        add_link(req, 'prev', prev_href, 'Previous Page')
    if more:
        next_href = req.href.build(config.name, page=page + 1)
        add_link(req, 'next', next_href, 'Next Page')
    if arity(prevnext_nav) == 4:  # Trac 0.12 compat, see #450
        prevnext_nav(req, 'Previous Page', 'Next Page')
    else:
        prevnext_nav(req, 'Page')
    return data
def _render_html(self, req, repos, chgset, restricted, message, diff,
                 diff_options):
    """HTML version.

    Populates ``req.hdf`` (ClearSilver data tree) for either a changeset
    view (``chgset`` set, possibly restricted to one path) or an
    arbitrary diff between two path@rev pairs, including prev/next
    navigation links and per-change property/content diffs.

    :param req: the HTTP request (output goes into ``req.hdf``)
    :param repos: the repository being browsed
    :param chgset: the changeset to display, or a false value for
        plain diff mode
    :param restricted: whether the changeset view is restricted to
        ``diff.new_path`` and its subtree
    :param message: pre-rendered changeset message
    :param diff: object carrying ``old_path``/``old_rev``/
        ``new_path``/``new_rev`` (also usable as kwargs in diff mode)
    :param diff_options: ``(style, options)`` pair; ``options`` holds
        diff flags such as ``-U<n>``, ``-B``, ``-b``, ``-i``
    """
    req.hdf['changeset'] = {
        'chgset': chgset and True,
        'restricted': restricted,
        'href': {
            'new_rev': req.href.changeset(diff.new_rev),
            'old_rev': req.href.changeset(diff.old_rev),
            'new_path': req.href.browser(diff.new_path,
                                         rev=diff.new_rev),
            'old_path': req.href.browser(diff.old_path,
                                         rev=diff.old_rev)
        }
    }
    if chgset:  # Changeset Mode (possibly restricted on a path)
        path, rev = diff.new_path, diff.new_rev

        # -- getting the change summary from the Changeset.get_changes
        def get_changes():
            for npath, kind, change, opath, orev in \
                    chgset.get_changes():
                old_node = new_node = None
                if (restricted and
                    not (npath == path or                 # same path
                         npath.startswith(path + '/') or  # npath is below
                         path.startswith(npath + '/'))):  # npath is above
                    continue
                if change != Changeset.ADD:
                    old_node = repos.get_node(opath, orev)
                if change != Changeset.DELETE:
                    new_node = repos.get_node(npath, rev)
                yield old_node, new_node, kind, change

        def _changeset_title(rev):
            # NOTE: user-visible strings here are French ('pour' = 'for');
            # they are runtime output and are kept verbatim.
            if restricted:
                return u'Version %s pour %s' % (rev, path)
            else:
                return u'Version %s' % rev

        title = _changeset_title(rev)
        properties = []
        for name, value, wikiflag, htmlclass in \
                chgset.get_properties():
            if wikiflag:
                value = wiki_to_html(value or '', self.env, req)
            properties.append({'name': name, 'value': value,
                               'htmlclass': htmlclass})
        req.hdf['changeset'] = {
            'revision': chgset.rev,
            'time': format_datetime(chgset.date),
            'age': pretty_timedelta(chgset.date, None, 3600),
            'author': chgset.author or 'anonymous',
            'message': message, 'properties': properties
        }

        oldest_rev = repos.oldest_rev
        if chgset.rev != oldest_rev:
            if restricted:
                # Previous change limited to the restricted path's own
                # history.
                prev = repos.get_node(path, rev).get_previous()
                if prev:
                    prev_path, prev_rev = prev[:2]
                    if prev_rev:
                        prev_href = req.href.changeset(prev_rev,
                                                       prev_path)
                else:
                    prev_path = prev_rev = None
            else:
                add_link(req, 'first', req.href.changeset(oldest_rev),
                         u'Version %s' % oldest_rev)
                prev_path = diff.old_path
                prev_rev = repos.previous_rev(chgset.rev)
                if prev_rev:
                    prev_href = req.href.changeset(prev_rev)
            # prev_href is only bound when prev_rev is truthy, so this
            # guard also protects against the unbound name.
            if prev_rev:
                add_link(req, 'prev', prev_href,
                         _changeset_title(prev_rev))

        youngest_rev = repos.youngest_rev
        if str(chgset.rev) != str(youngest_rev):
            if restricted:
                next_rev = repos.next_rev(chgset.rev, path)
                if next_rev:
                    if repos.has_node(path, next_rev):
                        next_href = req.href.changeset(next_rev, path)
                    else:
                        # must be a 'D'elete or 'R'ename, show full cset
                        next_href = req.href.changeset(next_rev)
            else:
                add_link(req, 'last', req.href.changeset(youngest_rev),
                         u'Version %s' % youngest_rev)
                next_rev = repos.next_rev(chgset.rev)
                if next_rev:
                    next_href = req.href.changeset(next_rev)
            if next_rev:
                add_link(req, 'next', next_href,
                         _changeset_title(next_rev))
    else:  # Diff Mode
        # -- getting the change summary from the Repository.get_changes
        def get_changes():
            for d in repos.get_changes(**diff):
                yield d

        reverse_href = req.href.changeset(diff.old_rev, diff.old_path,
                                          old=diff.new_rev,
                                          old_path=diff.new_path)
        req.hdf['changeset.reverse_href'] = reverse_href
        req.hdf['changeset.href.log'] = req.href.log(
            diff.new_path, rev=diff.new_rev, stop_rev=diff.old_rev)
        title = self.title_for_diff(diff)
    req.hdf['title'] = title

    if not req.perm.has_permission('BROWSER_VIEW'):
        return

    def _change_info(old_node, new_node, change):
        # Build the hdf dict describing one changed node (old/new
        # paths, revisions and browser links).
        info = {'change': change}
        if old_node:
            info['path.old'] = old_node.path
            info['rev.old'] = old_node.rev
            info['shortrev.old'] = repos.short_rev(old_node.rev)
            old_href = req.href.browser(old_node.created_path,
                                        rev=old_node.created_rev)
            # Reminder: old_node.path may not exist at old_node.rev
            #           as long as old_node.rev==old_node.created_rev
            #           ... and diff.old_rev may have nothing to do
            #           with _that_ node specific history...
            info['browser_href.old'] = old_href
        if new_node:
            info['path.new'] = new_node.path
            info['rev.new'] = new_node.rev  # created rev.
            info['shortrev.new'] = repos.short_rev(new_node.rev)
            new_href = req.href.browser(new_node.created_path,
                                        rev=new_node.created_rev)
            # (same remark as above)
            info['browser_href.new'] = new_href
        return info

    hidden_properties = self.config.getlist('browser',
                                            'hide_properties')

    def _prop_changes(old_node, new_node):
        # Compare node properties; return a list of {name, old, new}
        # entries (old/new omitted for added/removed properties),
        # filtering out properties configured as hidden.
        old_props = old_node.get_properties()
        new_props = new_node.get_properties()
        changed_props = {}
        if old_props != new_props:
            for k, v in old_props.items():
                if not k in new_props:
                    changed_props[k] = {
                        'old': render_node_property(self.env, k, v)}
                elif v != new_props[k]:
                    changed_props[k] = {
                        'old': render_node_property(self.env, k, v),
                        'new': render_node_property(self.env, k,
                                                    new_props[k])}
            for k, v in new_props.items():
                if not k in old_props:
                    changed_props[k] = {
                        'new': render_node_property(self.env, k, v)}
            for k in hidden_properties:
                if k in changed_props:
                    del changed_props[k]
        changed_properties = []
        for name, props in changed_props.iteritems():
            props.update({'name': name})
            changed_properties.append(props)
        return changed_properties

    def _estimate_changes(old_node, new_node):
        # Cheap upper bound on diff size: sum of both content lengths.
        old_size = old_node.get_content_length()
        new_size = new_node.get_content_length()
        return old_size + new_size

    def _content_changes(old_node, new_node):
        """Returns the list of differences.

        The list is empty when no differences between comparable files
        are detected, but the return value is None for non-comparable
        files.
        """
        old_content = old_node.get_content().read()
        if is_binary(old_content):
            return None
        new_content = new_node.get_content().read()
        if is_binary(new_content):
            return None
        mview = Mimeview(self.env)
        old_content = mview.to_unicode(old_content,
                                       old_node.content_type)
        new_content = mview.to_unicode(new_content,
                                       new_node.content_type)
        if old_content != new_content:
            # -U<n> sets the number of context lines; negative means
            # full-file context (None).
            context = 3
            options = diff_options[1]
            for option in options:
                if option.startswith('-U'):
                    context = int(option[2:])
                    break
            if context < 0:
                context = None
            tabwidth = self.config['diff'].getint('tab_width') or \
                       self.config['mimeviewer'].getint('tab_width', 8)
            return hdf_diff(old_content.splitlines(),
                            new_content.splitlines(),
                            context, tabwidth,
                            ignore_blank_lines='-B' in options,
                            ignore_case='-i' in options,
                            ignore_space_changes='-b' in options)
        else:
            return []

    if req.perm.has_permission('FILE_VIEW'):
        # Decide whether inline diffs are shown at all, based on the
        # configured size/file-count limits (a single big file is
        # still diffed).
        diff_bytes = diff_files = 0
        if self.max_diff_bytes or self.max_diff_files:
            for old_node, new_node, kind, change in get_changes():
                if change in Changeset.DIFF_CHANGES and \
                        kind == Node.FILE:
                    diff_files += 1
                    diff_bytes += _estimate_changes(old_node, new_node)
        show_diffs = (not self.max_diff_files or \
                      diff_files <= self.max_diff_files) and \
                     (not self.max_diff_bytes or \
                      diff_bytes <= self.max_diff_bytes or \
                      diff_files == 1)
    else:
        show_diffs = False

    idx = 0
    for old_node, new_node, kind, change in get_changes():
        show_entry = change != Changeset.EDIT
        if change in Changeset.DIFF_CHANGES and \
                req.perm.has_permission('FILE_VIEW'):
            assert old_node and new_node
            props = _prop_changes(old_node, new_node)
            if props:
                req.hdf['changeset.changes.%d.props' % idx] = props
                show_entry = True
            if kind == Node.FILE and show_diffs:
                diffs = _content_changes(old_node, new_node)
                if diffs != []:
                    if diffs:
                        req.hdf['changeset.changes.%d.diff' % idx] = \
                            diffs
                    # elif None (means: manually compare to (previous))
                    show_entry = True
        if show_entry or not show_diffs:
            info = _change_info(old_node, new_node, change)
            if change in Changeset.DIFF_CHANGES and not show_diffs:
                # Diffs suppressed: link to the standalone diff page
                # instead.
                if chgset:
                    diff_href = req.href.changeset(new_node.rev,
                                                   new_node.path)
                else:
                    diff_href = req.href.changeset(
                        new_node.created_rev, new_node.created_path,
                        old=old_node.created_rev,
                        old_path=old_node.created_path)
                info['diff_href'] = diff_href
            req.hdf['changeset.changes.%d' % idx] = info
        idx += 1  # the sequence should be immutable
def process_request(self, req):
    """Display a single Bitten build, its steps, logs and reports.

    A POST with ``action=invalidate`` re-queues the build and then
    redirects back to this page (``req.redirect`` does not return).

    :param req: the HTTP request; must grant BUILD_VIEW, and the
        repository permission for the build's revision is also checked
    :return: ``('bitten_build.html', data, None)`` template triple
    :raises HTTPNotFound: if the requested build id does not exist
    """
    req.perm.require('BUILD_VIEW')

    db = self.env.get_db_cnx()
    # NOTE(review): int() raises TypeError/ValueError (a 500) rather
    # than a 404 when 'id' is missing or non-numeric.
    build_id = int(req.args.get('id'))
    build = Build.fetch(self.env, build_id, db=db)
    if not build:
        raise HTTPNotFound("Build '%s' does not exist." \
                           % build_id)

    if req.method == 'POST':
        if req.args.get('action') == 'invalidate':
            self._do_invalidate(req, build, db)
        # Redirect after any POST (PRG pattern); never falls through.
        req.redirect(req.href.build(build.config, build.id))

    add_link(req, 'up', req.href.build(build.config),
             'Build Configuration')
    data = {'title': 'Build %s - %s' % (build_id,
                                        _status_title[build.status]),
            'page_mode': 'view_build',
            'build': {}}
    config = BuildConfig.fetch(self.env, build.config, db=db)
    data['build']['config'] = {
        'name': config.label or config.name,
        'href': req.href.build(config.name)
    }

    context = Context.from_request(req, build.resource)
    data['context'] = context
    data['build']['attachments'] = \
        AttachmentModule(self.env).attachment_data(context)

    # Per-request log formatters supplied by plugins.
    formatters = []
    for formatter in self.log_formatters:
        formatters.append(formatter.get_formatter(req, build))

    summarizers = {}  # keyed by report type
    for summarizer in self.report_summarizers:
        categories = summarizer.get_supported_categories()
        summarizers.update(dict([(cat, summarizer)
                                 for cat in categories]))

    repos_name, repos, repos_path = get_repos(self.env, config.path,
                                              req.authname)
    _has_permission(req.perm, repos, repos_path, rev=build.rev,
                    raise_error=True)

    data['build'].update(_get_build_data(self.env, req, build,
                                         repos_name))
    steps = []
    for step in BuildStep.select(self.env, build=build.id, db=db):
        steps.append({
            'name': step.name, 'description': step.description,
            # A still-running step has no 'stopped'; measure against now.
            'duration': pretty_timedelta(step.started, step.stopped or
                                         int(time.time())),
            'status': _step_status_label[step.status],
            'cls': _step_status_label[step.status].replace(' ', '-'),
            'errors': step.errors,
            'log': self._render_log(req, build, formatters, step),
            'reports': self._render_reports(req, config, build,
                                            summarizers, step)
        })
    data['build']['steps'] = steps
    data['build']['can_delete'] = ('BUILD_DELETE' in req.perm \
                                   and build.status != build.PENDING)

    chgset = repos.get_changeset(build.rev)
    data['build']['chgset_author'] = chgset.author
    data['build']['display_rev'] = display_rev(repos, build.rev)

    add_script(req, 'common/js/folding.js')
    add_script(req, 'bitten/tabset.js')
    add_script(req, 'bitten/jquery.flot.js')
    add_stylesheet(req, 'bitten/bitten.css')

    return 'bitten_build.html', data, None
def process_request(self, req):
    """Dispatch and render all blog web requests.

    Routes on the command parsed from the request path:

    * no command      -- front page listing the most recent posts
    * ``archive``     -- posts grouped by month
    * ``view``        -- a single post, including comment add/preview
    * ``create``/``edit`` -- post editor with preview and permission checks
    * ``delete``      -- delete a post version or a single comment
    * ``listing-*``   -- posts filtered by month, category or author

    The front page and listings also support ``format=rss``.

    :return: the ``(template, data, content_type)`` tuple expected by
             Trac's ``IRequestHandler``; may redirect or raise
             ``HTTPNotFound`` / ``TracError`` instead.
    """
    # assert_permission() is deprecated in Trac in favour of
    # require(), which every other permission check here already uses.
    req.perm('blog').require('BLOG_VIEW')

    blog_core = FullBlogCore(self.env)
    format = req.args.get('format', '').lower()
    command, pagename, path_items, listing_data = self._parse_path(req)
    try:
        version = int(req.args.get('version', 0))
    except (TypeError, ValueError):
        # Unusable 'version' argument - fall back to latest version.
        version = 0

    data = {}
    template = 'fullblog_view.html'
    data['blog_about'] = BlogPost(self.env, 'about')
    data['blog_infotext'] = blog_core.get_bloginfotext()
    blog_month_names = map_month_names(
        self.env.config.getlist('fullblog', 'month_names'))
    data['blog_month_names'] = blog_month_names

    self.env.log.debug(
        "Blog debug: command=%r, pagename=%r, path_items=%r" % (
            command, pagename, path_items))

    if not command:
        # Request for just root (display latest)
        data['blog_post_list'] = []
        count = 0
        maxcount = self.num_items
        blog_posts = get_blog_posts(self.env)
        for post in blog_posts:
            bp = BlogPost(self.env, post[0], post[1])
            if 'BLOG_VIEW' in req.perm(bp.resource):
                data['blog_post_list'].append(bp)
                count += 1
            if maxcount and count == maxcount:
                # Only display a certain number on front page (from config)
                break
        data['blog_list_title'] = "Recent posts" + \
                (len(blog_posts) > maxcount and \
                 " (max %d) - Browse or Archive for more" % (maxcount,) \
                 or '')
        add_link(req, 'alternate', req.href.blog(format='rss'),
                 'RSS Feed', 'application/rss+xml', 'rss')

    elif command == 'archive':
        # Requesting the archive page
        template = 'fullblog_archive.html'
        data['blog_archive'] = []
        for period, period_posts in group_posts_by_month(
                get_blog_posts(self.env)):
            allowed_posts = []
            for post in period_posts:
                bp = BlogPost(self.env, post[0], post[1])
                if 'BLOG_VIEW' in req.perm(bp.resource):
                    allowed_posts.append(post)
            if allowed_posts:
                data['blog_archive'].append((period, allowed_posts))
        add_link(req, 'alternate', req.href.blog(format='rss'),
                 'RSS Feed', 'application/rss+xml', 'rss')

    elif command == 'view' and pagename:
        # Requesting a specific blog post
        the_post = BlogPost(self.env, pagename, version)
        req.perm(the_post.resource).require('BLOG_VIEW')
        if not the_post.version:
            raise HTTPNotFound("No blog post named '%s'." % pagename)
        if req.method == 'POST':
            # Adding/Previewing a comment
            # Permission?
            req.perm(the_post.resource).require('BLOG_COMMENT')
            comment = BlogComment(self.env, pagename)
            comment.comment = req.args.get('comment', '')
            # Authenticated username wins over the form's author field.
            comment.author = (req.authname != 'anonymous' and req.authname) \
                        or req.args.get('author')
            comment.time = datetime.datetime.now(utc)
            warnings = []
            if 'cancelcomment' in req.args:
                req.redirect(req.href.blog(pagename))
            elif 'previewcomment' in req.args:
                warnings.extend(
                    blog_core.create_comment(req, comment, verify_only=True))
            elif 'submitcomment' in req.args and not warnings:
                warnings.extend(blog_core.create_comment(req, comment))
                if not warnings:
                    req.redirect(req.href.blog(pagename)
                                 + '#comment-' + str(comment.number))
            data['blog_comment'] = comment
            # Push all warnings out to the user.
            for field, reason in warnings:
                if field:
                    add_warning(req, "Field '%s': %s" % (field, reason))
                else:
                    add_warning(req, reason)
        data['blog_post'] = the_post
        context = Context.from_request(req, the_post.resource)
        data['context'] = context
        data['blog_attachments'] = AttachmentModule(
            self.env).attachment_data(context)
        # Previous and Next ctxtnav
        prev, next = blog_core.get_prev_next_posts(req.perm, the_post.name)
        if prev:
            add_link(req, 'prev', req.href.blog(prev), prev)
        if next:
            add_link(req, 'next', req.href.blog(next), next)
        if arity(prevnext_nav) == 4:
            # 0.12 compat following trac:changeset:8597
            prevnext_nav(req, 'Previous Post', 'Next Post')
        else:
            prevnext_nav(req, 'Post')

    elif command in ['create', 'edit']:
        template = 'fullblog_edit.html'
        default_pagename = blog_core._get_default_postname(req.authname)
        the_post = BlogPost(self.env, pagename or default_pagename)
        warnings = []

        if command == 'create' and req.method == 'GET' \
                and not the_post.version:
            # Support appending query arguments for populating initial fields
            the_post.update_fields(req.args)
        if command == 'create' and the_post.version:
            # Post with name or suggested name already exists
            if 'BLOG_CREATE' in req.perm \
                    and the_post.name == default_pagename \
                    and not req.method == 'POST':
                if default_pagename:
                    add_notice(req, "Suggestion for new name already exists "
                               "('%s'). Please make a new name."
                               % the_post.name)
            elif pagename:
                warnings.append(
                    ('', "A post named '%s' already exists. Enter new name."
                         % the_post.name))
            the_post = BlogPost(self.env, '')
        if command == 'edit':
            req.perm(the_post.resource).require('BLOG_VIEW')  # Starting point

        if req.method == 'POST':
            # Create or edit a blog post
            if 'blog-cancel' in req.args:
                if req.args.get('action', '') == 'edit':
                    req.redirect(req.href.blog(pagename))
                else:
                    req.redirect(req.href.blog())
            # Assert permissions
            if command == 'create':
                req.perm(Resource('blog', None)).require('BLOG_CREATE')
            elif command == 'edit':
                if the_post.author == req.authname:
                    req.perm(the_post.resource).require('BLOG_MODIFY_OWN')
                else:
                    req.perm(the_post.resource).require('BLOG_MODIFY_ALL')
            # Check input
            orig_author = the_post.author
            if not the_post.update_fields(req.args):
                warnings.append(('', "None of the fields have changed."))
            version_comment = req.args.get('new_version_comment', '')
            if 'blog-preview' in req.args:
                warnings.extend(
                    blog_core.create_post(req, the_post, req.authname,
                                          version_comment, verify_only=True))
            elif 'blog-save' in req.args and not warnings:
                warnings.extend(
                    blog_core.create_post(req, the_post, req.authname,
                                          version_comment))
                if not warnings:
                    req.redirect(req.href.blog(the_post.name))
            context = Context.from_request(req, the_post.resource)
            data['context'] = context
            data['blog_attachments'] = AttachmentModule(
                self.env).attachment_data(context)
            data['blog_action'] = 'preview'
            data['blog_version_comment'] = version_comment
            if (orig_author and orig_author != the_post.author) and (
                    not 'BLOG_MODIFY_ALL' in req.perm(the_post.resource)):
                add_notice(req, "If you change the author you cannot " \
                           "edit the post again due to restricted permissions.")
                data['blog_orig_author'] = orig_author
            for field, reason in warnings:
                if field:
                    add_warning(req, "Field '%s': %s" % (field, reason))
                else:
                    add_warning(req, reason)
        data['blog_edit'] = the_post

    elif command == 'delete':
        bp = BlogPost(self.env, pagename)
        req.perm(bp.resource).require('BLOG_DELETE')
        if 'blog-cancel' in req.args:
            req.redirect(req.href.blog(pagename))
        comment = int(req.args.get('comment', '0'))
        warnings = []
        if comment:
            # Deleting a specific comment
            bc = BlogComment(self.env, pagename, comment)
            if not bc.number:
                raise TracError(
                    "Cannot delete. Blog post name and/or comment number missing."
                )
            if req.method == 'POST' and comment and pagename:
                warnings.extend(blog_core.delete_comment(bc))
                if not warnings:
                    add_notice(req, "Blog comment %d deleted." % comment)
                    req.redirect(req.href.blog(pagename))
            template = 'fullblog_delete.html'
            data['blog_comment'] = bc
        else:
            # Delete a version of a blog post or all versions
            # with comments and attachments if only version.
            if not bp.version:
                raise TracError(
                    "Cannot delete. Blog post '%s' does not exist."
                    % (bp.name))
            version = int(req.args.get('version', '0'))
            if req.method == 'POST':
                if 'blog-version-delete' in req.args:
                    if bp.version != version:
                        raise TracError(
                            "Cannot delete. Can only delete most recent version."
                        )
                    warnings.extend(
                        blog_core.delete_post(bp, version=bp.versions[-1]))
                elif 'blog-delete' in req.args:
                    version = 0
                    warnings.extend(
                        blog_core.delete_post(bp, version=version))
                if not warnings:
                    if version > 1:
                        add_notice(req, "Blog post '%s' version %d deleted."
                                   % (pagename, version))
                        req.redirect(req.href.blog(pagename))
                    else:
                        add_notice(req, "Blog post '%s' deleted." % pagename)
                        req.redirect(req.href.blog())
            template = 'fullblog_delete.html'
            data['blog_post'] = bp
        for field, reason in warnings:
            if field:
                add_warning(req, "Field '%s': %s" % (field, reason))
            else:
                add_warning(req, reason)

    elif command.startswith('listing-'):
        # 2007/10 or category/something or author/theuser
        title = category = author = ''
        from_dt = to_dt = None
        if command == 'listing-month':
            from_dt = listing_data['from_dt']
            to_dt = listing_data['to_dt']
            title = "Posts for the month of %s %d" % (
                blog_month_names[from_dt.month - 1], from_dt.year)
            add_link(req, 'alternate', req.href.blog(format='rss'),
                     'RSS Feed', 'application/rss+xml', 'rss')
        elif command == 'listing-category':
            category = listing_data['category']
            if category:
                title = "Posts in category %s" % category
                add_link(req, 'alternate',
                         req.href.blog('category', category, format='rss'),
                         'RSS Feed', 'application/rss+xml', 'rss')
        elif command == 'listing-author':
            author = listing_data['author']
            if author:
                title = "Posts by author %s" % author
                add_link(req, 'alternate',
                         req.href.blog('author', author, format='rss'),
                         'RSS Feed', 'application/rss+xml', 'rss')
        if not (author or category or (from_dt and to_dt)):
            raise HTTPNotFound("Not a valid path for viewing blog posts.")
        blog_posts = []
        for post in get_blog_posts(self.env, category=category,
                                   author=author, from_dt=from_dt,
                                   to_dt=to_dt):
            bp = BlogPost(self.env, post[0], post[1])
            if 'BLOG_VIEW' in req.perm(bp.resource):
                blog_posts.append(bp)
        data['blog_post_list'] = blog_posts
        data['blog_list_title'] = title

    else:
        raise HTTPNotFound("Not a valid blog path.")

    # RSS rendering applies only to front page and listings.
    if (not command or command.startswith('listing-')) and format == 'rss':
        data['context'] = Context.from_request(req, absurls=True)
        data['blog_num_items'] = self.num_items
        return 'fullblog.rss', data, 'application/rss+xml'

    data['blog_months'], data['blog_authors'], data['blog_categories'], \
        data['blog_total'] = \
            blog_core.get_months_authors_categories(
                user=req.authname, perm=req.perm)
    if 'BLOG_CREATE' in req.perm('blog'):
        add_ctxtnav(req, 'New Post', href=req.href.blog('create'),
                    title="Create new Blog Post")
    add_stylesheet(req, 'tracfullblog/css/fullblog.css')
    add_stylesheet(req, 'common/css/code.css')
    data['blog_personal_blog'] = self.env.config.getbool(
        'fullblog', 'personal_blog')
    return (template, data, None)