def _prepare_attrs(self, req, attr):
    """Paginate the account-attribute dict for the admin users panel.

    :param req: the current request; ``page`` and ``max_per_page`` are
                read from its arguments.
    :param attr: dict of account attributes to display.
    :return: dict with the ``Paginator`` (``attr``) and the cleanup URL
             (``page_href``) for the template.
    """
    page = int(req.args.get('page', '1'))
    # Paginator can't deal with a dict, so convert to a list of tuples.
    # Use items() rather than the Python-2-only iteritems() so this also
    # runs under Python 3 (items() works on both).
    attr_lst = list(attr.items())
    max_per_page = as_int(req.args.get('max_per_page'), None)
    if max_per_page is None:
        max_per_page = self.ACCTS_PER_PAGE
    attr = Paginator(attr_lst, page - 1, max_per_page)
    pagedata = []
    shown_pages = attr.get_shown_pages(21)
    for shown_page in shown_pages:
        page_href = req.href.admin('accounts', 'users', page=shown_page,
                                   max_per_page=max_per_page)
        pagedata.append([page_href, None, str(shown_page),
                         _("page %(num)s", num=str(shown_page))])
    fields = ['href', 'class', 'string', 'title']
    attr.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    attr.current_page = {'href': None, 'class': 'current',
                         'string': str(attr.page + 1), 'title': None}
    if attr.has_next_page:
        next_href = req.href.admin('accounts', 'users', page=page + 1,
                                   max_per_page=max_per_page)
        add_link(req, 'next', next_href, _('Next Page'))
    if attr.has_previous_page:
        prev_href = req.href.admin('accounts', 'users', page=page - 1,
                                   max_per_page=max_per_page)
        add_link(req, 'prev', prev_href, _('Previous Page'))
    page_href = req.href.admin('accounts', 'cleanup')
    return {'attr': attr, 'page_href': page_href}
def _prepare_results(self, req, filters, results):
    """Paginate search results and attach navigation metadata.

    Falls back to the first page (adding a warning) when the requested
    page is out of range for the result set.
    """
    page = as_int(req.args.get('page', 1), default=1, min=1)
    try:
        results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
    except TracError:
        add_warning(req, _("Page %(page)s is out of range.", page=page))
        page = 1
        results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
    for idx, row in enumerate(results):
        results[idx] = {
            'href': row[0],
            'title': row[1],
            'date': user_time(req, format_datetime, row[2]),
            'author': row[3],
            'excerpt': row[4],
        }

    def search_href(**kw):
        # Every paging link repeats the active filters and query text.
        return req.href.search([(f, 'on') for f in filters],
                               q=req.args.get('q'), noquickjump=1, **kw)

    results.shown_pages = [
        {'href': search_href(page=num), 'class': None, 'string': str(num),
         'title': _("Page %(num)d", num=num)}
        for num in results.get_shown_pages(21)
    ]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    if results.has_next_page:
        add_link(req, 'next', search_href(page=page + 1), _('Next Page'))
    if results.has_previous_page:
        add_link(req, 'prev', search_href(page=page - 1), _('Previous Page'))
    return {'results': results, 'page_href': search_href()}
def get_paginator(self):
    """Build a Paginator over the current comments page.

    Adds next/prev links on the request and attaches the shown-pages
    navigation data used by the template.
    """
    def _page_href(num):
        query = copy.copy(self.req.args)
        query['page'] = num
        return self.req.href(self.href, query)

    total = Comments(self.req, self.env).count(self.args)
    paginator = Paginator(self.data['comments'], self.page - 1,
                          self.per_page, total)
    if paginator.has_next_page:
        add_link(self.req, 'next', _page_href(self.page + 1), 'Next Page')
    if paginator.has_previous_page:
        add_link(self.req, 'prev', _page_href(self.page - 1),
                 'Previous Page')
    paginator.shown_pages = [
        {'href': _page_href(num), 'class': None, 'string': str(num),
         'title': 'Page %d' % num}
        for num in paginator.get_shown_pages(page_index_count=11)
    ]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    return paginator
def page_paginator(self, req, iids, page):
    """Build a Paginator over *iids* and attach navigation data.

    :param req: current request; when falsy, prev/next links are skipped
                and the page index is hidden. NOTE(review): the shown-page
                hrefs below still dereference ``req``, so callers are
                expected to pass a real request in practice -- confirm.
    :param iids: sequence of item ids to paginate.
    :param page: 1-based page number.
    """
    results = Paginator(iids, int(page) - 1, self.items_per_page)
    apath = args_path(req.args)
    if req:
        if results.has_next_page:
            next_href = req.href(req.path_info, max=self.items_per_page,
                                 page=page + 1) + apath
            add_link(req, 'next', next_href, 'Next Page')
        if results.has_previous_page:
            prev_href = req.href(req.path_info, max=self.items_per_page,
                                 page=page - 1) + apath
            add_link(req, 'prev', prev_href, 'Previous Page')
    else:
        results.show_index = False
    pagedata = []
    shown_pages = results.get_shown_pages(21)
    for p in shown_pages:
        # Fix: the title was built as 'Page ' + str(p) + 'd', leaving a
        # stray trailing 'd' (leftover from a '%d' format conversion).
        pagedata.append([req.href(req.path_info, page=p) + apath, None,
                         str(p), 'Page %d' % p])
    results.shown_pages = [dict(zip(['href', 'class', 'string', 'title'], p))
                           for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    return results
def get_paginator(self):
    """Return a Paginator for the comment listing.

    Wires up prev/next links and the shown-page metadata consumed by
    the page template.
    """
    pager = Paginator(self.data['comments'], self.page - 1, self.per_page,
                      Comments(self.req, self.env).count(self.args))

    def link_to(num):
        query = copy.copy(self.req.args)
        query['page'] = num
        return self.req.href(self.href, query)

    if pager.has_next_page:
        add_link(self.req, 'next', link_to(self.page + 1), 'Next Page')
    if pager.has_previous_page:
        add_link(self.req, 'prev', link_to(self.page - 1), 'Previous Page')
    pager.shown_pages = [{'href': link_to(num),
                          'class': None,
                          'string': str(num),
                          'title': 'Page %d' % num}
                         for num in pager.get_shown_pages(page_index_count=11)]
    pager.current_page = {'href': None, 'class': 'current',
                          'string': str(pager.page + 1), 'title': None}
    return pager
def _paginate(self, req, results):
    """Paginate *results* for the ListTagged macro.

    Reads ``listtagged_page`` and ``listtagged_per_page`` from the
    request, guarding against non-positive values that would make
    ``Paginator`` fail, and attaches shown-page and prev/next
    navigation data.
    """
    self.query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1)
    if current_page < 1:
        # A zero/negative ?listtagged_page= would hand Paginator a
        # negative page index; fall back to the first page instead.
        current_page = 1
    items_per_page = as_int(req.args.get('listtagged_per_page'), None)
    if items_per_page is None or items_per_page < 1:
        # Reject non-positive page sizes as well as a missing parameter.
        items_per_page = self.items_per_page
    result = Paginator(results, current_page - 1, items_per_page)
    pagedata = []
    shown_pages = result.get_shown_pages(21)
    for page in shown_pages:
        page_href = self.get_href(req, items_per_page, page)
        pagedata.append([page_href, None, str(page),
                         _("Page %(num)d", num=page)])
    attributes = ['href', 'class', 'string', 'title']
    result.shown_pages = [dict(zip(attributes, p)) for p in pagedata]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}
    if result.has_next_page:
        next_href = self.get_href(req, items_per_page, current_page + 1)
        add_link(req, 'next', next_href, _('Next Page'))
    if result.has_previous_page:
        prev_href = self.get_href(req, items_per_page, current_page - 1)
        add_link(req, 'prev', prev_href, _('Previous Page'))
    return result
def _paginate(self, req, results):
    """Return a Paginator over *results*, wiring up navigation data."""
    self.query = req.args.get('q', None)
    cur = as_int(req.args.get('listtagged_page'), 1)
    per_page = as_int(req.args.get('listtagged_per_page'), None)
    if per_page is None:
        per_page = self.items_per_page
    result = Paginator(results, cur - 1, per_page)
    result.shown_pages = [
        {'href': self.get_href(req, per_page, num), 'class': None,
         'string': str(num), 'title': _("Page %(num)d", num=num)}
        for num in result.get_shown_pages(21)
    ]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}
    if result.has_next_page:
        add_link(req, 'next', self.get_href(req, per_page, cur + 1),
                 _('Next Page'))
    if result.has_previous_page:
        add_link(req, 'prev', self.get_href(req, per_page, cur - 1),
                 _('Previous Page'))
    return result
def process_translations_request(self, req):
    """Dispatch /translations URLs.

    Routes to one of three views depending on how much of the path is
    present: the catalog list, the locale list for one catalog, or a
    paginated message listing for one catalog/locale pair.
    """
    match = re.match(r'^/translations'
                     r'(?:/([0-9]+)?)?'        # catalog id
                     r'(?:/([A-Za-z\-_]+)?)?'  # locale name
                     r'(?:/([0-9]+)?)?',       # page
                     req.path_info)
    if not match:
        raise ResourceNotFound("Bad URL")
    # Each group may be None when the corresponding path segment is absent.
    catalog_id, locale_name, page = match.groups()
    Session = session(self.env)
    if not catalog_id:
        # List available catalogs
        data = {'projects': Session.query(Project).all()}
        return 'l10n_catalogs_list.html', data, None
    if not locale_name:
        # List available locales
        catalog = Session.query(Catalog).get(int(catalog_id))
        if not catalog:
            # Unknown catalog: bounce back to the catalog list.
            req.redirect(req.href.translations())
        data = {'catalog': catalog}
        return 'l10n_locales_list.html', data, None
    # List messages of specified locale
    catalog_id, page = int(catalog_id), int(page or 1)
    locale = Session.query(Locale).filter_by(locale=locale_name,
                                             catalog_id=catalog_id).first()
    if not locale:
        # Unknown locale: bounce back to the locale list for this catalog.
        req.redirect(req.href.translations(catalog_id))
    data = {'locale': locale, 'catalog_id': catalog_id}
    # 5 messages per page; show up to 25 page links in the index.
    paginator = Paginator(list(locale.catalog.messages), page-1, 5)
    data['messages'] = paginator
    shown_pages = paginator.get_shown_pages(25)
    pagedata = []
    for show_page in shown_pages:
        page_href = req.href.translations(catalog_id, locale_name,
                                          show_page)
        pagedata.append([page_href, None, str(show_page),
                         'page %s' % show_page])
    fields = ['href', 'class', 'string', 'title']
    paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    if paginator.has_next_page:
        add_link(req, 'next',
                 req.href.translations(catalog_id, locale_name, page+1),
                 _('Next Page'))
    if paginator.has_previous_page:
        add_link(req, 'prev',
                 req.href.translations(catalog_id, locale_name, page-1),
                 _('Previous Page'))
    return 'l10n_messages.html', data, None
def _paginate(self, req, results, realms):
    """Wrap *results* in a Paginator and attach page-navigation data.

    Falls back to page 1 if the requested page is out of range for the
    result set.
    """
    query = req.args.get('q', None)
    cur = as_int(req.args.get('listtagged_page'), 1, min=1)
    per_page = as_int(req.args.get('listtagged_per_page'),
                      self.items_per_page)
    if per_page < 1:
        per_page = self.items_per_page
    try:
        result = Paginator(results, cur - 1, per_page)
    except (AssertionError, TracError) as e:
        # AssertionError raised in Trac < 1.0.10, TracError otherwise
        self.log.warn("ListTagged macro: %s", e)
        cur = 1
        result = Paginator(results, cur - 1, per_page)

    def _href(num):
        return self.get_href(req, realms, query, per_page, num)

    result.shown_pages = [
        {'href': _href(num), 'class': None, 'string': str(num),
         'title': _("Page %(num)d", num=num)}
        for num in result.get_shown_pages(21)
    ]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}
    if result.has_next_page:
        add_link(req, 'next', _href(cur + 1), _('Next Page'))
    if result.has_previous_page:
        add_link(req, 'prev', _href(cur - 1), _('Previous Page'))
    return result
def _prepare_results(self, req, filters, results):
    """Paginate search results and attach navigation metadata.

    :return: dict with the ``Paginator`` (``results``) and the base
             search URL (``page_href``) for the template.
    """
    try:
        page = int(req.args.get('page', '1'))
    except ValueError:
        # A non-numeric ?page= value must not break the search page.
        page = 1
    if page < 1:
        # Zero/negative pages would hand Paginator a bad offset.
        page = 1
    results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
    for idx, result in enumerate(results):
        results[idx] = {'href': result[0], 'title': result[1],
                        'date': format_datetime(result[2]),
                        'author': result[3], 'excerpt': result[4]}
    pagedata = []
    shown_pages = results.get_shown_pages(21)
    for shown_page in shown_pages:
        page_href = req.href.search([(f, 'on') for f in filters],
                                    q=req.args.get('q'), page=shown_page,
                                    noquickjump=1)
        pagedata.append([page_href, None, str(shown_page),
                         'page ' + str(shown_page)])
    fields = ['href', 'class', 'string', 'title']
    results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    if results.has_next_page:
        next_href = req.href.search(zip(filters, ['on'] * len(filters)),
                                    q=req.args.get('q'), page=page + 1,
                                    noquickjump=1)
        add_link(req, 'next', next_href, _('Next Page'))
    if results.has_previous_page:
        prev_href = req.href.search(zip(filters, ['on'] * len(filters)),
                                    q=req.args.get('q'), page=page - 1,
                                    noquickjump=1)
        add_link(req, 'prev', prev_href, _('Previous Page'))
    page_href = req.href.search(zip(filters, ['on'] * len(filters)),
                                q=req.args.get('q'), noquickjump=1)
    return {'results': results, 'page_href': page_href}
def _pagelize_list(self, req, results, data):
    """Paginate the mail-archive listing.

    A requested page of -1 (the default) selects the last page of the
    archive. Page size and index width come from the [mailarchive]
    config section.
    """
    page = int(req.args.get('page', '-1'))
    per_page = int(self.env.config.get('mailarchive', 'items_page', '50'))
    index_width = int(self.env.config.get('mailarchive', 'shown_pages',
                                          '30'))
    if page == -1:
        # Default to the final page; a throwaway Paginator gives us the
        # total page count.
        page = Paginator(results, 0, per_page).num_pages
    results = Paginator(results, page - 1, per_page)
    data['page_results'] = results
    category = req.args.get('category', None)
    results.shown_pages = [
        {'href': req.href.mailarchive(category=category, page=num,
                                      noquickjump=1),
         'class': None, 'string': str(num), 'title': 'page ' + str(num)}
        for num in results.get_shown_pages(index_width)
    ]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    if results.has_next_page:
        add_link(req, 'next',
                 req.href.mailarchive(category=category, page=page + 1),
                 _('Next Page'))
    if results.has_previous_page:
        add_link(req, 'prev',
                 req.href.mailarchive(category=category, page=page - 1),
                 _('Previous Page'))
    data['page_href'] = req.href.mailarchive(category=category)
    return results
def _prepare_results(self, req, filters, results):
    """Turn raw search rows into dicts, paginate, and add nav links."""
    page = int(req.args.get("page", "1"))
    results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
    for idx, row in enumerate(results):
        results[idx] = {
            "href": row[0],
            "title": row[1],
            "date": user_time(req, format_datetime, row[2]),
            "author": row[3],
            "excerpt": row[4],
        }

    def search_href(**kw):
        # Every link repeats the active filters and the query string.
        return req.href.search([(f, "on") for f in filters],
                               q=req.args.get("q"), noquickjump=1, **kw)

    results.shown_pages = [
        {"href": search_href(page=num), "class": None, "string": str(num),
         "title": _("Page %(num)d", num=num)}
        for num in results.get_shown_pages(21)
    ]
    results.current_page = {"href": None, "class": "current",
                            "string": str(results.page + 1), "title": None}
    if results.has_next_page:
        add_link(req, "next", search_href(page=page + 1), _("Next Page"))
    if results.has_previous_page:
        add_link(req, "prev", search_href(page=page - 1), _("Previous Page"))
    return {"results": results, "page_href": search_href()}
def build_paginator(self, results, page, limit, total, action):
    """Build a Paginator for a QA report page.

    When *results* is empty, placeholder items are paginated instead so
    the navigation still renders against the known *total*.
    """
    items = results if results else range(0, limit)
    paginator = Paginator(items, page - 1, limit, total)

    def report_href(**overrides):
        query = {'page': page}
        if limit:
            query['max'] = limit
        query.update(overrides)
        return self.req.href.qa(action, query)

    if paginator.has_next_page:
        add_link(self.req, 'next', report_href(page=page + 1),
                 _('Next Page'))
    if paginator.has_previous_page:
        add_link(self.req, 'prev', report_href(page=page - 1),
                 _('Previous Page'))
    paginator.shown_pages = [
        {'href': report_href(page=num), 'class': None, 'string': str(num),
         'title': _('Page %(num)d', num=num)}
        for num in paginator.get_shown_pages(21)
    ]
    paginator.current_page = {'href': None, 'class': 'current',
                              'string': str(paginator.page + 1),
                              'title': None}
    return paginator
def _prepare_results(self, req, filters, results):
    """Paginate repository-search results and attach navigation data.

    :return: dict with the ``Paginator`` (``results``) and the base
             search URL (``page_href``) for the template.
    """
    try:
        page = int(req.args.get('page', '1'))
    except ValueError:
        # A non-numeric ?page= value must not break the results page.
        page = 1
    if page < 1:
        # Zero/negative pages would hand Paginator a bad offset.
        page = 1
    results = Paginator(results, page - 1, 100)
    for idx, result in enumerate(results):
        results[idx] = {'href': result[0], 'title': result[1],
                        'date': format_datetime(result[2]),
                        'author': result[3], 'excerpt': result[4]}

    def search_href(**kw):
        # All links share the same search parameters; only paging
        # differs, so build them in one place instead of seven.
        return req.href.reposearch([(f, 'on') for f in filters],
                                   q=req.args.get('q'),
                                   p=req.args.get('p'),
                                   f=req.args.get('f'),
                                   o=req.args.get('o'),
                                   r=req.args.get('r'),
                                   c=req.args.get('c'),
                                   noquickjump=1, **kw)

    pagedata = []
    for shown_page in results.get_shown_pages(21):
        pagedata.append([search_href(page=shown_page), None,
                         str(shown_page), 'page ' + str(shown_page)])
    fields = ['href', 'class', 'string', 'title']
    results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}
    if results.has_next_page:
        add_link(req, 'next', search_href(page=page + 1), 'Next Page')
    if results.has_previous_page:
        add_link(req, 'prev', search_href(page=page - 1), 'Previous Page')
    return {'results': results, 'page_href': search_href()}
def render_grid(self, req):
    """Retrieve the droplets and pre-process them for rendering."""
    self.log.debug('Rendering grid..')
    index = self.grid_index
    columns = self.fields.get_list('grid_columns')
    format = req.args.get('format')
    resource = Resource('cloud', self.name)
    context = Context.from_request(req, resource)
    # Requested page (1-based in the URL).
    page = int(req.args.get('page', '1'))
    # rss gets its own default page size; 0 disables pagination for
    # csv/tab exports.
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0,
                   'tab': 0}.get(format, self.items_per_page)
    max = req.args.get('max')
    query = req.args.get('query')
    groupby = req.args.get('groupby', self.grid_group)
    groupby_fields = [(field.label,field.name) for field in columns]
    limit = as_int(max, default_max, min=0) # explicit max takes precedence
    offset = (page - 1) * limit
    # explicit sort takes precedence over config
    sort = groupby or req.args.get('sort', self.grid_sort)
    asc = req.args.get('asc', self.grid_asc)
    asc = bool(int(asc)) # string '0' or '1' to int/boolean

    def droplet_href(**kwargs):
        """Generate links to this cloud droplet preserving user
        variables, and sorting and paging variables.
        """
        params = {}
        if sort: params['sort'] = sort
        params['page'] = page
        if max: params['max'] = max
        if query: params['query'] = query
        if groupby: params['groupby'] = groupby
        params.update(kwargs)
        # Normalize 'asc' to the '0'/'1' strings used in URLs.
        params['asc'] = params.get('asc', asc) and '1' or '0'
        return req.href.cloud(self.name, params)

    data = {'action': 'view',
            'buttons': [],
            'resource': resource,
            'context': context,
            'title': self.title,
            'description': self.description,
            'label': self.label,
            'columns': columns,
            'id_field': self.id_field,
            'max': limit,
            'query': query,
            'groupby': groupby,
            'groupby_fields': [('','')] + groupby_fields,
            'message': None,
            'paginator': None,
            'droplet_href': droplet_href,
            }
    try:
        self.log.debug('About to search chef..')
        sort_ = sort.strip('_') # handle dynamic attributes
        rows,total = self.chefapi.search(index, sort_, asc, limit, offset,
                                         query or '*:*')
        numrows = len(rows)
        self.log.debug('Chef search returned %s rows' % numrows)
    except Exception:
        # Render the page with the traceback as the message rather than
        # failing the whole request.
        import traceback; msg = "Oops...\n" + traceback.format_exc()+"\n"
        data['message'] = _(to_unicode(msg))
        self.log.debug(data['message'])
        return 'droplet_grid.html', data, None
    # Only paginate when a positive page size is in effect (exports use
    # limit == 0 and show everything).
    paginator = None
    if limit > 0:
        paginator = Paginator(rows, page - 1, limit, total)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', droplet_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', droplet_href(page=page - 1),
                     _('Previous Page'))
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([droplet_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items
    # Place retrieved columns in groups, according to naming conventions
    # * _col_ means fullrow, i.e. a group with one header
    # * col_ means finish the current group and start a new one
    header_groups = [[]]
    for field in columns:
        header = {'col': field.name,
                  'title': field.label,
                  'hidden': False,
                  'asc': None,
                  }
        if field.name == sort:
            header['asc'] = asc
        header_group = header_groups[-1]
        header_group.append(header)
    # Structure the rows and cells:
    # - group rows according to __group__ value, if defined
    # - group cells the same way headers are grouped
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, item in enumerate(rows):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                col = header['col']
                field = self.fields[col]
                value = field.get(item, req)
                cell = {'value': value, 'header': header, 'index': col_idx}
                col_idx += 1
                # Detect and create new group
                if col == groupby and value != prev_group_value:
                    prev_group_value = value
                    row_groups.append( (value,[]) )
                # Other row properties
                row['__idx__'] = row_idx
                if col == self.id_field:
                    row['id'] = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        # Per-row permission check; rows the user may not view are
        # dropped entirely.
        resource = Resource('cloud', '%s/%s' % (self.name,row['id']))
        if 'CLOUD_VIEW' not in req.perm(resource):
            continue
        authorized_results.append(item)
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)
    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows,
                 'sorting_enabled': len(row_groups) == 1})
    # FIXME: implement formats
    # if format == 'rss':
    #     data['email_map'] = Chrome(self.env).get_email_map()
    #     data['context'] = Context.from_request(req, report_resource,
    #                                            absurls=True)
    #     return 'report.rss', data, 'application/rss+xml'
    # elif format == 'csv':
    #     filename = id and 'report_%s.csv' % id or 'report.csv'
    #     self._send_csv(req, cols, authorized_results, mimetype='text/csv',
    #                    filename=filename)
    # elif format == 'tab':
    #     filename = id and 'report_%s.tsv' % id or 'report.tsv'
    #     self._send_csv(req, cols, authorized_results, '\t',
    #                    mimetype='text/tab-separated-values',
    #                    filename=filename)
    # else:
    # Preserve the page number in the export links only when an explicit
    # 'max' came in with the request.
    page = max is not None and page or None
    add_link(req, 'alternate', droplet_href(format='rss', page=None),
             _('RSS Feed'), 'application/rss+xml', 'rss')
    add_link(req, 'alternate', droplet_href(format='csv', page=page),
             _('Comma-delimited Text'), 'text/plain')
    add_link(req, 'alternate', droplet_href(format='tab', page=page),
             _('Tab-delimited Text'), 'text/plain')
    self.log.debug('Rendered grid')
    return 'droplet_grid.html', data, None
def render_grid(self, req):
    """Retrieve the droplets and pre-process them for rendering."""
    self.log.debug('Rendering grid..')
    index = self.grid_index
    columns = self.fields.get_list('grid_columns')
    format = req.args.get('format')
    resource = Resource('cloud', self.name)
    context = Context.from_request(req, resource)
    # Requested page (1-based in the URL).
    page = int(req.args.get('page', '1'))
    # rss gets its own default page size; 0 disables pagination for
    # csv/tab exports.
    default_max = {
        'rss': self.items_per_page_rss,
        'csv': 0,
        'tab': 0
    }.get(format, self.items_per_page)
    max = req.args.get('max')
    query = req.args.get('query')
    groupby = req.args.get('groupby', self.grid_group)
    groupby_fields = [(field.label, field.name) for field in columns]
    limit = as_int(max, default_max, min=0)  # explicit max takes precedence
    offset = (page - 1) * limit
    # explicit sort takes precedence over config
    sort = groupby or req.args.get('sort', self.grid_sort)
    asc = req.args.get('asc', self.grid_asc)
    asc = bool(int(asc))  # string '0' or '1' to int/boolean

    def droplet_href(**kwargs):
        """Generate links to this cloud droplet preserving user
        variables, and sorting and paging variables.
        """
        params = {}
        if sort: params['sort'] = sort
        params['page'] = page
        if max: params['max'] = max
        if query: params['query'] = query
        if groupby: params['groupby'] = groupby
        params.update(kwargs)
        # Normalize 'asc' to the '0'/'1' strings used in URLs.
        params['asc'] = params.get('asc', asc) and '1' or '0'
        return req.href.cloud(self.name, params)

    data = {
        'action': 'view',
        'buttons': [],
        'resource': resource,
        'context': context,
        'title': self.title,
        'description': self.description,
        'label': self.label,
        'columns': columns,
        'id_field': self.id_field,
        'max': limit,
        'query': query,
        'groupby': groupby,
        'groupby_fields': [('', '')] + groupby_fields,
        'message': None,
        'paginator': None,
        'droplet_href': droplet_href,
    }
    try:
        self.log.debug('About to search chef..')
        sort_ = sort.strip('_')  # handle dynamic attributes
        rows, total = self.chefapi.search(index, sort_, asc, limit, offset,
                                          query or '*:*')
        numrows = len(rows)
        self.log.debug('Chef search returned %s rows' % numrows)
    except Exception:
        # Render the page with the traceback as the message rather than
        # failing the whole request.
        import traceback
        msg = "Oops...\n" + traceback.format_exc() + "\n"
        data['message'] = _(to_unicode(msg))
        self.log.debug(data['message'])
        return 'droplet_grid.html', data, None
    # Only paginate when a positive page size is in effect (exports use
    # limit == 0 and show everything).
    paginator = None
    if limit > 0:
        paginator = Paginator(rows, page - 1, limit, total)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', droplet_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', droplet_href(page=page - 1),
                     _('Previous Page'))
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([
                droplet_href(page=p), None,
                str(p), _('Page %(num)d', num=p)
            ])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {
            'href': None,
            'class': 'current',
            'string': str(paginator.page + 1),
            'title': None
        }
        numrows = paginator.num_items
    # Place retrieved columns in groups, according to naming conventions
    # * _col_ means fullrow, i.e. a group with one header
    # * col_ means finish the current group and start a new one
    header_groups = [[]]
    for field in columns:
        header = {
            'col': field.name,
            'title': field.label,
            'hidden': False,
            'asc': None,
        }
        if field.name == sort:
            header['asc'] = asc
        header_group = header_groups[-1]
        header_group.append(header)
    # Structure the rows and cells:
    # - group rows according to __group__ value, if defined
    # - group cells the same way headers are grouped
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, item in enumerate(rows):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                col = header['col']
                field = self.fields[col]
                value = field.get(item, req)
                cell = {'value': value, 'header': header, 'index': col_idx}
                col_idx += 1
                # Detect and create new group
                if col == groupby and value != prev_group_value:
                    prev_group_value = value
                    row_groups.append((value, []))
                # Other row properties
                row['__idx__'] = row_idx
                if col == self.id_field:
                    row['id'] = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        # Per-row permission check; rows the user may not view are
        # dropped entirely.
        resource = Resource('cloud', '%s/%s' % (self.name, row['id']))
        if 'CLOUD_VIEW' not in req.perm(resource):
            continue
        authorized_results.append(item)
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)
    data.update({
        'header_groups': header_groups,
        'row_groups': row_groups,
        'numrows': numrows,
        'sorting_enabled': len(row_groups) == 1
    })
    # FIXME: implement formats
    # if format == 'rss':
    #     data['email_map'] = Chrome(self.env).get_email_map()
    #     data['context'] = Context.from_request(req, report_resource,
    #                                            absurls=True)
    #     return 'report.rss', data, 'application/rss+xml'
    # elif format == 'csv':
    #     filename = id and 'report_%s.csv' % id or 'report.csv'
    #     self._send_csv(req, cols, authorized_results, mimetype='text/csv',
    #                    filename=filename)
    # elif format == 'tab':
    #     filename = id and 'report_%s.tsv' % id or 'report.tsv'
    #     self._send_csv(req, cols, authorized_results, '\t',
    #                    mimetype='text/tab-separated-values',
    #                    filename=filename)
    # else:
    # Preserve the page number in the export links only when an explicit
    # 'max' came in with the request.
    page = max is not None and page or None
    add_link(req, 'alternate', droplet_href(format='rss', page=None),
             _('RSS Feed'), 'application/rss+xml', 'rss')
    add_link(req, 'alternate', droplet_href(format='csv', page=page),
             _('Comma-delimited Text'), 'text/plain')
    add_link(req, 'alternate', droplet_href(format='tab', page=page),
             _('Tab-delimited Text'), 'text/plain')
    self.log.debug('Rendered grid')
    return 'droplet_grid.html', data, None
def process_request(self, req):
    """Show the current user's projects, optionally filtered by status.

    POST checkbox filters default to 'off' (browsers only submit checked
    boxes), while the initial GET defaults all three statuses to 'on'.
    Anonymous users are redirected to the login page.
    """
    data = {}
    if req.authname and req.authname != 'anonymous':
        my_all_projects = self._all_my_projects(req.authname)  # all approved projects
        data['projects_to_show'] = my_all_projects
        if req.method == "POST":
            pending = req.args.get('pending', 'off')
            approved = req.args.get('approved', 'off')
            rejected = req.args.get('rejected', 'off')
            if pending == 'on' or approved == 'on' or rejected == 'on':
                # NOTE(review): cnx/cur are opened but never used in this
                # branch.
                cnx = self.env.get_db_cnx()
                cur = cnx.cursor()
                projects_to_show = []
                if pending == 'on':
                    projects_to_show += self._my_pending_projects(req.authname)
                if approved == 'on':
                    projects_to_show += self._my_approved_projects(req.authname)
                if rejected == 'on':
                    projects_to_show += self._my_rejected_projects(req.authname)
                data['projects_to_show'] = projects_to_show
            else:
                # No filter checked: fall back to showing everything.
                data['projects_to_show'] = self._all_my_projects(req.authname)
            #paginate
            items = data['projects_to_show']
            quote_to_show = items  # NOTE(review): unused local
            page = int(req.args.get('page', '1'))
            results = Paginator(items, page - 1, max_per_page=10)
            pagedata = []
            shown_pages = results.get_shown_pages(21)
            for shown_page in shown_pages:
                # Links preserve the active filter selection.
                page_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=shown_page)
                pagedata.append([page_href, None, str(shown_page),
                                 'page ' + str(shown_page)])
            fields = ['href', 'class', 'string', 'title']
            results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
            results.current_page = {'href': None, 'class': 'current',
                                    'string': str(results.page + 1),
                                    'title': None}
            if results.has_next_page:
                next_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=page + 1)
                add_link(req, 'next', next_href, _('Next Page'))
            if results.has_previous_page:
                prev_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=page - 1)
                add_link(req, 'prev', prev_href, _('Previous Page'))
            data['page_href'] = req.href.myproject()
            data['projects_to_show'] = results.items
            data['paginator'] = results
            return "my_projects.html", data, None
        else:
            # GET: default every filter to 'on' so all projects show.
            pending = req.args.get('pending', 'on')
            approved = req.args.get('approved', 'on')
            rejected = req.args.get('rejected', 'on')
            if pending == 'on' or approved == 'on' or rejected == 'on':
                # NOTE(review): cnx/cur are opened but never used in this
                # branch.
                cnx = self.env.get_db_cnx()
                cur = cnx.cursor()
                projects_to_show = []
                if pending == 'on':
                    projects_to_show += self._my_pending_projects(req.authname)
                if approved == 'on':
                    projects_to_show += self._my_approved_projects(req.authname)
                if rejected == 'on':
                    projects_to_show += self._my_rejected_projects(req.authname)
                data['projects_to_show'] = projects_to_show
            else:
                data['projects_to_show'] = self._all_my_projects(req.authname)
            items = data['projects_to_show']
            page = int(req.args.get('page', '1'))
            results = Paginator(items, page - 1, max_per_page=10)
            pagedata = []
            shown_pages = results.get_shown_pages(21)
            for shown_page in shown_pages:
                page_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=shown_page)
                pagedata.append([page_href, None, str(shown_page),
                                 'page ' + str(shown_page)])
            fields = ['href', 'class', 'string', 'title']
            results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
            results.current_page = {'href': None, 'class': 'current',
                                    'string': str(results.page + 1),
                                    'title': None}
            if results.has_next_page:
                next_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=page + 1)
                add_link(req, 'next', next_href, _('Next Page'))
            if results.has_previous_page:
                prev_href = req.href.myproject(pending=pending,
                                               approved=approved,
                                               rejected=rejected,
                                               page=page - 1)
                add_link(req, 'prev', prev_href, _('Previous Page'))
            data['page_href'] = req.href.myproject()
            data['projects_to_show'] = results.items
            data['paginator'] = results
            return "my_projects.html", data, None
    else:
        req.redirect(req.href.login())
def process_request(self, req):
    # Render the projects listing.  Three audiences:
    #   * PROJECT_ADMIN users get the management page with three
    #     independently-paginated lists (pending/approved/rejected) and,
    #     on POST, per-project approve/reject/delete actions;
    #   * other authenticated users get the approved-projects index;
    #   * anonymous visitors get a read-only approved-projects page.
    data={}
    data['base_url']=self.env.config.get('projectsmanager','base_url')
    if req.authname and req.authname!='anonymous':
        if 'PROJECT_ADMIN' in req.perm:
            pending_projects=self._pending_projects()
            approved_projects=self._approved_projects()
            rejected_projects=self._rejected_projects()
            # to get the particular items on that page
            if req.method=="POST":
                # p/a/r are the current page numbers of the three lists.
                # NOTE(review): int() raises ValueError on a non-numeric
                # query argument — confirm upstream validation exists.
                p=int(req.args.get('p','1'))
                a=int(req.args.get('a','1'))
                r=int(req.args.get('r','1'))
                # Re-slice each list so the POSTed actions are applied
                # only to the projects that were visible on the page.
                results_p=Paginator(pending_projects,p-1,max_per_page=10)
                data['pending_projects']=results_p.items
                results_a=Paginator(approved_projects,a-1,max_per_page=10)
                data['approved_projects']=results_a.items
                results_r=Paginator(rejected_projects,r-1,max_per_page=10)
                data['rejected_projects']=results_r.items
                # Each row's submit button is named after the project, so
                # req.args[proj_name] carries the chosen action.
                for i in data['pending_projects']:
                    action=req.args.get(i['proj_name'])
                    if action=='approve':
                        # NOTE(review): `self` is passed explicitly in
                        # addition to the bound receiver — this only works
                        # if these helpers are declared with an extra
                        # leading parameter; verify their signatures.
                        self._approve_a_project(self,i['proj_name'],req)
                        self._create_a_project(self,i['owner'],i['proj_name'],i['proj_full_name'],i['description'],req)
                    if action=='reject':
                        self._reject_a_project(i['proj_name'],req)
                for i in data['approved_projects']:
                    action=req.args.get(i['proj_name'])
                    if action=='delete':
                        self._delete_a_project(i['proj_name'],req)
                for i in data['rejected_projects']:
                    action=req.args.get(i['proj_name'])
                    if action=='delete':
                        self._delete_a_project(i['proj_name'],req)
                    if action=='approve':
                        # NOTE(review): same extra-`self` pattern as above.
                        self._approve_a_project(self,i['proj_name'],req)
                        self._create_a_project(self,i['owner'],i['proj_name'],i['proj_full_name'],i['description'],req)
                # Post/redirect/get: come back to the same page numbers.
                req.redirect(req.href.manage(p=p,a=a,r=r))
            else:
                p=int(req.args.get('p','1'))
                a=int(req.args.get('a','1'))
                r=int(req.args.get('r','1'))
                #page pending
                # Each pager's links vary only its own page argument and
                # keep the other two lists on their current pages.
                results1=Paginator(pending_projects,p-1,max_per_page=10)
                pagedata1=[]
                data['p']=results1
                shown_pages1 = results1.get_shown_pages(21)
                for shown_page in shown_pages1:
                    page_href = req.href.manage(p=shown_page,a=a,r=r)
                    pagedata1.append([page_href,None, str(shown_page), 'page ' + str(shown_page)])
                fields1 = ['href', 'class', 'string', 'title']
                results1.shown_pages = [dict(zip(fields1, i)) for i in pagedata1]
                results1.current_page = {'href': None, 'class': 'current',
                                         'string': str(results1.page + 1), 'title':None}
                if results1.has_next_page:
                    next_href=req.href.manage(p=p+1,a=a,r=r)
                    data['next_href1']=next_href
                if results1.has_previous_page:
                    prev_href=req.href.manage(p=p-1,a=a,r=r)
                    data['prev_href1']=prev_href
                data['pending_projects']=results1.items
                #page approved
                results2=Paginator(approved_projects,a-1,max_per_page=10)
                pagedata2=[]
                data['a']=results2
                shown_pages2 = results2.get_shown_pages(21)
                for shown_page in shown_pages2:
                    page_href = req.href.manage(p=p,a=shown_page,r=r)
                    pagedata2.append([page_href,None, str(shown_page), 'page ' + str(shown_page)])
                fields2 = ['href', 'class', 'string', 'title']
                results2.shown_pages = [dict(zip(fields2, i)) for i in pagedata2]
                results2.current_page = {'href': None, 'class': 'current',
                                         'string': str(results2.page + 1), 'title':None}
                if results2.has_next_page:
                    next_href=req.href.manage(p=p,a=a+1,r=r)
                    data['next_href2']=next_href
                if results2.has_previous_page:
                    prev_href=req.href.manage(p=p,a=a-1,r=r)
                    data['prev_href2']=prev_href
                data['approved_projects']=results2.items
                #page rejected
                results3=Paginator(rejected_projects,r-1,max_per_page=10)
                pagedata3=[]
                data['r']=results3
                shown_pages3 = results3.get_shown_pages(21)
                for shown_page in shown_pages3:
                    page_href = req.href.manage(p=p,a=a,r=shown_page)
                    pagedata3.append([page_href,None, str(shown_page), 'page ' + str(shown_page)])
                fields3 = ['href', 'class', 'string', 'title']
                results3.shown_pages = [dict(zip(fields3, i)) for i in pagedata3]
                results3.current_page = {'href': None, 'class': 'current',
                                         'string': str(results3.page + 1), 'title':None}
                if results3.has_next_page:
                    next_href=req.href.manage(p=p,a=a,r=r+1)
                    data['next_href3']=next_href
                if results3.has_previous_page:
                    prev_href=req.href.manage(p=p,a=a,r=r-1)
                    data['prev_href3']=prev_href
                data['rejected_projects']=results3.items
                data['page_href']=req.href.manage()
                data['current_pending_page']=p
                data['current_approved_page']=a
                data['current_rejected_page']=r
                return "projects_manage.html",data,None
        else:
            # for authorized username
            # Non-admin authenticated users see only approved projects.
            items=self._approved_projects()
            page=int(req.args.get('page','1'))
            results=Paginator(items,page-1,max_per_page=10)
            pagedata=[]
            data['paginator']=results
            shown_pages = results.get_shown_pages(21)
            for shown_page in shown_pages:
                page_href = req.href.manage(page=shown_page)
                pagedata.append([page_href,None, str(shown_page), 'page ' + str(shown_page)])
            fields = ['href', 'class', 'string', 'title']
            results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
            results.current_page = {'href': None, 'class': 'current',
                                    'string': str(results.page + 1), 'title':None}
            if results.has_next_page:
                next_href=req.href.manage(page=page+1)
                add_link(req,'next',next_href,_('Next Page'))
            if results.has_previous_page:
                prev_href=req.href.manage(page=page-1)
                add_link(req,'prev',prev_href,_('Previous Page'))
            data['page_href']=req.href.manage()
            data['approved_projects']=results.items
            return "auth_index.html",data,None
    else:
        #page for anonymous
        # Same approved-projects listing, but linked under /project and
        # rendered with the anonymous template.
        items=self._approved_projects()
        page=int(req.args.get('page','1'))
        results=Paginator(items,page-1,max_per_page=10)
        pagedata=[]
        data['paginator']=results
        shown_pages = results.get_shown_pages(21)
        for shown_page in shown_pages:
            page_href = req.href.project(page=shown_page)
            pagedata.append([page_href,None, str(shown_page), 'page ' + str(shown_page)])
        fields = ['href', 'class', 'string', 'title']
        results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        results.current_page = {'href': None, 'class': 'current',
                                'string': str(results.page + 1), 'title':None}
        if results.has_next_page:
            next_href=req.href.project(page=page+1)
            add_link(req,'next',next_href,_('Next Page'))
        if results.has_previous_page:
            prev_href=req.href.project(page=page-1)
            add_link(req,'prev',prev_href,_('Previous Page'))
        data['page_href']=req.href.project()
        data['approved_projects']=results.items
        return 'anonymous.html',data,None
def process_request(self, req):
    """Render the search page.

    Collects available filters from every registered search source,
    runs the query (if any) against the selected sources, sorts the
    hits newest-first and paginates them.

    :raises TracError: when the query is shorter than
        ``self.min_query_length``.
    :return: ``('search.html', data, None)`` template tuple, or the
        OpenSearch descriptor for ``/search/opensearch``.
    """
    req.perm.assert_permission('SEARCH_VIEW')
    if req.path_info == '/search/opensearch':
        return ('opensearch.xml', {},
                'application/opensearchdescription+xml')
    available_filters = []
    for source in self.search_sources:
        available_filters += source.get_search_filters(req)
    # Filters explicitly named in the request win; otherwise enable the
    # default ones (2-tuples, or 3-tuples whose third element is true).
    # Note: `f[0] in req.args` replaces the removed-in-Py3 `has_key`,
    # and `len(f) < 3 or f[2]` is the simplified equivalent of the
    # original `len(f) < 3 or len(f) > 2 and f[2]`.
    filters = [f[0] for f in available_filters if f[0] in req.args]
    if not filters:
        filters = [f[0] for f in available_filters
                   if len(f) < 3 or f[2]]
    data = {'filters': [{'name': f[0], 'label': f[1],
                         'active': f[0] in filters}
                        for f in available_filters],
            'quickjump': None,
            'results': []}
    query = req.args.get('q')
    data['query'] = query
    if query:
        data['quickjump'] = self._check_quickjump(req, query)
        if query.startswith('!'):
            # A leading '!' suppresses quickjump but is not a term.
            query = query[1:]
        terms = self._get_search_terms(query)
        # Refuse queries that obviously would result in a huge result set
        if not terms or \
                len(terms) == 1 and len(terms[0]) < self.min_query_length:
            raise TracError(_('Search query too short. Query must be at '
                              'least %(num)s characters long.',
                              num=self.min_query_length),
                            _('Search Error'))
        results = []
        for source in self.search_sources:
            results += list(source.get_search_results(req, terms,
                                                      filters))
        # Newest first: result[2] is the change time.  Key-based sort is
        # the stable, Py3-compatible replacement for the old cmp lambda.
        results.sort(key=lambda result: result[2], reverse=True)
        page = int(req.args.get('page', '1'))
        results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
        # Convert the raw result tuples into template-friendly dicts.
        for idx, result in enumerate(results):
            results[idx] = {'href': result[0], 'title': result[1],
                            'date': format_datetime(result[2]),
                            'author': result[3], 'excerpt': result[4]}
        pagedata = []
        data['results'] = results
        shown_pages = results.get_shown_pages(21)
        for shown_page in shown_pages:
            # Page links must carry the active filters and query along.
            page_href = req.href.search([(f, 'on') for f in filters],
                                        q=req.args.get('q'),
                                        page=shown_page, noquickjump=1)
            pagedata.append([page_href, None, str(shown_page),
                             'page ' + str(shown_page)])
        fields = ['href', 'class', 'string', 'title']
        results.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        results.current_page = {'href': None, 'class': 'current',
                                'string': str(results.page + 1),
                                'title': None}
        if results.has_next_page:
            next_href = req.href.search(zip(filters,
                                            ['on'] * len(filters)),
                                        q=req.args.get('q'),
                                        page=page + 1, noquickjump=1)
            add_link(req, 'next', next_href, _('Next Page'))
        if results.has_previous_page:
            prev_href = req.href.search(zip(filters,
                                            ['on'] * len(filters)),
                                        q=req.args.get('q'),
                                        page=page - 1, noquickjump=1)
            add_link(req, 'prev', prev_href, _('Previous Page'))
        data['page_href'] = req.href.search(
            zip(filters, ['on'] * len(filters)), q=req.args.get('q'),
            noquickjump=1)
    add_stylesheet(req, 'common/css/search.css')
    return 'search.html', data, None
def _render_view(self, req, id):
    """Retrieve the report results and pre-process them for rendering.

    Saved custom queries redirect to the query module; otherwise the
    report SQL is executed (paginated), columns are grouped by naming
    convention, rows are grouped by ``__group__`` and filtered by the
    caller's permissions, and the result is rendered as HTML, RSS, CSV
    or tab-separated output depending on the ``format`` argument.
    """
    title, description, sql = self.get_report(id)
    try:
        args = self.get_var_args(req)
    except ValueError as e:
        raise TracError(_("Report failed: %(error)s", error=e))
    # If this is a saved custom query, redirect to the query module
    #
    # A saved query is either an URL query (?... or query:?...),
    # or a query language expression (query:...).
    #
    # It may eventually contain newlines, for increased clarity.
    #
    query = ''.join([line.strip() for line in sql.splitlines()])
    if query and (query[0] == '?' or query.startswith('query:?')):
        query = query if query[0] == '?' else query[6:]
        report_id = 'report=%s' % id
        if 'report=' in query:
            if not report_id in query:
                err = _('When specified, the report number should be '
                        '"%(num)s".', num=id)
                req.redirect(req.href.report(id, action='edit', error=err))
        else:
            if query[-1] != '?':
                query += '&'
            query += report_id
        req.redirect(req.href.query() + quote_query_string(query))
    elif query.startswith('query:'):
        try:
            from trac.ticket.query import Query, QuerySyntaxError
            query = Query.from_string(self.env, query[6:], report=id)
            req.redirect(query.get_href(req.href))
        except QuerySyntaxError as e:
            req.redirect(req.href.report(id, action='edit',
                                         error=to_unicode(e)))
    format = req.args.get('format')
    if format == 'sql':
        # _send_sql terminates the request (no return value used here).
        self._send_sql(req, id, title, description, sql)
    title = '{%i} %s' % (id, title)
    report_resource = Resource('report', id)
    req.perm(report_resource).require('REPORT_VIEW')
    context = web_context(req, report_resource)
    page = int(req.args.get('page', '1'))
    # RSS gets its own page size; csv/tab are unpaginated (0 = no limit).
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0, 'tab': 0}.get(format, self.items_per_page)
    max = req.args.get('max')
    limit = as_int(max, default_max, min=0)  # explict max takes precedence
    offset = (page - 1) * limit
    sort_col = req.args.get('sort', '')
    asc = req.args.get('asc', 1)
    asc = bool(int(asc))  # string '0' or '1' to int/boolean

    def report_href(**kwargs):
        """Generate links to this report preserving user variables,
        and sorting and paging variables.
        """
        params = args.copy()
        if sort_col:
            params['sort'] = sort_col
        params['page'] = page
        if max:
            params['max'] = max
        params.update(kwargs)
        params['asc'] = '1' if params.get('asc', asc) else '0'
        return req.href.report(id, params)

    data = {'action': 'view',
            'report': {'id': id, 'resource': report_resource},
            'context': context,
            'title': sub_vars(title, args),
            'description': sub_vars(description or '', args),
            'max': limit, 'args': args, 'show_args_form': False,
            'message': None, 'paginator': None,
            'report_href': report_href,
            }
    res = self.execute_paginated_report(req, id, sql, args, limit, offset)
    if len(res) == 2:
        # A 2-tuple signals failure: (exception, rendered SQL).
        e, sql = res
        data['message'] = \
            tag_("Report execution failed: %(error)s %(sql)s",
                 error=tag.pre(exception_to_unicode(e)),
                 sql=tag(tag.hr(),
                         tag.pre(sql, style="white-space: pre")))
        return 'report_view.html', data, None
    cols, results, num_items, missing_args, limit_offset = res
    need_paginator = limit > 0 and limit_offset
    need_reorder = limit_offset is None
    results = [list(row) for row in results]
    numrows = len(results)

    paginator = None
    if need_paginator:
        paginator = Paginator(results, page - 1, limit, num_items)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', report_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', report_href(page=page - 1),
                     _('Previous Page'))
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([report_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items

    # Place retrieved columns in groups, according to naming conventions
    #  * _col_ means fullrow, i.e. a group with one header
    #  * col_ means finish the current group and start a new one
    field_labels = TicketSystem(self.env).get_ticket_field_labels()
    header_groups = [[]]
    for idx, col in enumerate(cols):
        if col in field_labels:
            title = field_labels[col]
        else:
            title = col.strip('_').capitalize()
        header = {
            'col': col,
            'title': title,
            'hidden': False,
            'asc': None,
        }
        if col == sort_col:
            header['asc'] = asc
            if not paginator and need_reorder:
                # this dict will have enum values for sorting
                # and will be used in sortkey(), if non-empty:
                sort_values = {}
                if sort_col in ('status', 'resolution', 'priority',
                                'severity'):
                    # must fetch sort values for that columns
                    # instead of comparing them as strings
                    with self.env.db_query as db:
                        for name, value in db(
                                "SELECT name, %s FROM enum WHERE type=%%s"
                                % db.cast('value', 'int'), (sort_col,)):
                            sort_values[name] = value

                def sortkey(row):
                    val = row[idx]
                    # check if we have sort_values, then use them as keys.
                    if sort_values:
                        return sort_values.get(val)
                    # otherwise, continue with string comparison:
                    if isinstance(val, basestring):
                        val = val.lower()
                    return val

                results = sorted(results, key=sortkey, reverse=(not asc))
        header_group = header_groups[-1]
        if col.startswith('__') and col.endswith('__'):  # __col__
            header['hidden'] = True
        elif col[0] == '_' and col[-1] == '_':  # _col_
            header_group = []
            header_groups.append(header_group)
            header_groups.append([])
        elif col[0] == '_':  # _col
            header['hidden'] = True
        elif col[-1] == '_':  # col_
            header_groups.append([])
        header_group.append(header)

    # Structure the rows and cells:
    #  - group rows according to __group__ value, if defined
    #  - group cells the same way headers are grouped
    chrome = Chrome(self.env)
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, result in enumerate(results):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        realm = self.realm
        parent_realm = ''
        parent_id = ''
        email_cells = []
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                value = cell_value(result[col_idx])
                cell = {'value': value, 'header': header,
                        'index': col_idx}
                col = header['col']
                col_idx += 1
                # Detect and create new group
                if col == '__group__' and value != prev_group_value:
                    prev_group_value = value
                    # Brute force handling of email in group by header
                    row_groups.append(
                        (value and chrome.format_author(req, value), []))
                # Other row properties
                row['__idx__'] = row_idx
                if col in self._html_cols:
                    row[col] = value
                if col in ('report', 'ticket', 'id', '_id'):
                    row['id'] = value
                # Special casing based on column name
                col = col.strip('_')
                if col in ('reporter', 'cc', 'owner'):
                    email_cells.append(cell)
                elif col == 'realm':
                    realm = value
                elif col == 'parent_realm':
                    parent_realm = value
                elif col == 'parent_id':
                    parent_id = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        if parent_realm:
            resource = Resource(realm, row.get('id'),
                                parent=Resource(parent_realm, parent_id))
        else:
            resource = Resource(realm, row.get('id'))
        # FIXME: for now, we still need to hardcode the realm in the action
        if resource.realm.upper()+'_VIEW' not in req.perm(resource):
            # Skip rows the requesting user may not see.
            continue
        authorized_results.append(result)
        if email_cells:
            for cell in email_cells:
                emails = chrome.format_emails(context.child(resource),
                                              cell['value'])
                result[cell['index']] = cell['value'] = emails
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)
    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows})
    if format == 'rss':
        data['context'] = web_context(req, report_resource, absurls=True)
        return 'report.rss', data, 'application/rss+xml'
    elif format == 'csv':
        filename = 'report_%s.csv' % id if id else 'report.csv'
        self._send_csv(req, cols, authorized_results,
                       mimetype='text/csv', filename=filename)
    elif format == 'tab':
        filename = 'report_%s.tsv' % id if id else 'report.tsv'
        self._send_csv(req, cols, authorized_results, '\t',
                       mimetype='text/tab-separated-values',
                       filename=filename)
    else:
        p = page if max is not None else None
        add_link(req, 'alternate',
                 auth_link(req, report_href(format='rss', page=None)),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        add_link(req, 'alternate', report_href(format='csv', page=p),
                 _('Comma-delimited Text'), 'text/plain')
        add_link(req, 'alternate', report_href(format='tab', page=p),
                 _('Tab-delimited Text'), 'text/plain')
        if 'REPORT_SQL_VIEW' in req.perm('report', id):
            add_link(req, 'alternate',
                     req.href.report(id=id, format='sql'),
                     _('SQL Query'), 'text/plain')
        # reuse the session vars of the query module so that
        # the query navigation links on the ticket can be used to
        # navigate report results as well
        try:
            req.session['query_tickets'] = \
                ' '.join([str(int(row['id']))
                          for rg in row_groups for row in rg[1]])
            # NOTE(review): the same key is assigned twice on one
            # statement — harmless but looks like a copy/paste slip.
            req.session['query_href'] = \
                req.session['query_href'] = report_href()
            # Kludge: we have to clear the other query session
            # variables, but only if the above succeeded
            for var in ('query_constraints', 'query_time'):
                if var in req.session:
                    del req.session[var]
        except (ValueError, KeyError):
            pass
        if set(data['args']) - set(['USER']):
            data['show_args_form'] = True
            add_script(req, 'common/js/folding.js')
        if missing_args:
            add_warning(req, _(
                'The following arguments are missing: %(args)s',
                args=", ".join(missing_args)))
        return 'report_view.html', data, None
paginator = None if need_paginator: paginator = Paginator(results, page - 1, limit, num_items) data["paginator"] = paginator if paginator.has_next_page: add_link(req, "next", report_href(page=page + 1), _("Next Page")) if paginator.has_previous_page: add_link(req, "prev", report_href(page=page - 1), _("Previous Page")) pagedata = [] shown_pages = paginator.get_shown_pages(21) for p in shown_pages: pagedata.append([report_href(page=p), None, str(p), _("Page %(num)d", num=p)]) fields = ["href", "class", "string", "title"] paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata] paginator.current_page = { "href": None, "class": "current", "string": str(paginator.page + 1), "title": None, } numrows = paginator.num_items # Place retrieved columns in groups, according to naming conventions # * _col_ means fullrow, i.e. a group with one header # * col_ means finish the current group and start a new one field_labels = TicketSystem(self.env).get_ticket_field_labels() header_groups = [[]]
paginator = Paginator(results, page - 1, limit, num_items) data['paginator'] = paginator if paginator.has_next_page: add_link(req, 'next', report_href(page=page + 1), _('Next Page')) if paginator.has_previous_page: add_link(req, 'prev', report_href(page=page - 1), _('Previous Page')) pagedata = [] shown_pages = paginator.get_shown_pages(21) for p in shown_pages: pagedata.append([report_href(page=p), None, str(p), _('Page %(num)d', num=p)]) fields = ['href', 'class', 'string', 'title'] paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata] paginator.current_page = {'href': None, 'class': 'current', 'string': str(paginator.page + 1), 'title': None} numrows = paginator.num_items # Place retrieved columns in groups, according to naming conventions # * _col_ means fullrow, i.e. a group with one header # * col_ means finish the current group and start a new one field_labels = TicketSystem(self.env).get_ticket_field_labels() header_groups = [[]] for idx, col in enumerate(cols): if col in field_labels: title = field_labels[col]
def _render_view(self, req, id):
    """Retrieve the report results and pre-process them for rendering.

    Newer variant of the report view: loads the report via the
    ``Report`` model, reads paging/sorting via ``req.args.getint``,
    reports execution failures through ``data['message']`` instead of
    raising, and wires up select-field autocomplete for report args.
    """
    r = Report(self.env, id)
    title, description, sql = r.title, r.description, r.query
    # If this is a saved custom query, redirect to the query module
    #
    # A saved query is either an URL query (?... or query:?...),
    # or a query language expression (query:...).
    #
    # It may eventually contain newlines, for increased clarity.
    #
    query = ''.join(line.strip() for line in sql.splitlines())
    if query and (query[0] == '?' or query.startswith('query:?')):
        query = query if query[0] == '?' else query[6:]
        report_id = 'report=%s' % id
        if 'report=' in query:
            if report_id not in query:
                err = _('When specified, the report number should be '
                        '"%(num)s".', num=id)
                req.redirect(req.href.report(id, action='edit',
                                             error=err))
        else:
            if query[-1] != '?':
                query += '&'
            query += report_id
        req.redirect(req.href.query() + quote_query_string(query))
    elif query.startswith('query:'):
        from trac.ticket.query import Query, QuerySyntaxError
        try:
            query = Query.from_string(self.env, query[6:], report=id)
        except QuerySyntaxError as e:
            req.redirect(req.href.report(id, action='edit',
                                         error=to_unicode(e)))
        else:
            req.redirect(query.get_href(req.href))
    format = req.args.get('format')
    if format == 'sql':
        # _send_sql terminates the request (no return value used here).
        self._send_sql(req, id, title, description, sql)
    title = '{%i} %s' % (id, title)
    report_resource = Resource(self.realm, id)
    req.perm(report_resource).require('REPORT_VIEW')
    context = web_context(req, report_resource)
    page = req.args.getint('page', 1)
    # RSS gets its own page size; csv/tab are unpaginated (0 = no limit).
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0, 'tab': 0}.get(format, self.items_per_page)
    max = req.args.getint('max')
    limit = as_int(max, default_max, min=0)  # explict max takes precedence
    offset = (page - 1) * limit
    sort_col = req.args.get('sort', '')
    asc = req.args.getint('asc', 0, min=0, max=1)
    # args is filled from get_var_args() below; report_href closes over it.
    args = {}

    def report_href(**kwargs):
        """Generate links to this report preserving user variables,
        and sorting and paging variables.
        """
        params = args.copy()
        if sort_col:
            params['sort'] = sort_col
        if page != 1:
            params['page'] = page
        if max != default_max:
            params['max'] = max
        params.update(kwargs)
        params['asc'] = 1 if params.get('asc', asc) else None
        return req.href.report(id, params)

    data = {'action': 'view',
            'report': {'id': id, 'resource': report_resource},
            'context': context, 'title': title,
            'description': description,
            'max': limit, 'args': args, 'show_args_form': False,
            'message': None, 'paginator': None,
            'report_href': report_href}
    try:
        args = self.get_var_args(req)
        sql = self.get_default_var_args(args, sql)
    except ValueError as e:
        data['message'] = _("Report failed: %(error)s", error=e)
        return 'report_view.html', data, None
    data.update({'args': args, 'title': sub_vars(title, args),
                 'description': sub_vars(description or '', args)})
    try:
        res = self.execute_paginated_report(req, id, sql, args, limit,
                                            offset)
    except TracError as e:
        data['message'] = _("Report failed: %(error)s", error=e)
    else:
        if len(res) == 2:
            # A 2-tuple signals failure: (exception, rendered SQL).
            e, sql = res
            data['message'] = \
                tag_("Report execution failed: %(error)s %(sql)s",
                     error=tag.pre(exception_to_unicode(e)),
                     sql=tag(tag.hr(),
                             tag.pre(sql, style="white-space: pre")))
    if data['message']:
        return 'report_view.html', data, None
    cols, results, num_items, missing_args, limit_offset = res
    need_paginator = limit > 0 and limit_offset
    need_reorder = limit_offset is None
    results = [list(row) for row in results]
    numrows = len(results)

    paginator = None
    if need_paginator:
        paginator = Paginator(results, page - 1, limit, num_items)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', report_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', report_href(page=page - 1),
                     _('Previous Page'))
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([report_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items

    # Place retrieved columns in groups, according to naming conventions
    #  * _col_ means fullrow, i.e. a group with one header
    #  * col_ means finish the current group and start a new one
    field_labels = TicketSystem(self.env).get_ticket_field_labels()
    header_groups = [[]]
    for idx, col in enumerate(cols):
        if col in field_labels:
            title = field_labels[col]
        else:
            title = col.strip('_').capitalize()
        header = {
            'col': col,
            'title': title,
            'hidden': False,
            'asc': None,
        }
        if col == sort_col:
            if asc:
                data['asc'] = asc
            data['sort'] = sort_col
            header['asc'] = bool(asc)
            if not paginator and need_reorder:
                # this dict will have enum values for sorting
                # and will be used in sortkey(), if non-empty:
                sort_values = {}
                if sort_col in ('status', 'resolution', 'priority',
                                'severity'):
                    # must fetch sort values for that columns
                    # instead of comparing them as strings
                    with self.env.db_query as db:
                        for name, value in db(
                                "SELECT name, %s FROM enum WHERE type=%%s"
                                % db.cast('value', 'int'), (sort_col,)):
                            sort_values[name] = value

                def sortkey(row):
                    val = row[idx]
                    # check if we have sort_values, then use them as keys.
                    if sort_values:
                        return sort_values.get(val)
                    # otherwise, continue with string comparison:
                    if isinstance(val, basestring):
                        val = val.lower()
                    return val

                results = sorted(results, key=sortkey, reverse=not asc)
        header_group = header_groups[-1]
        if col.startswith('__') and col.endswith('__'):  # __col__
            header['hidden'] = True
        elif col[0] == '_' and col[-1] == '_':  # _col_
            header_group = []
            header_groups.append(header_group)
            header_groups.append([])
        elif col[0] == '_':  # _col
            header['hidden'] = True
        elif col[-1] == '_':  # col_
            header_groups.append([])
        header_group.append(header)

    # Structure the rows and cells:
    #  - group rows according to __group__ value, if defined
    #  - group cells the same way headers are grouped
    chrome = Chrome(self.env)
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, result in enumerate(results):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        realm = TicketSystem.realm
        parent_realm = ''
        parent_id = ''
        email_cells = []
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                value = cell_value(result[col_idx])
                cell = {'value': value, 'header': header,
                        'index': col_idx}
                col = header['col']
                col_idx += 1
                # Detect and create new group
                if col == '__group__' and value != prev_group_value:
                    prev_group_value = value
                    # Brute force handling of email in group by header
                    row_groups.append(
                        (value and chrome.format_author(req, value), []))
                # Other row properties
                row['__idx__'] = row_idx
                if col in self._html_cols:
                    row[col] = value
                if col in ('report', 'ticket', 'id', '_id'):
                    row['id'] = value
                # Special casing based on column name
                col = col.strip('_')
                if col in ('reporter', 'cc', 'owner'):
                    email_cells.append(cell)
                elif col == 'realm':
                    realm = value
                elif col == 'parent_realm':
                    parent_realm = value
                elif col == 'parent_id':
                    parent_id = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        if parent_realm:
            resource = Resource(realm, row.get('id'),
                                parent=Resource(parent_realm, parent_id))
        else:
            resource = Resource(realm, row.get('id'))
        # FIXME: for now, we still need to hardcode the realm in the action
        if resource.realm.upper() + '_VIEW' not in req.perm(resource):
            # Skip rows the requesting user may not see.
            continue
        authorized_results.append(result)
        if email_cells:
            for cell in email_cells:
                emails = chrome.format_emails(context.child(resource),
                                              cell['value'])
                result[cell['index']] = cell['value'] = emails
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)
    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows})
    if format == 'rss':
        data['context'] = web_context(req, report_resource, absurls=True)
        return 'report.rss', data, 'application/rss+xml'
    elif format == 'csv':
        filename = 'report_%s.csv' % id if id else 'report.csv'
        self._send_csv(req, cols, authorized_results,
                       mimetype='text/csv', filename=filename)
    elif format == 'tab':
        filename = 'report_%s.tsv' % id if id else 'report.tsv'
        self._send_csv(req, cols, authorized_results, '\t',
                       mimetype='text/tab-separated-values',
                       filename=filename)
    else:
        p = page if max is not None else None
        add_link(req, 'alternate',
                 auth_link(req, report_href(format='rss', page=None)),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        add_link(req, 'alternate', report_href(format='csv', page=p),
                 _('Comma-delimited Text'), 'text/plain')
        add_link(req, 'alternate', report_href(format='tab', page=p),
                 _('Tab-delimited Text'), 'text/plain')
        if 'REPORT_SQL_VIEW' in req.perm(self.realm, id):
            add_link(req, 'alternate',
                     req.href.report(id=id, format='sql'),
                     _('SQL Query'), 'text/plain')
        # reuse the session vars of the query module so that
        # the query navigation links on the ticket can be used to
        # navigate report results as well
        try:
            req.session['query_tickets'] = \
                ' '.join(str(int(row['id']))
                         for rg in row_groups for row in rg[1])
            # NOTE(review): the same key is assigned twice on one
            # statement — harmless but looks like a copy/paste slip.
            req.session['query_href'] = \
                req.session['query_href'] = report_href()
            # Kludge: we have to clear the other query session
            # variables, but only if the above succeeded
            for var in ('query_constraints', 'query_time'):
                if var in req.session:
                    del req.session[var]
        except (ValueError, KeyError):
            pass
        if set(data['args']) - {'USER'}:
            data['show_args_form'] = True
            # Add values of all select-type ticket fields for autocomplete.
            fields = TicketSystem(self.env).get_ticket_fields()
            arg_values = {}
            for arg in set(data['args']) - {'USER'}:
                attrs = fields.by_name(arg.lower())
                if attrs and 'options' in attrs:
                    arg_values[attrs['name']] = attrs['options']
            if arg_values:
                add_script_data(req, arg_values=arg_values)
                Chrome(self.env).add_jquery_ui(req)
        if missing_args:
            add_warning(req, _(
                'The following arguments are missing: %(args)s',
                args=", ".join(missing_args)))
        return 'report_view.html', data, None
def process_request(self, req):
    """Dispatch a crashdump web request.

    Routes on the ``action`` request argument:

    - ``crash_list``: render the paginated crash list.
    - ``view`` (default): render a single crash report, or one of its
      sub-pages selected via the ``params`` argument.
    - ``sysinfo_report`` / ``systeminfo_raw``: system-information views.
    - ``delete``: delete the crash and render a confirmation page.
    - ``minidump_*`` / ``coredump_*`` / ``raw`` / ``xml`` / ``html`` /
      ``text``: stream the corresponding dump file to the client.

    A single crash is located via either the ``crashuuid`` or ``crashid``
    request argument; :class:`ResourceNotFound` is raised when neither is
    given, the crash does not exist, or the action/sub-page is invalid.

    :param req: the Trac request object
    :return: a ``(template, data, metadata)`` tuple, or ``None`` for
             direct-send responses handled by ``_send_file``/``_send_data``
    """
    # Jinja2-based Trac wants a content-type dict; legacy Genshi wants None.
    if crashdump_use_jinja2:
        metadata = {'content_type': 'text/html'}
    else:
        metadata = None
    action = req.args.get('action', 'view')

    if action == 'crash_list':
        page = req.args.getint('page', 1)
        default_max = self.items_per_page
        max = req.args.getint('max')
        # explict max takes precedence
        limit = as_int(max, default_max, min=0)
        offset = (page - 1) * limit
        sort_col = req.args.get('sort', '')
        asc = req.args.getint('asc', 0, min=0, max=1)
        title = ''
        description = ''
        data = {
            'action': 'crash_list',
            'max': limit,
            'numrows': 0,
            'title': title,
            'description': description,
            'message': None,
            'paginator': None
        }
        req_status = req.args.get('status') or 'active'
        #results = CrashDump.query(env=self.env, status=req_status)
        results = CrashDump.query(env=self.env, status=None)
        data['results'] = results
        limit_offset = 0
        # NOTE(review): with limit_offset hard-coded to 0, need_paginator is
        # always falsy and need_reorder always False, so the paginator branch
        # below is dead code (it also references a `report_href` helper that
        # is not defined in this scope) -- looks like scaffolding copied from
        # Trac's report module; confirm before enabling.
        need_paginator = limit > 0 and limit_offset
        need_reorder = limit_offset is None
        numrows = len(results)

        paginator = None
        if need_paginator:
            # FIX: was `num_items`, an undefined name (latent NameError);
            # the total row count computed above is `numrows`.
            paginator = Paginator(results, page - 1, limit, numrows)
            data['paginator'] = paginator
            if paginator.has_next_page:
                add_link(req, 'next', report_href(page=page + 1),
                         _('Next Page'))
            if paginator.has_previous_page:
                add_link(req, 'prev', report_href(page=page - 1),
                         _('Previous Page'))

            pagedata = []
            shown_pages = paginator.get_shown_pages(21)
            for p in shown_pages:
                pagedata.append([report_href(page=p), None, str(p),
                                 _('Page %(num)d', num=p)])
            fields = ['href', 'class', 'string', 'title']
            paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
            paginator.current_page = {'href': None, 'class': 'current',
                                      'string': str(paginator.page + 1),
                                      'title': None}
            numrows = paginator.num_items
        data['paginator'] = paginator

        add_script_data(req, {'comments_prefs': self._get_prefs(req)})
        if not crashdump_use_jinja2:
            add_script(req, 'common/js/folding.js')
        add_script(req, 'crashdump/crashdump.js')
        add_stylesheet(req, 'crashdump/crashdump.css')
        return 'list.html', data, metadata

    # Locate the crash object by UUID or numeric id; time the DB lookup.
    start = time.time()
    if 'crashuuid' in req.args:
        crashobj = CrashDump.find_by_uuid(self.env, req.args['crashuuid'])
        if not crashobj:
            raise ResourceNotFound(
                _("Crash %(id)s does not exist.",
                  id=req.args['crashuuid']),
                _("Invalid crash identifier"))
    elif 'crashid' in req.args:
        crashobj = CrashDump.find_by_id(self.env, req.args['crashid'])
        if not crashobj:
            raise ResourceNotFound(
                _("Crash %(id)s does not exist.",
                  id=req.args['crashid']),
                _("Invalid crash identifier"))
    else:
        raise ResourceNotFound(_("No crash identifier specified."))
    end = time.time()

    xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'
    #req.perm('crash', id, version).require('TICKET_VIEW')
    params = _get_list_from_args(req.args, 'params', None)
    self.log.debug('process_request %s:%s-%s' % (action, type(params), params))

    if action is None or action == 'view':
        data = self._prepare_data(req, crashobj)
        xmlfile = data['xmlfile'] if 'xmlfile' in data else None
        data['dbtime'] = end - start
        field_changes = {}
        data.update({
            'action': action,
            'params': params,
            # Store a timestamp for detecting "mid air collisions"
            'start_time': crashobj['changetime']
        })
        self._insert_crashdump_data(req, crashobj, data,
                                    get_reporter_id(req, 'author'),
                                    field_changes)
        if params is None:
            # Full report page (no sub-page requested).
            add_script_data(req, {'comments_prefs': self._get_prefs(req)})
            if not crashdump_use_jinja2:
                add_script(req, 'common/js/folding.js')
            add_script(req, 'crashdump/crashdump.js')
            add_stylesheet(req, 'crashdump/crashdump.css')
            data['show_delete_crash'] = self.show_delete_crash
            linked_tickets = []
            for tkt_id in crashobj.linked_tickets:
                a = self._link_ticket_by_id(req, tkt_id)
                if a:
                    linked_tickets.append(a)
            data['linked_tickets'] = linked_tickets
            return 'report.html', data, metadata
        else:
            # Sub-page of the report, selected by the first params entry.
            if params[0] in [
                'sysinfo', 'sysinfo_ex', 'fast_protect_version_info',
                'exception', 'memory_blocks', 'memory_regions',
                'modules', 'threads', 'stackdumps', 'file_info'
            ]:
                return params[0] + '.html', data, metadata
            elif params[0] == 'memory_block':
                block_base = safe_list_get_as_int(params, 1, 0)
                memory_block = None
                for b in data['memory_blocks']:
                    if b.base == block_base:
                        memory_block = b
                        break
                data.update({
                    'memory_block': memory_block,
                    'memory_block_base': block_base
                })
                return 'memory_block.html', data, metadata
            elif params[0] == 'stackdump':
                threadid = safe_list_get_as_int(params, 1, 0)
                stackdump = None
                if threadid in data['stackdumps']:
                    stackdump = data['stackdumps'][threadid]
                self.log.debug('stackdump %s' % stackdump)
                data.update({'stackdump': stackdump, 'threadid': threadid})
                return 'stackdump.html', data, metadata
            else:
                raise ResourceNotFound(
                    _("Invalid sub-page request %(param)s for crash %(uuid)s.",
                      param=str(params[0]), uuid=str(crashobj.uuid)))
    elif action == 'sysinfo_report':
        data = self._prepare_data(req, crashobj)
        data['dbtime'] = end - start
        if 'xmlreport' in data:
            xmlfile = data['xmlreport']
            data['sysinfo_report'] = None
            # FIX: was `isinstance(xmlfile, string)` -- `string` is not a
            # type name here (Python 2 leftover); test against `str`.
            if isinstance(xmlfile, XMLReport) or \
                    (isinstance(xmlfile, str) and os.path.isfile(xmlfile)):
                try:
                    data['sysinfo_report'] = SystemInfoReport(
                        xmlreport=xmlfile)
                except SystemInfoReport.SystemInfoReportException as e:
                    data['xmlfile_error'] = str(e)
            else:
                data['xmlfile_error'] = _(
                    "XML file %(file)s is unavailable", file=xmlfile)
        data.update({
            'action': action,
            'params': params,
            # Store a timestamp for detecting "mid air collisions"
            'start_time': crashobj['changetime']
        })
        if params is None:
            add_script_data(req, {'comments_prefs': self._get_prefs(req)})
            if not crashdump_use_jinja2:
                add_script(req, 'common/js/folding.js')
            add_script(req, 'crashdump/crashdump.js')
            add_stylesheet(req, 'crashdump/crashdump.css')
            linked_tickets = []
            for tkt_id in crashobj.linked_tickets:
                a = self._link_ticket_by_id(req, tkt_id)
                if a:
                    linked_tickets.append(a)
            data['linked_tickets'] = linked_tickets
            return 'sysinfo_report.html', data, metadata
        else:
            if params[0] in [
                'sysinfo', 'sysinfo_ex', 'sysinfo_opengl', 'sysinfo_env',
                'sysinfo_terra4d_dirs', 'sysinfo_cpu', 'sysinfo_locale',
                'sysinfo_network', 'sysinfo_rawdata'
            ]:
                return params[0] + '.html', data, metadata
            else:
                raise ResourceNotFound(
                    _("Invalid sub-page request %(param)s for crash %(uuid)s.",
                      param=str(params[0]), uuid=str(crashobj.uuid)))
    elif action == 'systeminfo_raw':
        data = self._prepare_data(req, crashobj)
        xmlfile = data['xmlfile'] if 'xmlfile' in data else None
        data['dbtime'] = end - start
        fast_protect_system_info = data[
            'fast_protect_system_info'] \
            if 'fast_protect_system_info' in data else None
        if fast_protect_system_info:
            if crashobj['crashhostname']:
                filename = "%s_%s.terra4d-system-info" % (str(
                    crashobj.uuid), str(crashobj['crashhostname']))
            else:
                filename = "%s.terra4d-system-info" % str(crashobj.uuid)
            if fast_protect_system_info.rawdata:
                return self._send_data(
                    req, fast_protect_system_info.rawdata.raw,
                    filename=filename)
        raise ResourceNotFound(
            _("No system information available for crash %(uuid)s.",
              uuid=str(crashobj.uuid)))
    elif action == 'delete':
        add_script_data(req, {'comments_prefs': self._get_prefs(req)})
        add_script(req, 'crashdump/crashdump.js')
        add_stylesheet(req, 'crashdump/crashdump.css')
        # Capture id/uuid before the row (and dump files) are removed.
        data = {'id': crashobj.id, 'uuid': crashobj.uuid}
        crashobj.delete(self.dumpdata_dir)
        return 'deleted.html', data, metadata
    elif action == 'minidump_raw':
        return self._send_file(req, crashobj, 'minidumpfile')
    elif action == 'minidump_text':
        return self._send_file(req, crashobj, 'minidumpreporttextfile')
    elif action == 'minidump_xml':
        return self._send_file(req, crashobj, 'minidumpreportxmlfile')
    elif action == 'minidump_html':
        return self._send_file(req, crashobj, 'minidumpreporthtmlfile')
    elif action == 'coredump_raw':
        return self._send_file(req, crashobj, 'coredumpfile')
    elif action == 'coredump_text':
        return self._send_file(req, crashobj, 'coredumpreporttextfile')
    elif action == 'coredump_xml':
        return self._send_file(req, crashobj, 'coredumpreportxmlfile')
    elif action == 'coredump_html':
        return self._send_file(req, crashobj, 'coredumpreporthtmlfile')
    elif action == 'raw':
        # Generic actions prefer the minidump variant when both exist.
        if crashobj['minidumpfile']:
            return self._send_file(req, crashobj, 'minidumpfile')
        elif crashobj['coredumpfile']:
            return self._send_file(req, crashobj, 'coredumpfile')
    elif action == 'xml':
        if crashobj['minidumpreportxmlfile']:
            return self._send_file(req, crashobj, 'minidumpreportxmlfile')
        elif crashobj['coredumpreportxmlfile']:
            return self._send_file(req, crashobj, 'coredumpreportxmlfile')
    elif action == 'html':
        if crashobj['minidumpreporthtmlfile']:
            return self._send_file(req, crashobj, 'minidumpreporthtmlfile')
        elif crashobj['coredumpreporthtmlfile']:
            return self._send_file(req, crashobj, 'coredumpreporthtmlfile')
    elif action == 'text':
        if crashobj['minidumpreporttextfile']:
            return self._send_file(req, crashobj, 'minidumpreporttextfile')
        elif crashobj['coredumpreporttextfile']:
            return self._send_file(req, crashobj, 'coredumpreporttextfile')
    raise ResourceNotFound(
        _("Invalid action %(action)s for crash %(uuid)s specified.",
          action=str(action), uuid=str(crashobj.uuid)))
def process_request(self, req):
    """Render the build archive: either the paginated list of builds or
    the detail view for a single revision/queue id.

    URL shape decides the view: ``/buildarchive`` (optionally with a
    trailing slash) shows the list; ``/buildarchive/<ident>`` shows the
    details, where ``<ident>`` is ``r<svn-rev>``, a 40-char git hash, or
    a queue id. Both views support ``format=rss``.

    :param req: the Trac request object
    :return: ``(template, data, content_type)`` tuple
    :raises TracError: for an invalid ``page`` argument
    """
    uriparts = req.path_info.split('/')
    add_stylesheet(req, 'redports/redports.css')
    add_ctxtnav(req, _('All'), req.href.buildarchive())

    if len(uriparts) == 2 or (len(uriparts) == 3 and len(uriparts[2]) < 1):
        # Buildarchive list
        page = int(req.args.get('page', '1'))
        limit = self.items_per_page
        offset = (page - 1) * limit
        if limit < 0 or page < 1:
            raise TracError('Invalid page')

        builds = BuildarchiveIterator(self.env)
        # Optional owner filter; other filter slots unused for the list.
        builds.filter(req.args.get('owner', None), None, None, True)
        builddata = list(builds.get_data(limit, offset))
        # builds.count() supplies the total so the paginator knows the
        # overall number of pages, not just this slice.
        paginator = Paginator(builddata, page - 1, limit, builds.count())

        # Build [href, class, string, title] rows for each shown page.
        pagedata = []
        shown_pages = paginator.get_shown_pages()
        for p in shown_pages:
            pagedata.append([
                req.href.buildarchive(page=p,
                                      owner=req.args.get('owner', None)),
                None,
                str(p),
                _('Page %(num)d', num=p)
            ])

        if paginator.has_next_page:
            add_link(
                req, 'next',
                req.href.buildarchive(page=page + 1,
                                      owner=req.args.get('owner', None)),
                _('Next Page'))
        if paginator.has_previous_page:
            add_link(
                req, 'prev',
                req.href.buildarchive(page=page - 1,
                                      owner=req.args.get('owner', None)),
                _('Previous Page'))

        # Convert the rows into the dict shape the page template expects.
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {
            'href': None,
            'class': 'current',
            'string': str(paginator.page + 1),
            'title': None
        }

        if req.args.get('format') == 'rss':
            return ('buildarchive.rss', {
                'builds': builddata,
                'paginator': paginator
            }, 'application/rss+xml')

        add_link(
            req, 'alternate',
            req.href.buildarchive(page=page,
                                  owner=req.args.get('owner', None),
                                  format='rss'),
            _('RSS Feed'), 'application/rss+xml', 'rss')
        return ('buildarchive.html', {
            'builds': builddata,
            'paginator': paginator
        }, None)
    else:
        # Buildarchive details
        builds = BuildarchiveIterator(self.env)
        # svn revision, git revision or queueid
        if uriparts[2].startswith("r"):
            # "r1234" -> svn revision 1234 (strip the leading "r")
            builds.filter(None, None, uriparts[2][1:])
        elif len(uriparts[2]) == 40:
            # 40 hex chars -> git commit hash
            builds.filter(None, None, uriparts[2])
        else:
            # anything else is treated as a queue id
            builds.filter(None, uriparts[2])

        if req.args.get('format') == 'rss':
            return ('buildarchivedetails.rss', {
                'builds': builds.get_data(),
            }, 'application/rss+xml')

        add_link(req, 'alternate',
                 req.href.buildarchive(uriparts[2], format='rss'),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        return ('buildarchivedetails.html', {
            'builds': builds.get_data()
        }, None)
def template_data(self, context, tickets, orig_list=None, orig_time=None,
                  req=None):
    """Prepare the template data dictionary for rendering query results.

    Builds the constraint descriptions, column headers, field metadata,
    filter-mode lists, ticket grouping and pagination state consumed by
    the query template.

    :param context: rendering context (provides ``href``)
    :param tickets: list of ticket dicts to display; may contain one
                    extra ticket (max+1) used to detect a partial last
                    group, which is removed in place here
    :param orig_list: if truthy, tickets newer than ``orig_time`` are
                      flagged as 'added'/'changed'
    :param orig_time: reference time for the added/changed flags
    :param req: optional request; when given, next/prev page links are
                added, otherwise the paginator index is hidden
    :return: dict of template variables ('query', 'tickets', 'groups',
             'paginator', ...)
    """
    # Decompose each constraint value into a match mode ('~', '^', '$',
    # optionally negated with '!') and the bare value.
    constraints = {}
    for k, v in self.constraints.items():
        constraint = {'values': [], 'mode': ''}
        for val in v:
            neg = val.startswith('!')
            if neg:
                val = val[1:]
            mode = ''
            # A leading ~/^/$ is a mode prefix unless the whole value is
            # a substitution variable (e.g. $USER).
            if val[:1] in ('~', '^', '$') \
                    and not val in self.substitutions:
                mode, val = val[:1], val[1:]
            constraint['mode'] = (neg and '!' or '') + mode
            constraint['values'].append(val)
        constraints[k] = constraint

    cols = self.get_columns()
    labels = dict([(f['name'], f['label']) for f in self.fields])
    # Columns rendered through the wiki engine (text fields with wiki
    # format).
    wikify = set([f['name'] for f in self.fields
                  if f['type'] == 'text' and f.get('format') == 'wiki'])

    # TODO: remove after adding time/changetime to the api.py
    labels['changetime'] = _('Modified')
    labels['time'] = _('Created')

    # One header per column; clicking a header re-sorts, toggling the
    # direction when the column is already the sort order.
    headers = [{
        'name': col,
        'label': labels.get(col, _('Ticket')),
        'wikify': col in wikify,
        'href': self.get_href(context.href, order=col,
                              desc=(col == self.order and not self.desc))
    } for col in cols]

    fields = {}
    for field in self.fields:
        if field['name'] == 'owner' and field['type'] == 'select':
            # Make $USER work when restrict_owner = true
            field['options'].insert(0, '$USER')
        field_data = {}
        field_data.update(field)
        del field_data['name']
        fields[field['name']] = field_data

    # Available filter modes per field type, shown in the query form.
    modes = {}
    modes['text'] = [
        {'name': _("contains"), 'value': "~"},
        {'name': _("doesn't contain"), 'value': "!~"},
        {'name': _("begins with"), 'value': "^"},
        {'name': _("ends with"), 'value': "$"},
        {'name': _("is"), 'value': ""},
        {'name': _("is not"), 'value': "!"}
    ]
    modes['textarea'] = [
        {'name': _("contains"), 'value': "~"},
        {'name': _("doesn't contain"), 'value': "!~"},
    ]
    modes['select'] = [
        {'name': _("is"), 'value': ""},
        {'name': _("is not"), 'value': "!"}
    ]

    # Group tickets by the configured group field, preserving first-seen
    # group order in groupsequence.
    groups = {}
    groupsequence = []
    for ticket in tickets:
        if orig_list:
            # Mark tickets added or changed since the query was first
            # executed
            if ticket['time'] > orig_time:
                ticket['added'] = True
            elif ticket['changetime'] > orig_time:
                ticket['changed'] = True
        if self.group:
            group_key = ticket[self.group]
            groups.setdefault(group_key, []).append(ticket)
            if not groupsequence or group_key not in groupsequence:
                groupsequence.append(group_key)
    groupsequence = [(value, groups[value]) for value in groupsequence]

    # detect whether the last group continues on the next page,
    # by checking if the extra (max+1)th ticket is in the last group
    last_group_is_partial = False
    if groupsequence and self.max and len(tickets) == self.max + 1:
        del tickets[-1]
        if len(groupsequence[-1][1]) == 1:
            # additional ticket started a new group
            del groupsequence[-1]  # remove that additional group
        else:
            # additional ticket stayed in the group
            last_group_is_partial = True
            del groupsequence[-1][1][-1]  # remove the additional ticket

    results = Paginator(tickets, self.page - 1, self.max, self.num_items)

    if req:
        if results.has_next_page:
            next_href = self.get_href(req.href, max=self.max,
                                      page=self.page + 1)
            add_link(req, 'next', next_href, _('Next Page'))
        if results.has_previous_page:
            prev_href = self.get_href(req.href, max=self.max,
                                      page=self.page - 1)
            add_link(req, 'prev', prev_href, _('Previous Page'))
    else:
        # No request (e.g. wiki-embedded query): suppress the page index.
        results.show_index = False

    # Build the shown-pages index for the paginator widget.
    pagedata = []
    shown_pages = results.get_shown_pages(21)
    for page in shown_pages:
        pagedata.append([self.get_href(context.href, page=page), None,
                         str(page), _('Page %(num)d', num=page)])
    results.shown_pages = [dict(zip(['href', 'class', 'string', 'title'], p))
                           for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1),
                            'title': None}

    return {'query': self,
            'context': context,
            'col': cols,
            'row': self.rows,
            'constraints': constraints,
            'labels': labels,
            'headers': headers,
            'fields': fields,
            'modes': modes,
            'tickets': tickets,
            'groups': groupsequence or [(None, tickets)],
            'last_group_is_partial': last_group_is_partial,
            'paginator': results}
def render_packages_panel(self, req, category, panel, path_info):
    """Render the package-repository admin panel.

    With ``path_info`` set, shows/edits a single package file (detail
    view, with save/cancel POST handling); otherwise shows the paginated
    file list and handles add/remove POST actions.

    :param req: the Trac request object
    :param category: admin category (used to build redirect hrefs)
    :param panel: admin panel name (used to build redirect hrefs)
    :param path_info: id of the file to show in detail view, or falsy
                      for the list view
    :return: ``('packagerepository_admin_files.html', data, None)``
    :raises TracError: unknown package id, missing upload, or empty
                       removal selection
    """
    package_repo_mod = PackageRepositoryModule(self.env)
    repositories = package_repo_mod.get_all_repository_types()

    # Detail view?
    if path_info:
        id = path_info
        file = PackageRepositoryFile.select_by_id(self.env, id)
        if not file:
            raise TracError("Package does not exist!")
        if req.method == 'POST':
            if req.args.get('save'):
                # Copy the edited fields from the form and persist them.
                file.repository = req.args.get('repository')
                file.package = req.args.get('package')
                file.version = req.args.get('version')
                file.filename = req.args.get('filename')
                file.comment = req.args.get('comment')
                PackageRepositoryFile.update(self.env, file)
                add_notice(req, 'Your changes have been saved.')
                req.redirect(req.href.admin(category, panel))
            elif req.args.get('cancel'):
                req.redirect(req.href.admin(category, panel))
        Chrome(self.env).add_wiki_toolbars(req)
        data = {
            'view': 'detail',
            'file': file,
            'repositories': repositories,
        }
    else:
        if req.method == 'POST':
            if req.args.get('add'):
                # Add file
                repotype = req.args.get('repository')
                filename, fileobj, filesize = req.args.getfile('file')
                if not filename:
                    raise TracError("No file uploaded")
                # Store the upload in the repository backend first, then
                # record its metadata.
                package_repo_mod.save_package(repotype, filename, fileobj)
                file = PackageRepositoryFile(None, None, None, None, None,
                                             None)
                file.repository = repotype
                file.package = req.args.get('package')
                file.version = req.args.get('version')
                file.filename = filename
                file.comment = req.args.get('comment')
                PackageRepositoryFile.add(self.env, file)
                add_notice(req, 'The file has been added.')
                req.redirect(req.href.admin(category, panel))
            elif req.args.get('remove'):
                # Remove files
                file_ids = req.args.getlist('sel')
                if not file_ids:
                    raise TracError('No files selected')
                # Delete each backing file, then the DB rows in one go.
                for id in file_ids:
                    file = PackageRepositoryFile.select_by_id(self.env, id)
                    package_repo_mod.delete_package(
                        file.repository, file.filename)
                PackageRepositoryFile.delete_by_ids(self.env, file_ids)
                add_notice(req, 'The files have been removed.')
                req.redirect(req.href.admin(category, panel))

        # Pagination
        page = int(req.args.get('page', 1))
        max_per_page = int(req.args.get('max', 10))
        files = PackageRepositoryFile.select_paginated(
            self.env, page, max_per_page)
        total_count = PackageRepositoryFile.total_count(self.env)
        paginator = Paginator(files, page - 1, max_per_page, total_count)
        if paginator.has_next_page:
            next_href = req.href.admin(category, panel, max=max_per_page,
                                       page=page + 1)
            add_link(req, 'next', next_href, 'Next Page')
        if paginator.has_previous_page:
            prev_href = req.href.admin(category, panel, max=max_per_page,
                                       page=page - 1)
            add_link(req, 'prev', prev_href, 'Previous Page')

        # Build the page-index entries for the paginator widget.
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for page in shown_pages:
            pagedata.append([
                req.href.admin(category, panel, max=max_per_page, page=page),
                None,
                str(page),
                'Page %d' % (page, )
            ])
        paginator.shown_pages = [
            dict(zip(['href', 'class', 'string', 'title'], p))
            for p in pagedata
        ]
        paginator.current_page = {
            'href': None,
            'class': 'current',
            'string': str(paginator.page + 1),
            'title': None
        }
        data = {
            'view': 'list',
            'paginator': paginator,
            'max_per_page': max_per_page,
            'files': files,
            'repositories': repositories,
        }
    return 'packagerepository_admin_files.html', data, None
def process_request(self, req):
    """Dispatch mailing-list web requests.

    Handles, in order:

    - POST subscribe/unsubscribe actions (always end in a redirect);
    - a single message view (``messageid``), including raw mbox download;
    - a conversation view (``conversationid``) with message pagination;
    - a list view (``listname``) with conversation pagination;
    - otherwise the overview of all visible mailing lists.

    :param req: the Trac request object
    :return: ``(template, data, content_type)`` tuple, or ``None`` after
             a raw download has been written to the response
    :raises TracError: for non-integer ``offset``/``page`` arguments
    """
    offset = req.args.get("offset", 0)
    page = req.args.get('page', 1)
    # FIX: these were bare `except:` clauses, which would also swallow
    # KeyboardInterrupt/SystemExit; catch only conversion failures.
    try:
        offset = int(offset)
    except (TypeError, ValueError):
        raise TracError(_('Invalid offset used: %(offset)s', offset=offset))
    try:
        page = int(page)
    except (TypeError, ValueError):
        raise TracError(_('Invalid page used: %(page)s', page=page))
    # offset is recomputed from the page; the parsed value above only
    # served to validate the argument.
    offset = (page - 1) * self.limit

    add_stylesheet(req, 'mailinglist/css/mailinglist.css')
    add_javascript(req, 'mailinglist/mailinglist.js')

    # Only lists the current user may view.
    mailinglists = [m for m in Mailinglist.select(self.env)
                    if "MAILINGLIST_VIEW" in req.perm(m.resource)]

    data = {"mailinglists": mailinglists,
            "offset": offset,
            "limit": self.limit}

    if req.method == 'POST':
        if 'subscribe' in req.args:
            subscribe = True
            unsubscribe = False
            mailinglist_email = req.args.get('subscribe')
        elif 'unsubscribe' in req.args:
            subscribe = False
            unsubscribe = True
            mailinglist_email = req.args.get('unsubscribe')
        else:
            # at the moment we only post subscription info to
            # mailing list page - so if there is none in req.args we
            # can just redirect to mailing list page
            req.redirect(req.href.mailinglist())

        # get mailing list object and check permissions
        mailinglist = Mailinglist.select_by_address(self.env,
                                                    mailinglist_email,
                                                    localpart=True)
        req.perm(mailinglist.resource).require("MAILINGLIST_VIEW")

        # FIX: notices used _('... %s' % name), interpolating before
        # translation, so the catalog could never match the message;
        # use Trac's named-parameter form instead.
        if subscribe:
            mailinglist.subscribe(user=req.authname)
            # subscribe does not return a value to indicate if it
            # was successful, so we have to explicitly check
            if mailinglist.is_subscribed(req.authname):
                add_notice(req, _('You have been subscribed to %(name)s.',
                                  name=mailinglist.name))
            else:
                add_notice(req, _('Unable to subscribe to %(name)s.',
                                  name=mailinglist.name))
        elif unsubscribe:
            mailinglist.unsubscribe(user=req.authname)
            # unsubscribe does not return a value to indicate if it
            # was successful, so we have to explicitly check
            if not mailinglist.is_subscribed(req.authname):
                add_notice(req,
                           _('You have been unsubscribed from %(name)s.',
                             name=mailinglist.name))
            else:
                add_notice(req, _('Unable to unsubscribe from %(name)s.',
                                  name=mailinglist.name))

        # Redirect back to the page the form was posted from.
        if req.path_info.endswith('/mailinglist'):
            # overview mailing list page
            req.redirect(req.href.mailinglist())
        elif 'conversationid' in req.args:
            # individual mailing list conversation log
            req.redirect(req.href.mailinglist(mailinglist_email,
                                              req.args['conversationid']))
        else:
            # individual mailing list homepage
            req.redirect(req.href.mailinglist(mailinglist_email))

    #for mailinglist in mailinglists:
    #    add_ctxtnav(req,
    #                _("List: %s") % mailinglist.name,
    #                req.href.mailinglist(mailinglist.emailaddress))

    if 'messageid' in req.args:
        message = MailinglistMessage(self.env, req.args['messageid'])
        # leaks the subject of the email in the error, wonder if
        # that's a problem...
        req.perm(message.resource).require("MAILINGLIST_VIEW")

        if req.args.get('format') == "raw":
            # Stream the raw message as an mbox attachment.
            req.send_header('Content-Disposition', 'attachment')
            req.send_response(200)
            content = message.raw.bytes
            req.send_header('Content-Type', 'application/mbox')
            req.send_header('Content-Length', len(content))
            req.end_headers()
            if req.method != 'HEAD':
                req.write(content)
            return

        context = Context.from_request(req, message.resource)
        data['message'] = message
        data['attachments'] = \
            AttachmentModule(self.env).attachment_data(context)
        add_link(req, 'up',
                 get_resource_url(self.env, message.conversation.resource,
                                  req.href, offset=data['offset']),
                 _("Back to conversation"))
        prevnext_nav(req, _("Newer message"), _("Older message"),
                     _("Back to conversation"))
        raw_href = get_resource_url(self.env, message.resource, req.href,
                                    format='raw')
        add_link(req, 'alternate', raw_href, _('mbox'), "application/mbox")
        if 'MAILINGLIST_ADMIN' in req.perm:
            add_ctxtnav(req,
                        tag.a(tag.i(class_="fa fa-cog"), ' Manage List',
                              href=req.href.admin(
                                  'mailinglist', 'lists',
                                  message.conversation.mailinglist.emailaddress),
                              title='Manage and subscribe users to the %s mailing list'
                                    % message.conversation.mailinglist.name))
        return 'mailinglist_message.html', data, None

    if 'conversationid' in req.args:
        conversation = MailinglistConversation(self.env,
                                               req.args['conversationid'])
        # also leaks the subject of the first email in the error message
        req.perm(conversation.resource).require("MAILINGLIST_VIEW")
        data['conversation'] = conversation
        data['attachmentselect'] = partial(Attachment.select, self.env)

        results = Paginator(conversation.messages(), page - 1, self.limit)
        if results.has_next_page:
            next_href = get_resource_url(self.env, conversation.resource,
                                         req.href, page=page + 1)
            add_link(req, 'next', next_href, _('Next Page'))
        if results.has_previous_page:
            prev_href = get_resource_url(self.env, conversation.resource,
                                         req.href, page=page - 1)
            add_link(req, 'prev', prev_href, _('Previous Page'))

        shown_pages = results.get_shown_pages()
        pagedata = [{'href': get_resource_url(self.env,
                                              conversation.resource,
                                              req.href, page=page),
                     'class': None,
                     'string': str(page),
                     'title': _('Page %(num)d', num=page)}
                    for page in shown_pages]
        results.shown_pages = pagedata
        results.current_page = {'href': None, 'class': 'current',
                                'string': str(results.page + 1),
                                'title': None}
        data['paginator'] = results

        add_link(req, 'up',
                 get_resource_url(self.env,
                                  conversation.mailinglist.resource,
                                  req.href, offset=data['offset']),
                 _("List of conversations"))
        prevnext_nav(req, _("Newer conversation"), _("Older conversation"),
                     _("Back to list of conversations"))
        if 'MAILINGLIST_ADMIN' in req.perm:
            add_ctxtnav(req,
                        tag.a(tag.i(class_="fa fa-cog"), ' Manage List',
                              href=req.href.admin(
                                  'mailinglist', 'lists',
                                  conversation.mailinglist.emailaddress),
                              title='Manage and subscribe users to the %s mailing list'
                                    % conversation.mailinglist.name))

        # Check if user is already subscribed to mailing list
        # and add the appropriate subscribe / unsubscribe ribbon option
        if conversation.mailinglist.is_subscribed(req.authname):
            add_ctxtnav(req,
                        tag.form(tag.input(
                            tag.a(tag.i(class_='fa fa-eye-slash'),
                                  ' Unsubscribe',
                                  title='Unsubscribe from the %s mailing list'
                                        % conversation.mailinglist.name,
                                  id='subscribe-link'),
                            name='unsubscribe',
                            value=conversation.mailinglist.emailaddress,
                            class_='hidden'),
                            method_='post', action='',
                            id='subscribe-form', class_='hidden'))
        else:
            add_ctxtnav(req,
                        tag.form(tag.input(
                            tag.a(tag.i(class_='fa fa-eye'),
                                  ' Subscribe',
                                  title='Subscribe to the %s mailing list'
                                        % conversation.mailinglist.name,
                                  id='subscribe-link'),
                            name='subscribe',
                            value=conversation.mailinglist.emailaddress,
                            class_='hidden'),
                            method_='post', action='',
                            id='subscribe-form', class_='hidden'))
        return 'mailinglist_conversation.html', data, None

    elif 'listname' in req.args:
        mailinglist = Mailinglist.select_by_address(self.env,
                                                    req.args['listname'],
                                                    localpart=True)
        # leaks the name of the mailinglist
        req.perm(mailinglist.resource).require("MAILINGLIST_VIEW")
        data['mailinglist'] = mailinglist

        results = Paginator(mailinglist.conversations(), page - 1,
                            self.limit)
        if results.has_next_page:
            next_href = get_resource_url(self.env, mailinglist.resource,
                                         req.href, page=page + 1)
            add_link(req, 'next', next_href, _('Next Page'))
        if results.has_previous_page:
            prev_href = get_resource_url(self.env, mailinglist.resource,
                                         req.href, page=page - 1)
            add_link(req, 'prev', prev_href, _('Previous Page'))

        shown_pages = results.get_shown_pages()
        pagedata = [{'href': get_resource_url(self.env,
                                              mailinglist.resource,
                                              req.href, page=page),
                     'class': None,
                     'string': str(page),
                     'title': _('Page %(num)d', num=page)}
                    for page in shown_pages]
        results.shown_pages = pagedata
        results.current_page = {'href': None, 'class': 'current',
                                'string': str(results.page + 1),
                                'title': None}
        data['paginator'] = results

        if data['offset'] + data['limit'] < \
                mailinglist.count_conversations():
            add_link(req, 'next',
                     get_resource_url(self.env, mailinglist.resource,
                                      req.href,
                                      offset=data['offset'] + data['limit']),
                     _("Older conversations"))
        if offset > 0:
            add_link(req, 'prev',
                     get_resource_url(self.env, mailinglist.resource,
                                      req.href,
                                      offset=data['offset'] - data['limit']),
                     _("Newer conversations"))
        add_link(req, 'up', req.href.mailinglist(),
                 _("List of mailinglists"))
        # FIX: the last label was a bare parenthesized string; wrap it in
        # _() like its siblings so it is translatable.
        prevnext_nav(req, _("Newer conversations"), _("Older conversations"),
                     _("Back to Mailinglists"))
        if 'MAILINGLIST_ADMIN' in req.perm:
            add_ctxtnav(req,
                        tag.a(tag.i(class_="fa fa-cog"), ' Manage List',
                              href=req.href.admin('mailinglist', 'lists',
                                                  mailinglist.emailaddress),
                              title='Manage and subscribe users to the %s mailing list'
                                    % mailinglist.name))

        # Check if user is already subscribed to mailing list
        # and add the appropriate subscribe / unsubscribe ribbon option
        if mailinglist.is_subscribed(req.authname):
            add_ctxtnav(req,
                        tag.form(tag.input(
                            tag.a(tag.i(class_='fa fa-eye-slash'),
                                  ' Unsubscribe',
                                  title='Unsubscribe from the %s mailing list'
                                        % mailinglist.name,
                                  id='subscribe-link'),
                            name='unsubscribe',
                            value=mailinglist.emailaddress,
                            class_='hidden'),
                            method_='post', action='',
                            id='subscribe-form', class_='hidden'))
        else:
            add_ctxtnav(req,
                        tag.form(tag.input(
                            tag.a(tag.i(class_='fa fa-eye'),
                                  ' Subscribe',
                                  title='Subscribe to the %s mailing list'
                                        % mailinglist.name,
                                  id='subscribe-link'),
                            name='subscribe',
                            value=mailinglist.emailaddress,
                            class_='hidden'),
                            method_='post', action='',
                            id='subscribe-form', class_='hidden'))
        return 'mailinglist_conversations.html', data, None
    else:
        return 'mailinglist_list.html', data, None