def _paginate(self, req, results):
    """Wrap `results` in a `Paginator` and decorate it with the page
    metadata the ListTagged template expects (shown pages, current page,
    next/prev links)."""
    self.query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1)
    items_per_page = as_int(req.args.get('listtagged_per_page'), None)
    if items_per_page is None:
        items_per_page = self.items_per_page
    result = Paginator(results, current_page - 1, items_per_page)

    # Build [href, class, string, title] rows for each visible page link.
    pagedata = []
    for page in result.get_shown_pages(21):
        page_href = self.get_href(req, items_per_page, page)
        pagedata.append([page_href, None, str(page),
                         _("Page %(num)d", num=page)])
    attributes = ['href', 'class', 'string', 'title']
    result.shown_pages = [dict(zip(attributes, row)) for row in pagedata]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}

    if result.has_next_page:
        next_href = self.get_href(req, items_per_page, current_page + 1)
        add_link(req, 'next', next_href, _('Next Page'))
    if result.has_previous_page:
        prev_href = self.get_href(req, items_per_page, current_page - 1)
        add_link(req, 'prev', prev_href, _('Previous Page'))
    return result
def backup(self, dest_file):
    """Dump the MySQL database to `dest_file` with mysqldump.

    Connection settings come from the ``[trac] database`` URL; the
    password is passed via the ``MYSQL_PWD`` environment variable so
    it never appears on the command line.
    """
    from subprocess import Popen, PIPE
    db_url = self.env.config.get("trac", "database")
    scheme, db_prop = _parse_db_str(db_url)
    db_params = db_prop.setdefault("params", {})
    db_name = os.path.basename(db_prop["path"])

    args = [self.mysqldump_path]
    if "host" in db_prop:
        args.extend(["-h", db_prop["host"]])
    if "port" in db_prop:
        args.extend(["-P", str(db_prop["port"])])
    if "user" in db_prop:
        args.extend(["-u", db_prop["user"]])
    for name, value in db_params.iteritems():
        if name == "compress" and as_int(value, 0):
            args.append("--compress")
        elif name == "named_pipe" and as_int(value, 0):
            args.append("--protocol=pipe")
        elif name == "read_default_file":  # Must be first
            args.insert(1, "--defaults-file=" + value)
        elif name == "unix_socket":
            args.extend(["--protocol=socket", "--socket=" + value])
        elif name not in ("init_command", "read_default_group"):
            self.log.warning("Invalid connection string parameter '%s'",
                             name)
    args.extend(["-r", dest_file, db_name])

    environ = os.environ.copy()
    if "password" in db_prop:
        environ["MYSQL_PWD"] = str(db_prop["password"])
    try:
        # NOTE(review): unlike the other backup() variants in this file,
        # this one never waits on `p` / checks its return code — confirm
        # whether the remainder of the function was lost.
        p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
    except OSError as e:  # `as e` for consistency with the other backup()
        raise TracError(_("Unable to run %(path)s: %(msg)s",
                          path=self.mysqldump_path,
                          msg=exception_to_unicode(e)))
def _paginate(self, req, results):
    """Paginate ListTagged results and attach navigation metadata."""
    self.query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1)
    items_per_page = as_int(req.args.get('listtagged_per_page'), None)
    if items_per_page is None:
        items_per_page = self.items_per_page
    result = Paginator(results, current_page - 1, items_per_page)

    attributes = ['href', 'class', 'string', 'title']
    entries = []
    for page in result.get_shown_pages(21):
        href = self.get_href(req, items_per_page, page)
        entries.append([href, None, str(page), _("Page %(num)d", num=page)])
    result.shown_pages = [dict(zip(attributes, entry)) for entry in entries]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}

    if result.has_next_page:
        add_link(req, 'next',
                 self.get_href(req, items_per_page, current_page + 1),
                 _('Next Page'))
    if result.has_previous_page:
        add_link(req, 'prev',
                 self.get_href(req, items_per_page, current_page - 1),
                 _('Previous Page'))
    return result
def _paginate(self, req, results, realms):
    """Build a `Paginator` for ListTagged results, falling back to
    page 1 when the requested page is out of range."""
    query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1, min=1)
    items_per_page = as_int(req.args.get('listtagged_per_page'),
                            self.items_per_page)
    if items_per_page < 1:
        items_per_page = self.items_per_page
    try:
        result = Paginator(results, current_page - 1, items_per_page)
    except (AssertionError, TracError) as e:
        # `except ... as e` (Python 2.6+) replaces the removed-in-Py3
        # comma form, matching the other handlers in this file.
        self.log.warn("ListTagged macro: %s", e)
        current_page = 1
        result = Paginator(results, current_page - 1, items_per_page)
def _paginate(self, req, results, realms):
    """Build a `Paginator` for ListTagged results, falling back to
    page 1 when the requested page is out of range."""
    query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1, min=1)
    items_per_page = as_int(req.args.get('listtagged_per_page'),
                            self.items_per_page)
    if items_per_page < 1:
        items_per_page = self.items_per_page
    try:
        result = Paginator(results, current_page - 1, items_per_page)
    except (AssertionError, TracError) as e:
        # AssertionError raised in Trac < 1.0.10, TracError otherwise.
        # `as e` (Python 2.6+) replaces the removed-in-Py3 comma form.
        self.log.warn("ListTagged macro: %s", e)
        current_page = 1
        result = Paginator(results, current_page - 1, items_per_page)
def __init__(self, path, log, user=None, password=None, host=None,
             port=None, params=None):
    """Open a MySQLdb connection and align the session charset with the
    database's actual charset.

    :param path: database name, optionally with a leading ``/``
    :param params: extra connection-string parameters (dict)
    """
    if params is None:  # avoid the shared mutable default argument
        params = {}
    if path.startswith('/'):
        path = path[1:]
    if password is None:  # `is None`, not `== None`
        password = ''
    if port is None:
        port = 3306
    opts = {}
    for name, value in params.iteritems():
        if name in ('init_command', 'read_default_file',
                    'read_default_group', 'unix_socket'):
            opts[name] = value
        elif name in ('compress', 'named_pipe'):
            opts[name] = as_int(value, 0)
        else:
            # Use the `log` parameter: self.log is only set below by
            # ConnectionWrapper.__init__, so self.log would raise here.
            log.warning("Invalid connection string parameter '%s'", name)
    cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host,
                          port=port, charset='utf8', **opts)
    if hasattr(cnx, 'encoders'):
        # 'encoders' undocumented but present since 1.2.1 (r422)
        cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
    cursor = cnx.cursor()
    cursor.execute("SHOW VARIABLES WHERE "
                   " variable_name='character_set_database'")
    self.charset = cursor.fetchone()[1]
    if self.charset != 'utf8':
        cnx.query("SET NAMES %s" % self.charset)
        cnx.store_result()
    ConnectionWrapper.__init__(self, cnx, log)
    self._is_closed = False
def _check_quickjump(self, req, kwd):
    """Look for search shortcuts."""
    noquickjump = as_int(req.args.get('noquickjump'), 0)
    # Source quickjump FIXME: delegate to ISearchSource.search_quickjump
    quickjump_href = None
    if kwd[0] == '/':
        # A leading slash jumps straight into the repository browser.
        quickjump_href = req.href.browser(kwd)
        name = kwd
        description = _('Browse repository path %(path)s', path=kwd)
    else:
        context = web_context(req, 'search')
        link = find_element(extract_link(self.env, context, kwd), 'href')
        if link is not None:
            quickjump_href = link.attrib.get('href')
            name = link.children
            description = link.attrib.get('title', '')
    if quickjump_href:
        # Only automatically redirect to local quickjump links
        if not quickjump_href.startswith(req.base_path or '/'):
            noquickjump = True
        if not noquickjump:
            req.redirect(quickjump_href)
        return {'href': quickjump_href, 'name': tag.em(name),
                'description': description}
def _prepare_results(self, req, filters, results):
    """Paginate raw search results, format each hit for display, and
    attach page-navigation links."""
    page = as_int(req.args.get('page', 1), default=1, min=1)
    try:
        results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)
    except TracError:
        add_warning(req, _("Page %(page)s is out of range.", page=page))
        page = 1
        results = Paginator(results, page - 1, self.RESULTS_PER_PAGE)

    # Convert each result tuple into the dict the template consumes.
    for idx, row in enumerate(results):
        results[idx] = {'href': row[0], 'title': row[1],
                        'date': user_time(req, format_datetime, row[2]),
                        'author': row[3], 'excerpt': row[4]}

    pagedata = []
    for shown_page in results.get_shown_pages(21):
        href = req.href.search([(f, 'on') for f in filters],
                               q=req.args.get('q'), page=shown_page,
                               noquickjump=1)
        pagedata.append([href, None, str(shown_page),
                         _("Page %(num)d", num=shown_page)])
    fields = ['href', 'class', 'string', 'title']
    results.shown_pages = [dict(zip(fields, row)) for row in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1), 'title': None}

    filter_pairs = zip(filters, ['on'] * len(filters))
    if results.has_next_page:
        add_link(req, 'next',
                 req.href.search(filter_pairs, q=req.args.get('q'),
                                 page=page + 1, noquickjump=1),
                 _('Next Page'))
    if results.has_previous_page:
        add_link(req, 'prev',
                 req.href.search(filter_pairs, q=req.args.get('q'),
                                 page=page - 1, noquickjump=1),
                 _('Previous Page'))
    page_href = req.href.search(filter_pairs, q=req.args.get('q'),
                                noquickjump=1)
    return {'results': results, 'page_href': page_href}
def _do_remove_comment(self, ticket_number, comment_number):
    """Delete one ticket comment after validating both id arguments."""
    ticket_number = as_int(ticket_number, None)
    if ticket_number is None:
        raise AdminCommandError(_('<ticket#> must be a number'))
    comment_number = as_int(comment_number, None)
    if comment_number is None:
        raise AdminCommandError(_('<comment#> must be a number'))
    with self.env.db_transaction:
        ticket = model.Ticket(self.env, ticket_number)
        if not ticket.get_change(comment_number):
            raise AdminCommandError(_("Comment %(num)s not found",
                                      num=comment_number))
        ticket.delete_change(comment_number)
    printout(_("The ticket comment %(num)s on ticket #%(id)s has been "
               "deleted.", num=comment_number, id=ticket_number))
def expand_macro(self, formatter, name, content):
    """Render the LastVoted / TopVoted / VoteList wiki macros as an
    unordered list of voted resources."""
    env = formatter.env
    req = formatter.req
    if 'VOTE_VIEW' not in req.perm:
        return
    # Simplify function calls.
    format_author = partial(Chrome(self.env).format_author, req)
    if not content:
        args, kw = [], {}
        compact = None
        top = 5
    else:
        args, kw = parse_args(content)
        compact = 'compact' in args and True
        top = as_int(kw.get('top'), 5, min=0)

    if name == 'LastVoted':
        lst = tag.ul()
        for i in self.get_votes(req, top=top):
            resource = Resource(i[0], i[1])
            # Annotate who and when.
            voted = ('by %s at %s' % (format_author(i[3]),
                                      format_datetime(to_datetime(i[4]))))
            lst(tag.li(tag.a(
                get_resource_description(env, resource,
                                         compact and 'compact' or 'default'),
                href=get_resource_url(env, resource, formatter.href),
                title=(compact and '%+i %s' % (i[2], voted) or None)),
                (not compact and
                 Markup(' %s %s' % (tag.b('%+i' % i[2]), voted)) or '')))
        return lst
    elif name == 'TopVoted':
        realm = kw.get('realm')
        lst = tag.ul()
        for i in self.get_top_voted(req, realm=realm, top=top):
            # Results are ordered; stop at the first non-positive score.
            if 'up-only' in args and i[2] < 1:
                break
            resource = Resource(i[0], i[1])
            lst(tag.li(tag.a(
                get_resource_description(env, resource,
                                         compact and 'compact' or 'default'),
                href=get_resource_url(env, resource, formatter.href),
                title=(compact and '%+i' % i[2] or None)),
                (not compact and ' (%+i)' % i[2] or '')))
        return lst
    elif name == 'VoteList':
        lst = tag.ul()
        resource = resource_from_path(env, req.path_info)
        for i in self.get_votes(req, resource, top=top):
            vote = ('at %s' % format_datetime(to_datetime(i[4])))
            lst(tag.li(
                compact and format_author(i[3]) or
                Markup(u'%s by %s %s' % (tag.b('%+i' % i[2]),
                                         tag(format_author(i[3])), vote)),
                title=(compact and '%+i %s' % (i[2], vote) or None)))
        return lst
def _prepare_attrs(self, req, attr):
    """Paginate the account-attribute mapping for the admin panel and
    attach next/prev navigation links."""
    page = int(req.args.get('page', '1'))
    # Paginator can't deal with dict, so convert to list.
    attr_lst = [(k, v) for k, v in attr.iteritems()]
    max_per_page = as_int(req.args.get('max_per_page'), None)
    if max_per_page is None:
        max_per_page = self.ACCTS_PER_PAGE
    attr = Paginator(attr_lst, page - 1, max_per_page)

    pagedata = []
    for shown_page in attr.get_shown_pages(21):
        href = req.href.admin('accounts', 'users', page=shown_page,
                              max_per_page=max_per_page)
        pagedata.append([href, None, str(shown_page),
                         _("page %(num)s", num=str(shown_page))])
    fields = ['href', 'class', 'string', 'title']
    attr.shown_pages = [dict(zip(fields, row)) for row in pagedata]
    attr.current_page = {'href': None, 'class': 'current',
                         'string': str(attr.page + 1), 'title': None}

    if attr.has_next_page:
        add_link(req, 'next',
                 req.href.admin('accounts', 'users', page=page + 1,
                                max_per_page=max_per_page),
                 _('Next Page'))
    if attr.has_previous_page:
        add_link(req, 'prev',
                 req.href.admin('accounts', 'users', page=page - 1,
                                max_per_page=max_per_page),
                 _('Previous Page'))
    page_href = req.href.admin('accounts', 'cleanup')
    return {'attr': attr, 'page_href': page_href}
def _arg_as_int(val, key=None, min=None, max=None):
    """Convert a macro argument to an int, raising `MacroError` with the
    offending ``key=val`` expression when conversion fails."""
    int_val = as_int(val, None, min=min, max=max)
    if int_val is None:
        expr = tag.code("%s=%s" % (key, val)) if key else tag.code(val)
        raise MacroError(tag_("Invalid macro argument %(expr)s", expr=expr))
    return int_val
def _format_comment_link(self, formatter, ns, target, label):
    """Render a ``comment:N:ticket:M`` (or legacy ``ticket:M:comment:N``)
    link, colored by the ticket's status when viewable."""
    resource = None
    if ':' in target:
        elts = target.split(':')
        if len(elts) == 3:
            cnum, realm, id = elts
            if cnum != 'description' and cnum and not cnum[0].isdigit():
                realm, id, cnum = elts  # support old comment: style
            resource = formatter.resource(realm, id)
    else:
        resource = formatter.resource
        cnum = target

    if resource and resource.realm == 'ticket':
        id = as_int(resource.id, None)
        if id is not None:
            href = "%s#comment:%s" % (formatter.href.ticket(resource.id),
                                      cnum)
            title = _("Comment %(cnum)s for Ticket #%(id)s", cnum=cnum,
                      id=resource.id)
            if 'TICKET_VIEW' in formatter.perm(resource):
                # Style the link with the ticket status, if it exists.
                for status, in self.env.db_query(
                        "SELECT status FROM ticket WHERE id=%s", (id,)):
                    return tag.a(label, href=href, title=title,
                                 class_=status)
            return tag.a(label, href=href, title=title)
    return label
def save(self, req):
    """Persist whitelisted request args into the session, validating
    wiki-page and report references before storing them.

    Uses ``key in req.args`` instead of the Python-3-removed
    ``dict.has_key``.
    """
    if req.args and 'action' in req.args and req.args['action'] == 'save':
        for key in SESSION_KEYS.values():
            if key not in req.args:
                continue
            if key == 'wiki.href':
                wiki_href = req.args[key]
                if wiki_href == '':
                    req.session[key] = ''
                    continue
                if WikiSystem(self.env).has_page(wiki_href):
                    req.session[key] = req.args[key]
                else:
                    add_warning(req, Markup(tag.span(Markup(_(
                        "%(page)s is not a valid Wiki page",
                        page=tag.b(wiki_href)
                    )))))
            elif key == 'tickets.href':
                ticket_href = req.args[key]
                if ticket_href == '':
                    req.session[key] = ''
                    continue
                reports = self.get_report_list()
                # Lazy %-args: formatting deferred until actually logged.
                self.log.info('reports: %s', reports)
                if ticket_href in ('report', 'query') \
                        or as_int(ticket_href, 0) in reports:
                    req.session[key] = req.args[key]
                else:
                    add_warning(req, Markup(tag.span(Markup(_(
                        "%(report)s is not a valid report",
                        report=tag.b(ticket_href)
                    )))))
            else:
                req.session[key] = req.args[key]
def __init__(self, path, log, user=None, password=None, host=None,
             port=None, params=None):
    """Open a MySQLdb connection with charset utf8.

    :param path: database name, optionally with a leading ``/``
    :param params: extra connection-string parameters (dict)
    """
    if params is None:  # avoid the shared mutable default argument
        params = {}
    if path.startswith('/'):
        path = path[1:]
    if password is None:  # `is None`, not `== None`
        password = ''
    if port is None:
        port = 3306
    opts = {}
    for name, value in params.iteritems():
        if name in ('init_command', 'read_default_file',
                    'read_default_group', 'unix_socket'):
            opts[name] = value
        elif name in ('compress', 'named_pipe'):
            opts[name] = as_int(value, 0)
        else:
            # Use the `log` parameter: self.log is only set below by
            # ConnectionWrapper.__init__, so self.log would raise here.
            log.warning("Invalid connection string parameter '%s'", name)
    cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host,
                          port=port, charset='utf8', **opts)
    if hasattr(cnx, 'encoders'):
        # 'encoders' undocumented but present since 1.2.1 (r422)
        cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
    ConnectionWrapper.__init__(self, cnx, log)
    self._is_closed = False
def _format_comment_link(self, formatter, ns, target, label):
    """Render a ticket-comment link, colored by ticket status when the
    viewer has TICKET_VIEW."""
    resource = None
    if ':' in target:
        elts = target.split(':')
        if len(elts) == 3:
            cnum, realm, id = elts
            if cnum != 'description' and cnum and not cnum[0].isdigit():
                realm, id, cnum = elts  # support old comment: style
            resource = formatter.resource(realm, id)
    else:
        resource = formatter.resource
        cnum = target

    if resource and resource.realm == 'ticket':
        id = as_int(resource.id, None)
        if id is not None:
            href = "%s#comment:%s" % (formatter.href.ticket(resource.id),
                                      cnum)
            title = _("Comment %(cnum)s for Ticket #%(id)s", cnum=cnum,
                      id=resource.id)
            if 'TICKET_VIEW' in formatter.perm(resource):
                for status, in self.env.db_query(
                        "SELECT status FROM ticket WHERE id=%s", (id,)):
                    return tag.a(label, href=href, title=title,
                                 class_=status)
            return tag.a(label, href=href, title=title)
    return label
def __init__(self, path, log, user=None, password=None, host=None,
             port=None, params=None):
    """Open a MySQLdb connection and align the session charset with the
    database's actual charset.

    :param path: database name, optionally with a leading ``/``
    :param params: extra connection-string parameters (dict)
    """
    if params is None:  # avoid the shared mutable default argument
        params = {}
    if path.startswith("/"):
        path = path[1:]
    if password is None:  # `is None`, not `== None`
        password = ""
    if port is None:
        port = 3306
    opts = {}
    for name, value in params.iteritems():
        if name in ("init_command", "read_default_file",
                    "read_default_group", "unix_socket"):
            opts[name] = value
        elif name in ("compress", "named_pipe"):
            opts[name] = as_int(value, 0)
        else:
            # Use the `log` parameter: self.log is only set below by
            # ConnectionWrapper.__init__, so self.log would raise here.
            log.warning("Invalid connection string parameter '%s'", name)
    cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host,
                          port=port, charset="utf8", **opts)
    if hasattr(cnx, "encoders"):
        # 'encoders' undocumented but present since 1.2.1 (r422)
        cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
    cursor = cnx.cursor()
    cursor.execute("SHOW VARIABLES WHERE "
                   " variable_name='character_set_database'")
    self.charset = cursor.fetchone()[1]
    if self.charset != "utf8":
        cnx.query("SET NAMES %s" % self.charset)
        cnx.store_result()
    ConnectionWrapper.__init__(self, cnx, log)
    self._is_closed = False
def _do_remove(self, number):
    """Delete a ticket (and all associated data) by number."""
    number = as_int(number, None)
    if number is None:
        raise AdminCommandError(_("<ticket#> must be a number"))
    with self.env.db_transaction:
        model.Ticket(self.env, number).delete()
    printout(_("Ticket #%(num)s and all associated data removed.",
               num=number))
def _move_rule(self, arg, req):
    """Reorder a subscription rule; `arg` is ``"<rule_id>-<priority>"``."""
    tokens = [as_int(val, 0) for val in arg.split('-', 1)]
    if len(tokens) != 2:
        return
    rule_id, priority = tokens
    if rule_id > 0 and priority > 0:
        session = req.session
        Subscription.move(self.env, rule_id, priority,
                          session.sid, session.authenticated)
def _format_comment_link(self, formatter, ns, target, label):
    """Render a ticket-comment link with a tooltip and CSS class that
    reflect whether the ticket/comment exists and is viewable."""
    resource = None
    if ':' in target:
        elts = target.split(':')
        if len(elts) == 3:
            cnum, realm, id = elts
            if cnum != 'description' and cnum and not cnum[0].isdigit():
                realm, id, cnum = elts  # support old comment: style
            id = as_int(id, None)
            # Alternative realm aliases map onto tickets.
            if realm in ('bug', 'issue'):
                realm = 'ticket'
            resource = formatter.resource(realm, id)
    else:
        resource = formatter.resource
        cnum = target

    if resource and resource.id and resource.realm == self.realm and \
            cnum and (cnum.isdigit() or cnum == 'description'):
        href = title = class_ = None
        if self.resource_exists(resource):
            from trac.ticket.model import Ticket
            ticket = Ticket(self.env, resource.id)
            if cnum != 'description' and not ticket.get_change(cnum):
                title = _("ticket comment does not exist")
                class_ = 'missing ticket'
            elif 'TICKET_VIEW' in formatter.perm(resource):
                href = formatter.href.ticket(resource.id) + \
                       "#comment:%s" % cnum
                if resource.id != formatter.resource.id:
                    # Cross-ticket link: include the target summary.
                    summary = self.format_summary(ticket['summary'],
                                                  ticket['status'],
                                                  ticket['resolution'],
                                                  ticket['type'])
                    if cnum == 'description':
                        title = _("Description for #%(id)s: %(summary)s",
                                  id=resource.id, summary=summary)
                    else:
                        title = _("Comment %(cnum)s for #%(id)s: "
                                  "%(summary)s", cnum=cnum,
                                  id=resource.id, summary=summary)
                    class_ = ticket['status'] + ' ticket'
                else:
                    title = _("Description") if cnum == 'description' \
                            else _("Comment %(cnum)s", cnum=cnum)
                    class_ = 'ticket'
            else:
                title = _("no permission to view ticket")
                class_ = 'forbidden ticket'
        else:
            title = _("ticket does not exist")
            class_ = 'missing ticket'
        return tag.a(label, class_=class_, href=href, title=title)
    return label
def __init__(self, path, log, user=None, password=None, host=None,
             port=None, params=None):
    """Open a pymysql connection, reconnecting with the database's
    actual charset when it differs from the requested one.

    :param path: database name, optionally with a leading ``/``
    :param params: extra connection-string parameters (dict)
    """
    if params is None:  # avoid the shared mutable default argument
        params = {}
    if path.startswith('/'):
        path = path[1:]
    if password is None:
        password = ''
    if port is None:
        port = 3306
    opts = {'charset': 'utf8'}
    for name, value in params.items():
        # The original had three separate branches that all just copied
        # the value; they are merged here (behavior unchanged).
        if name in ('read_default_group', 'init_command',
                    'read_default_file', 'unix_socket'):
            opts[name] = value
        elif name in ('compress', 'named_pipe'):
            opts[name] = as_int(value, 0)
        elif name == 'charset':
            value = value.lower()
            if value in ('utf8', 'utf8mb4'):
                opts[name] = value
            else:
                # Use the `log` parameter: self.log is only set below by
                # ConnectionWrapper.__init__, so self.log would raise.
                log.warning("Invalid connection string parameter "
                            "'%s=%s'", name, value)
        else:
            log.warning("Invalid connection string parameter '%s'", name)
    cnx = pymysql.connect(db=path, user=user, passwd=password, host=host,
                          port=port, **opts)
    cursor = cnx.cursor()
    cursor.execute("SHOW VARIABLES WHERE "
                   " variable_name='character_set_database'")
    self.charset = cursor.fetchone()[1]
    cursor.close()
    if self.charset != opts['charset']:
        # Reconnect so the session charset matches the database.
        cnx.close()
        opts['charset'] = self.charset
        cnx = pymysql.connect(db=path, user=user, passwd=password,
                              host=host, port=port, **opts)
    self.schema = path
    ConnectionWrapper.__init__(self, cnx, log)
    self._is_closed = False
def backup(self, dest_file):
    """Dump the MySQL database to `dest_file` with mysqldump and verify
    the dump completed.

    The password is handed over via the ``MYSQL_PWD`` environment
    variable so it never appears on the command line.
    """
    from subprocess import Popen, PIPE
    db_url = self.env.config.get('trac', 'database')
    scheme, db_prop = parse_connection_uri(db_url)
    db_params = db_prop.setdefault('params', {})
    db_name = os.path.basename(db_prop['path'])

    args = [self.mysqldump_path, '--no-defaults']
    for flag, prop in (('-h', 'host'), ('-P', 'port'), ('-u', 'user')):
        if prop in db_prop:
            args.extend([flag, str(db_prop[prop]) if prop == 'port'
                               else db_prop[prop]])
    for name, value in db_params.iteritems():
        if name == 'compress' and as_int(value, 0):
            args.append('--compress')
        elif name == 'named_pipe' and as_int(value, 0):
            args.append('--protocol=pipe')
        elif name == 'read_default_file':  # Must be first
            args.insert(1, '--defaults-file=' + value)
        elif name == 'unix_socket':
            args.extend(['--protocol=socket', '--socket=' + value])
        elif name not in ('init_command', 'read_default_group'):
            self.log.warning("Invalid connection string parameter '%s'",
                             name)
    args.extend(['-r', dest_file, db_name])

    environ = os.environ.copy()
    if 'password' in db_prop:
        environ['MYSQL_PWD'] = str(db_prop['password'])
    try:
        p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
    except OSError as e:
        raise TracError(
            _("Unable to run %(path)s: %(msg)s",
              path=self.mysqldump_path, msg=exception_to_unicode(e)))
    errmsg = p.communicate()[1]
    if p.returncode != 0:
        raise TracError(
            _("mysqldump failed: %(msg)s",
              msg=to_unicode(errmsg.strip())))
    if not os.path.exists(dest_file):
        raise TracError(_("No destination file created"))
    return dest_file
def backup(self, dest_file):
    """Dump the MySQL database to `dest_file` with mysqldump and verify
    the dump completed (non-zero exit or missing file raise TracError)."""
    from subprocess import Popen, PIPE
    db_url = self.env.config.get('trac', 'database')
    scheme, db_prop = parse_connection_uri(db_url)
    db_params = db_prop.setdefault('params', {})
    db_name = os.path.basename(db_prop['path'])

    args = [self.mysqldump_path]
    if 'host' in db_prop:
        args.extend(['-h', db_prop['host']])
    if 'port' in db_prop:
        args.extend(['-P', str(db_prop['port'])])
    if 'user' in db_prop:
        args.extend(['-u', db_prop['user']])
    for name, value in db_params.iteritems():
        if name == 'compress' and as_int(value, 0):
            args.append('--compress')
        elif name == 'named_pipe' and as_int(value, 0):
            args.append('--protocol=pipe')
        elif name == 'read_default_file':  # Must be first
            args.insert(1, '--defaults-file=' + value)
        elif name == 'unix_socket':
            args.extend(['--protocol=socket', '--socket=' + value])
        elif name not in ('init_command', 'read_default_group'):
            self.log.warning("Invalid connection string parameter '%s'",
                             name)
    args.extend(['-r', dest_file, db_name])

    # Pass the password through the environment, not the command line.
    environ = os.environ.copy()
    if 'password' in db_prop:
        environ['MYSQL_PWD'] = str(db_prop['password'])
    try:
        p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
    except OSError as e:
        raise TracError(_("Unable to run %(path)s: %(msg)s",
                          path=self.mysqldump_path,
                          msg=exception_to_unicode(e)))
    errmsg = p.communicate()[1]
    if p.returncode != 0:
        raise TracError(_("mysqldump failed: %(msg)s",
                          msg=to_unicode(errmsg.strip())))
    if not os.path.exists(dest_file):
        raise TracError(_("No destination file created"))
    return dest_file
def _paginate(self, req, results, realms):
    """Paginate ListTagged results, recovering to page 1 when the
    requested page is out of range, and attach navigation metadata."""
    query = req.args.get('q', None)
    current_page = as_int(req.args.get('listtagged_page'), 1, min=1)
    items_per_page = as_int(req.args.get('listtagged_per_page'),
                            self.items_per_page)
    if items_per_page < 1:
        items_per_page = self.items_per_page
    try:
        result = Paginator(results, current_page - 1, items_per_page)
    except (AssertionError, TracError) as e:
        # AssertionError raised in Trac < 1.0.10, TracError otherwise
        self.log.warn("ListTagged macro: %s", e)
        current_page = 1
        result = Paginator(results, current_page - 1, items_per_page)

    attributes = ['href', 'class', 'string', 'title']
    pagedata = []
    for page in result.get_shown_pages(21):
        href = self.get_href(req, realms, query, items_per_page, page)
        pagedata.append([href, None, str(page),
                         _("Page %(num)d", num=page)])
    result.shown_pages = [dict(zip(attributes, row)) for row in pagedata]
    result.current_page = {'href': None, 'class': 'current',
                           'string': str(result.page + 1), 'title': None}

    if result.has_next_page:
        add_link(req, 'next',
                 self.get_href(req, realms, query, items_per_page,
                               current_page + 1),
                 _('Next Page'))
    if result.has_previous_page:
        add_link(req, 'prev',
                 self.get_href(req, realms, query, items_per_page,
                               current_page - 1),
                 _('Previous Page'))
    return result
def render_cloud(self, req, cloud, renderer=None, caseless_sort=False,
                 mincount=None, realms=()):
    """Render a tag cloud.

    :param cloud: Dictionary of {object: count} representing the cloud.
    :param renderer: A callable with signature (tag, count, percent)
                     used to render the cloud objects.
    :param caseless_sort: Boolean, whether the tag cloud should be
                          sorted case-insensitively.
    :param mincount: Integer threshold to hide tags with smaller count.
    """
    min_px = 10.0
    max_px = 30.0
    scale = 1.0

    if renderer is None:
        def default_renderer(tag, count, percent):
            href = self.get_href(req, realms, tag=Resource('tag', tag))
            return builder.a(tag, rel='tag', title='%i' % count,
                             href=href,
                             style='font-size: %ipx'
                                   % int(min_px
                                         + percent * (max_px - min_px)))
        renderer = default_renderer

    # A LUT from count to n/len(cloud); set(cloud.values()) directly
    # instead of the redundant set([r for r in cloud.values()]).
    size_lut = dict([(c, float(i)) for i, c in
                     enumerate(sorted(set(cloud.values())))])
    if size_lut:
        scale = 1.0 / len(size_lut)

    if caseless_sort:
        # Preserve upper-case precedence within similar tags.
        items = reversed(sorted(cloud.iteritems(),
                                key=lambda t: t[0].lower(), reverse=True))
    else:
        items = sorted(cloud.iteritems())

    # Hoisted out of the loop: the threshold is loop-invariant.
    threshold = as_int(mincount, 1) if mincount else None

    ul = li = None
    for i, (tag, count) in enumerate(items):
        percent = size_lut[count] * scale
        if mincount and count < threshold:
            # Tag count is too low.
            continue
        if ul:
            # Found new tag for cloud; now add previously prepared one.
            ul('\n', li)
        else:
            # Found first tag for cloud; now create the list.
            ul = builder.ul(class_='tagcloud')
        # Prepare current tag entry.
        li = builder.li(renderer(tag, count, percent))
    if li:
        # All tags checked; mark latest tag as last one (no tailing colon).
        li(class_='last')
        ul('\n', li, '\n')
    return ul and ul or _("No tags found")
def _handle_partial_source(self, src, start, end):
    """Slice `src` to the requested line range.

    `start`/`end` may be 1-based line numbers or quoted regexps that are
    searched for in the source. Returns (text, start, end).
    """
    # we want to only show a certain number of lines, so we break the
    # source into lines and set our numbers for 1-based line numbering.
    lines = src.split('\n')
    linecount = len(lines)
    start_regex = False

    start = as_int(start, start)
    if isinstance(start, basestring):
        # Non-numeric start: treat it as a regexp over the whole source.
        start = start.strip("'\"")
        start_regex = True
        match = re.search(start, src, re.M)
        if not match:
            raise ParseError('start regexp "%s" does not match any text'
                             % start)
        start = src.count('\n', 0, match.start())

    if end:
        end = as_int(end, end)
        if isinstance(end, basestring):
            # Regexp end: search only the text after the start line.
            end = end.strip("'\"")
            src2 = '\n'.join(lines[start + 1:linecount])
            match = re.search(end, src2, re.M)
            if not match:
                raise ParseError('end regexp "%s" does not match any text'
                                 % end)
            end = start + src2.count('\n', 0, match.start())
        elif start_regex:
            # Numeric end with regexp start: end is relative to start.
            # NOTE(review): inferred nesting of the original trailing
            # else-branches — confirm against upstream.
            end += start
    else:
        end = linecount

    src = lines[start - 1:end]
    # calculate actual startline for display purposes
    if start < 0:
        start = linecount + start
    return '\n'.join(src), start, end
def test_as_int(self):
    """as_int: basic conversion, defaults, and min/max clamping."""
    self.assertEqual(1, util.as_int('1'))
    self.assertEqual(1, util.as_int('1', None))
    # Non-numeric input falls back to the default.
    self.assertIsNone(util.as_int('A', None))
    self.assertEqual(2, util.as_int('A', 2))
    # min/max clamp the converted value rather than rejecting it.
    self.assertEqual(2, util.as_int('1', None, min=2))
    self.assertEqual(0, util.as_int('1', None, max=0))
def report_href(**kwargs):
    """Generate links to this report preserving user variables, and
    sorting and paging variables."""
    params = args.copy()
    if sort_col:
        params['sort'] = sort_col
    params['page'] = page
    if max:
        params['max'] = max
    params.update(kwargs)
    # Normalize 'asc' to 0/1, falling back to the current direction.
    params['asc'] = as_int(params.get('asc'), asc, min=0, max=1)
    return req.href.report(id, params)
def index(self, *args):
    """List test cases, optionally filtered by test suite, paginated."""
    template = 'qa_tc_index.html'
    page = int(self.req.args.get('page', '1'))
    testsuite_id = int(self.req.args.get('testsuite_id', '0'))
    default_max = 50
    max = self.req.args.get('max')
    limit = as_int(max, default_max, min=0)
    offset = (page - 1) * limit

    data = {
        'title': 'Test Cases',
        'description': 'Some description',
        'max': limit,
        'args': args,
        'show_args_form': False,
        'message': None,
        'paginator': None,
        'ts_list': TestSuite.find(self.env, fields=['title']),
        'testsuite_id': testsuite_id,
    }
    results, num_items = get_testcases_grouped(self.env, testsuite_id,
                                               limit=limit, offset=offset)
    data['tc_list'] = results
    # limit == 0 means "show everything"; no paginator needed then.
    if limit > 0:
        data['paginator'] = self.build_paginator([], page, limit,
                                                 num_items,
                                                 'testcase/index')
    return 'qa_tc_index.html', data
def _format_comment_link(self, formatter, ns, target, label):
    """Render a ticket-comment link with tooltip/CSS class reflecting
    ticket existence, comment existence, and view permission."""
    resource = None
    if ":" in target:
        elts = target.split(":")
        if len(elts) == 3:
            cnum, realm, id = elts
            if cnum != "description" and cnum and not cnum[0].isdigit():
                realm, id, cnum = elts  # support old comment: style
            id = as_int(id, None)
            resource = formatter.resource(realm, id)
    else:
        resource = formatter.resource
        cnum = target

    valid_cnum = cnum and (all(c.isdigit() for c in cnum)
                           or cnum == "description")
    if resource and resource.id and resource.realm == "ticket" \
            and valid_cnum:
        href = title = class_ = None
        if not self.resource_exists(resource):
            title = _("ticket does not exist")
            class_ = "missing ticket"
        else:
            from trac.ticket.model import Ticket

            ticket = Ticket(self.env, resource.id)
            if cnum != "description" and not ticket.get_change(cnum):
                title = _("ticket comment does not exist")
                class_ = "missing ticket"
            elif "TICKET_VIEW" in formatter.perm(resource):
                href = formatter.href.ticket(resource.id) \
                       + "#comment:%s" % cnum
                if resource.id != formatter.resource.id:
                    if cnum == "description":
                        title = _("Description for Ticket #%(id)s",
                                  id=resource.id)
                    else:
                        title = _("Comment %(cnum)s for Ticket #%(id)s",
                                  cnum=cnum, id=resource.id)
                    class_ = ticket["status"] + " ticket"
                else:
                    title = _("Description") if cnum == "description" \
                            else _("Comment %(cnum)s", cnum=cnum)
                    class_ = "ticket"
            else:
                title = _("no permission to view ticket")
                class_ = "forbidden ticket"
        return tag.a(label, class_=class_, href=href, title=title)
    return label
def as_int(self, key, default=None, min=None, max=None):
    """Return the value as an integer. Return `default` if an
    exception is raised while converting the value to an integer.

    :param key: the name of the session attribute
    :keyword default: the value to return if the parameter does not
                      exist or an exception occurs converting the
                      value to an integer.
    :keyword min: lower bound to which the value is limited
    :keyword max: upper bound to which the value is limited

    :since: 1.2
    """
    if key not in self:
        return default
    return as_int(self[key], default, min, max)
def as_int(self, name, default=None, min=None, max=None):
    """Return the value as an integer. Return `default` if an
    exception is raised while converting the value to an integer.

    :param name: the name of the request parameter
    :keyword default: the value to return if the parameter is not
                      specified or an exception occurs converting
                      the value to an integer.
    :keyword min: lower bound to which the value is limited
    :keyword max: upper bound to which the value is limited

    :since: 1.2
    """
    if name not in self:
        return default
    return as_int(self.getfirst(name), default, min, max)
def show(self, id):
    """Display one test plan with its paginated test cases."""
    template = 'qa_tp_view.html'
    page = int(self.req.args.get('page', '1'))
    default_max = 50
    max = self.req.args.get('max')
    limit = as_int(max, default_max, min=0)
    offset = (page - 1) * limit

    testcases, count = get_testcases_by_testplan(self.env, id, limit,
                                                 offset)
    data = {
        'tp': TestPlan.findone(self.env, id,
                               fields=['title', 'description']),
        'ts_list': TestSuite.find(self.env, fields=['title']),
        'tc_groups': testcases,
        'paginator': self.build_paginator(
            [], page, limit, count, '%s/%s' % ('testplan/show', id)),
    }
    return (template, data)
def show(self, id):
    """Render the view page for test run *id*.

    Returns a ``(template, data)`` pair with the run, the QA author
    list, the paged test cases and a paginator.
    """
    current_page = int(self.req.args.get('page', '1'))
    default_max = 50
    raw_max = self.req.args.get('max')
    # Explicit ?max= overrides the default page size; clamped to >= 0.
    per_page = as_int(raw_max, default_max, min=0)
    skip = (current_page - 1) * per_page

    run = TestRun.findone(self.env, id,
                          fields=['title', 'description', 'testplan_id'])
    qacore = QaCore(self.env)
    authors = qacore.get_qa_authors()
    cases, total = get_testcases_by_testrun(self.env, run.id,
                                            per_page, skip)

    data = {
        'tr': run,
        'author_list': authors,
        'tc_list': cases,
        'paginator': self.build_paginator(
            [], current_page, per_page, total,
            '%s/%s' % ('testrun/show', id)),
    }
    return ('qa_tr_view.html', data)
def _ticket_last_comment(self, ticket): cnum = -1 for entry in ticket.get_changelog(): (time, author, field, oldvalue, newvalue, permanent) = entry if field != 'comment': continue n = as_int(oldvalue, None) if n is None: continue if cnum < n: cnum = n if cnum == -1: return None else: return cnum
def getint(self, name, default=None, min=None, max=None):
    """Return the value as an integer.

    Raise an `HTTPBadRequest` exception if an exception occurs while
    converting the value to an integer.

    :param name: the name of the request parameter
    :keyword default: the value to return if the parameter is not
                      specified
    :keyword min: lower bound to which the value is limited
    :keyword max: upper bound to which the value is limited

    :since: 1.2
    """
    if name in self:
        value = as_int(self[name], None, min, max)
        if value is not None:
            return value
        raise HTTPBadRequest(tag_("Invalid value for request argument "
                                  "%(name)s.", name=tag.em(name)))
    return default
def show(self, id):
    """Render the view page for test suite *id*.

    Returns a ``(template, data)`` pair with the suite, its paged test
    cases, and a paginator when paging is enabled.
    """
    current_page = int(self.req.args.get('page', '1'))
    default_max = 50
    raw_max = self.req.args.get('max')
    # Explicit ?max= overrides the default page size; clamped to >= 0.
    per_page = as_int(raw_max, default_max, min=0)
    skip = (current_page - 1) * per_page

    data = {'paginator': None}
    data['ts'] = TestSuite.findone(self.env, id,
                                   fields=['title', 'description'])
    cases, total = TestCase.findpaged(self.env, limit=per_page,
                                      offset=skip, fields=['title'],
                                      testsuite_id=id)
    data['tc_list'] = cases
    # A page size of 0 means "show everything", so no paginator then.
    if per_page > 0:
        data['paginator'] = self.build_paginator(
            [], current_page, per_page, total,
            '%s/%s' % ('testsuite/show', id))
    return ('qa_ts_view.html', data)
def _render_confirm_delete(self, req, page):
    """Prepare the "confirm wiki page deletion" view.

    Depending on the request arguments, the confirmation covers a
    single version, a range of versions ('multiple'), or the whole
    page.  Returns the usual ``(template, data, content_type)`` tuple.
    """
    # Read-only pages need WIKI_ADMIN to delete; others WIKI_DELETE.
    if page.readonly:
        req.perm(page.resource).require('WIKI_ADMIN')
    else:
        req.perm(page.resource).require('WIKI_DELETE')
    version = None
    if 'delete_version' in req.args:
        version = int(req.args.get('version', 0))
    old_version = as_int(req.args.get('old_version'), version)
    # 'multiple' when deleting a range of versions, 'single' for one
    # version, 'page' for deleting the page entirely.
    what = 'multiple' if version and old_version \
                         and version - old_version > 1 \
           else 'single' if version else 'page'
    num_versions = 0
    new_date = None
    old_date = None
    # Walk the history (newest first) to find the dates bracketing the
    # versions about to be deleted and count how many are affected.
    for v, t, author, comment, ipnr in page.get_history():
        if (v <= version or what == 'page') and new_date is None:
            new_date = t
        if (v <= old_version and what == 'multiple' or
                num_versions > 1 and what == 'single'):
            break
        num_versions += 1
        old_date = t
    data = self._page_data(req, page, 'delete')
    data.update({'what': what, 'new_version': None, 'old_version': None,
                 'num_versions': num_versions, 'new_date': new_date,
                 'old_date': old_date})
    if version is not None:
        data.update({'new_version': version, 'old_version': old_version})
    self._wiki_ctxtnav(req, page)
    return 'wiki_delete.html', data, None
def process_request(self, req):
    """Render wiki text supplied by the client and send back the HTML.

    GET requests require TRAC_ADMIN; POST is open to any client holding
    a valid form token.
    """
    # Allow all POST requests (with a valid __FORM_TOKEN, ensuring that
    # the client has at least some permission). Additionally, allow GET
    # requests from TRAC_ADMIN for testing purposes.
    if req.method != 'POST':
        req.perm.require('TRAC_ADMIN')

    realm = req.args.get('realm', 'wiki')
    id = req.args.get('id')
    version = as_int(req.args.get('version'), None)
    text = req.args.get('text', '')
    flavor = req.args.get('flavor')

    # Collect the optional boolean rendering flags from the request.
    options = {}
    for flag in ('escape_newlines', 'shorten'):
        if flag in req.args:
            options[flag] = bool(int(req.args[flag] or 0))

    target = Resource(realm, id=id, version=version)
    rendered = format_to(self.env, flavor, web_context(req, target),
                         text, **options)
    req.send(rendered.encode('utf-8'))
def process_request(self, req):
    """Render wiki text supplied by the client and send back the HTML.

    Reads ``realm``/``id``/``version`` to build the rendering context,
    ``text`` and ``flavor`` for the content, plus the optional boolean
    flags ``escape_newlines`` and ``shorten``.
    """
    # Allow all POST requests (with a valid __FORM_TOKEN, ensuring that
    # the client has at least some permission). Additionally, allow GET
    # requests from TRAC_ADMIN for testing purposes.
    if req.method != 'POST':
        req.perm.require('TRAC_ADMIN')
    realm = req.args.get('realm', 'wiki')
    id = req.args.get('id')
    version = as_int(req.args.get('version'), None)
    text = req.args.get('text', '')
    flavor = req.args.get('flavor')
    # Optional rendering flags; absent flags keep the formatter default.
    options = {}
    if 'escape_newlines' in req.args:
        options['escape_newlines'] = bool(
            int(req.args['escape_newlines'] or 0))
    if 'shorten' in req.args:
        options['shorten'] = bool(int(req.args['shorten'] or 0))
    resource = Resource(realm, id=id, version=version)
    context = web_context(req, resource)
    rendered = format_to(self.env, flavor, context, text, **options)
    req.send(rendered.encode('utf-8'))
def del_user_attribute(env, username=None, authenticated=1, attribute=None):
    """Delete one or more Trac user attributes for one or more users.

    Each of *username*, *authenticated* and *attribute* narrows the
    deletion when not ``None``; with all three ``None`` every session
    attribute is removed.
    """
    # Collect (column, value) filter pairs for the WHERE clause.
    filters = []
    if username is not None:
        filters.append(('sid', username))
    if authenticated is not None:
        # Normalize to the 0/1 flag stored in the table.
        filters.append(('authenticated',
                        as_int(authenticated, 0, min=0, max=1)))
    if attribute is not None:
        filters.append(('name', attribute))
    if filters:
        cols = [c for c, _v in filters]
        where_stmt = ''.join(['WHERE ', '=%s AND '.join(cols), '=%s'])
    else:
        where_stmt = ''
    sql = "DELETE FROM session_attribute %s" % where_stmt
    env.db_transaction(sql, tuple(v for _c, v in filters))
    # Older Trac versions lack this cache; invalidate only if present.
    if hasattr(env, 'invalidate_known_users_cache'):
        env.invalidate_known_users_cache()
def __init__(self, path, log, user=None, password=None, host=None,
             port=None, params=None):
    """Open a MySQLdb connection and wrap it.

    :param path: database name, optionally with a leading ``/``
    :param log: logger used for warnings and by `ConnectionWrapper`
    :param params: extra connection-string parameters (``compress``,
                   ``named_pipe``, ``read_default_file``,
                   ``read_default_group``, ``init_command``,
                   ``unix_socket``); unknown names are warned about
                   and ignored.
    """
    # Avoid a shared mutable default argument.
    if params is None:
        params = {}
    if path.startswith('/'):
        path = path[1:]
    if password is None:
        password = ''
    if port is None:
        port = 3306
    opts = {}
    for name, value in params.iteritems():
        key = name.encode('utf-8')
        if name == 'read_default_group':
            opts[key] = value
        elif name == 'init_command':
            opts[key] = value.encode('utf-8')
        elif name in ('read_default_file', 'unix_socket'):
            opts[key] = value.encode(sys.getfilesystemencoding())
        elif name in ('compress', 'named_pipe'):
            opts[key] = as_int(value, 0)
        else:
            # Bug fix: self.log is only set by ConnectionWrapper.__init__
            # at the end of this method, so use the `log` parameter here.
            if log:
                log.warning("Invalid connection string parameter '%s'",
                            name)
    cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host,
                          port=port, charset='utf8', **opts)
    self.schema = path
    if hasattr(cnx, 'encoders'):
        # 'encoders' undocumented but present since 1.2.1 (r422)
        cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
    cursor = cnx.cursor()
    cursor.execute("SHOW VARIABLES WHERE "
                   " variable_name='character_set_database'")
    self.charset = cursor.fetchone()[1]
    # Make sure the connection charset matches the database charset.
    if self.charset != 'utf8':
        cnx.query("SET NAMES %s" % self.charset)
        cnx.store_result()
    ConnectionWrapper.__init__(self, cnx, log)
    self._is_closed = False
def render_grid(self, req):
    """Retrieve the droplets and pre-process them for rendering.

    Queries the Chef index for rows, sets up pagination, groups the
    configured columns into header groups and the rows into row groups,
    and returns the ``(template, data, content_type)`` tuple for the
    grid view.
    """
    self.log.debug('Rendering grid..')
    index = self.grid_index
    columns = self.fields.get_list('grid_columns')
    format = req.args.get('format')
    resource = Resource('cloud', self.name)
    context = Context.from_request(req, resource)
    page = int(req.args.get('page', '1'))
    # Page size depends on the output format; 0 disables paging.
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0, 'tab': 0}.get(format, self.items_per_page)
    max = req.args.get('max')
    query = req.args.get('query')
    groupby = req.args.get('groupby', self.grid_group)
    groupby_fields = [(field.label, field.name) for field in columns]
    limit = as_int(max, default_max, min=0)  # explicit max takes precedence
    offset = (page - 1) * limit
    # explicit sort takes precedence over config
    sort = groupby or req.args.get('sort', self.grid_sort)
    asc = req.args.get('asc', self.grid_asc)
    asc = bool(int(asc))  # string '0' or '1' to int/boolean

    def droplet_href(**kwargs):
        """Generate links to this cloud droplet preserving user variables,
        and sorting and paging variables.
        """
        params = {}
        if sort:
            params['sort'] = sort
        params['page'] = page
        if max:
            params['max'] = max
        if query:
            params['query'] = query
        if groupby:
            params['groupby'] = groupby
        params.update(kwargs)
        params['asc'] = params.get('asc', asc) and '1' or '0'
        return req.href.cloud(self.name, params)

    data = {'action': 'view',
            'buttons': [],
            'resource': resource,
            'context': context,
            'title': self.title,
            'description': self.description,
            'label': self.label,
            'columns': columns,
            'id_field': self.id_field,
            'max': limit,
            'query': query,
            'groupby': groupby,
            'groupby_fields': [('', '')] + groupby_fields,
            'message': None,
            'paginator': None,
            'droplet_href': droplet_href,
            }

    try:
        self.log.debug('About to search chef..')
        sort_ = sort.strip('_')  # handle dynamic attributes
        rows, total = self.chefapi.search(index, sort_, asc, limit,
                                          offset, query or '*:*')
        numrows = len(rows)
        self.log.debug('Chef search returned %s rows' % numrows)
    except Exception:
        # Best-effort: surface the traceback in the grid message area
        # instead of failing the whole request.
        import traceback;
        msg = "Oops...\n" + traceback.format_exc() + "\n"
        data['message'] = _(to_unicode(msg))
        self.log.debug(data['message'])
        return 'droplet_grid.html', data, None

    paginator = None
    if limit > 0:
        paginator = Paginator(rows, page - 1, limit, total)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', droplet_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', droplet_href(page=page - 1),
                     _('Previous Page'))
        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([droplet_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items

    # Place retrieved columns in groups, according to naming conventions
    # * _col_ means fullrow, i.e. a group with one header
    # * col_ means finish the current group and start a new one
    header_groups = [[]]
    for field in columns:
        header = {
            'col': field.name,
            'title': field.label,
            'hidden': False,
            'asc': None,
        }
        if field.name == sort:
            header['asc'] = asc
        header_group = header_groups[-1]
        header_group.append(header)

    # Structure the rows and cells:
    # - group rows according to __group__ value, if defined
    # - group cells the same way headers are grouped
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, item in enumerate(rows):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                col = header['col']
                field = self.fields[col]
                value = field.get(item, req)
                cell = {'value': value, 'header': header,
                        'index': col_idx}
                col_idx += 1
                # Detect and create new group
                if col == groupby and value != prev_group_value:
                    prev_group_value = value
                    row_groups.append((value, []))
                # Other row properties
                row['__idx__'] = row_idx
                if col == self.id_field:
                    row['id'] = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        # Per-droplet permission check; unauthorized rows are dropped.
        resource = Resource('cloud', '%s/%s' % (self.name, row['id']))
        if 'CLOUD_VIEW' not in req.perm(resource):
            continue
        authorized_results.append(item)
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)

    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows,
                 'sorting_enabled': len(row_groups) == 1})

    # FIXME: implement formats
    # if format == 'rss':
    #     data['email_map'] = Chrome(self.env).get_email_map()
    #     data['context'] = Context.from_request(req, report_resource,
    #                                            absurls=True)
    #     return 'report.rss', data, 'application/rss+xml'
    # elif format == 'csv':
    #     filename = id and 'report_%s.csv' % id or 'report.csv'
    #     self._send_csv(req, cols, authorized_results,
    #                    mimetype='text/csv', filename=filename)
    # elif format == 'tab':
    #     filename = id and 'report_%s.tsv' % id or 'report.tsv'
    #     self._send_csv(req, cols, authorized_results, '\t',
    #                    mimetype='text/tab-separated-values',
    #                    filename=filename)
    # else:
    page = max is not None and page or None
    add_link(req, 'alternate', droplet_href(format='rss', page=None),
             _('RSS Feed'), 'application/rss+xml', 'rss')
    add_link(req, 'alternate', droplet_href(format='csv', page=page),
             _('Comma-delimited Text'), 'text/plain')
    add_link(req, 'alternate', droplet_href(format='tab', page=page),
             _('Tab-delimited Text'), 'text/plain')
    self.log.debug('Rendered grid')
    return 'droplet_grid.html', data, None
def findreasonvalue(self, name):
    """Return the karma value recorded under *name* in ``self.reasons``,
    converted to an int, or 0 when absent or unparseable."""
    for entry in self.reasons:
        if entry[0] == name:
            return as_int(entry[1], 0)
    return 0
def _render_view(self, req, id):
    """Retrieve the report results and pre-process them for rendering.

    Handles redirection of saved custom queries to the query module,
    the sql/rss/csv/tab output formats, pagination, optional in-memory
    re-sorting, column header grouping and row grouping.
    """
    title, description, sql = self.get_report(id)
    try:
        args = self.get_var_args(req)
    except ValueError as e:
        raise TracError(_("Report failed: %(error)s", error=e))

    # If this is a saved custom query, redirect to the query module
    #
    # A saved query is either an URL query (?... or query:?...),
    # or a query language expression (query:...).
    #
    # It may eventually contain newlines, for increased clarity.
    #
    query = ''.join([line.strip() for line in sql.splitlines()])
    if query and (query[0] == '?' or query.startswith('query:?')):
        query = query if query[0] == '?' else query[6:]
        report_id = 'report=%s' % id
        if 'report=' in query:
            if not report_id in query:
                err = _('When specified, the report number should be '
                        '"%(num)s".', num=id)
                req.redirect(req.href.report(id, action='edit',
                                             error=err))
        else:
            if query[-1] != '?':
                query += '&'
            query += report_id
        req.redirect(req.href.query() + quote_query_string(query))
    elif query.startswith('query:'):
        try:
            # Late import to avoid a circular dependency at module load.
            from trac.ticket.query import Query, QuerySyntaxError
            query = Query.from_string(self.env, query[6:], report=id)
            req.redirect(query.get_href(req.href))
        except QuerySyntaxError as e:
            req.redirect(req.href.report(id, action='edit',
                                         error=to_unicode(e)))

    format = req.args.get('format')
    if format == 'sql':
        self._send_sql(req, id, title, description, sql)

    title = '{%i} %s' % (id, title)

    report_resource = Resource('report', id)
    req.perm(report_resource).require('REPORT_VIEW')
    context = web_context(req, report_resource)

    page = int(req.args.get('page', '1'))
    # Page size depends on the output format; 0 disables paging.
    default_max = {'rss': self.items_per_page_rss,
                   'csv': 0, 'tab': 0}.get(format, self.items_per_page)
    max = req.args.get('max')
    limit = as_int(max, default_max, min=0)  # explict max takes precedence
    offset = (page - 1) * limit
    sort_col = req.args.get('sort', '')
    asc = req.args.get('asc', 1)
    asc = bool(int(asc))  # string '0' or '1' to int/boolean

    def report_href(**kwargs):
        """Generate links to this report preserving user variables,
        and sorting and paging variables.
        """
        params = args.copy()
        if sort_col:
            params['sort'] = sort_col
        params['page'] = page
        if max:
            params['max'] = max
        params.update(kwargs)
        params['asc'] = '1' if params.get('asc', asc) else '0'
        return req.href.report(id, params)

    data = {'action': 'view',
            'report': {'id': id, 'resource': report_resource},
            'context': context,
            'title': sub_vars(title, args),
            'description': sub_vars(description or '', args),
            'max': limit,
            'args': args,
            'show_args_form': False,
            'message': None,
            'paginator': None,
            'report_href': report_href,
            }

    res = self.execute_paginated_report(req, id, sql, args, limit, offset)

    # A 2-tuple result signals a failed execution: (exception, sql).
    if len(res) == 2:
        e, sql = res
        data['message'] = \
            tag_("Report execution failed: %(error)s %(sql)s",
                 error=tag.pre(exception_to_unicode(e)),
                 sql=tag(tag.hr(),
                         tag.pre(sql, style="white-space: pre")))
        return 'report_view.html', data, None

    cols, results, num_items, missing_args, limit_offset = res
    need_paginator = limit > 0 and limit_offset
    # limit_offset is None when the backend could not page/sort itself,
    # so sorting must be redone in memory below.
    need_reorder = limit_offset is None
    results = [list(row) for row in results]
    numrows = len(results)

    paginator = None
    if need_paginator:
        paginator = Paginator(results, page - 1, limit, num_items)
        data['paginator'] = paginator
        if paginator.has_next_page:
            add_link(req, 'next', report_href(page=page + 1),
                     _('Next Page'))
        if paginator.has_previous_page:
            add_link(req, 'prev', report_href(page=page - 1),
                     _('Previous Page'))

        pagedata = []
        shown_pages = paginator.get_shown_pages(21)
        for p in shown_pages:
            pagedata.append([report_href(page=p), None, str(p),
                             _('Page %(num)d', num=p)])
        fields = ['href', 'class', 'string', 'title']
        paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
        paginator.current_page = {'href': None, 'class': 'current',
                                  'string': str(paginator.page + 1),
                                  'title': None}
        numrows = paginator.num_items

    # Place retrieved columns in groups, according to naming conventions
    # * _col_ means fullrow, i.e. a group with one header
    # * col_ means finish the current group and start a new one
    field_labels = TicketSystem(self.env).get_ticket_field_labels()
    header_groups = [[]]
    for idx, col in enumerate(cols):
        if col in field_labels:
            title = field_labels[col]
        else:
            title = col.strip('_').capitalize()
        header = {
            'col': col,
            'title': title,
            'hidden': False,
            'asc': None,
        }
        if col == sort_col:
            header['asc'] = asc
            if not paginator and need_reorder:
                # this dict will have enum values for sorting
                # and will be used in sortkey(), if non-empty:
                sort_values = {}
                if sort_col in ('status', 'resolution', 'priority',
                                'severity'):
                    # must fetch sort values for that columns
                    # instead of comparing them as strings
                    with self.env.db_query as db:
                        for name, value in db(
                                "SELECT name, %s FROM enum WHERE type=%%s"
                                % db.cast('value', 'int'),
                                (sort_col,)):
                            sort_values[name] = value

                def sortkey(row):
                    val = row[idx]
                    # check if we have sort_values, then use them as keys.
                    if sort_values:
                        return sort_values.get(val)
                    # otherwise, continue with string comparison:
                    if isinstance(val, basestring):
                        val = val.lower()
                    return val
                results = sorted(results, key=sortkey,
                                 reverse=(not asc))
        header_group = header_groups[-1]
        if col.startswith('__') and col.endswith('__'):  # __col__
            header['hidden'] = True
        elif col[0] == '_' and col[-1] == '_':  # _col_
            header_group = []
            header_groups.append(header_group)
            header_groups.append([])
        elif col[0] == '_':  # _col
            header['hidden'] = True
        elif col[-1] == '_':  # col_
            header_groups.append([])
        header_group.append(header)

    # Structure the rows and cells:
    # - group rows according to __group__ value, if defined
    # - group cells the same way headers are grouped
    chrome = Chrome(self.env)
    row_groups = []
    authorized_results = []
    prev_group_value = None
    for row_idx, result in enumerate(results):
        col_idx = 0
        cell_groups = []
        row = {'cell_groups': cell_groups}
        realm = self.realm
        parent_realm = ''
        parent_id = ''
        email_cells = []
        for header_group in header_groups:
            cell_group = []
            for header in header_group:
                value = cell_value(result[col_idx])
                cell = {'value': value, 'header': header,
                        'index': col_idx}
                col = header['col']
                col_idx += 1
                # Detect and create new group
                if col == '__group__' and value != prev_group_value:
                    prev_group_value = value
                    # Brute force handling of email in group by header
                    row_groups.append(
                        (value and chrome.format_author(req, value), []))
                # Other row properties
                row['__idx__'] = row_idx
                if col in self._html_cols:
                    row[col] = value
                if col in ('report', 'ticket', 'id', '_id'):
                    row['id'] = value
                # Special casing based on column name
                col = col.strip('_')
                if col in ('reporter', 'cc', 'owner'):
                    email_cells.append(cell)
                elif col == 'realm':
                    realm = value
                elif col == 'parent_realm':
                    parent_realm = value
                elif col == 'parent_id':
                    parent_id = value
                cell_group.append(cell)
            cell_groups.append(cell_group)
        if parent_realm:
            resource = Resource(realm, row.get('id'),
                                parent=Resource(parent_realm, parent_id))
        else:
            resource = Resource(realm, row.get('id'))
        # FIXME: for now, we still need to hardcode the realm in the action
        if resource.realm.upper() + '_VIEW' not in req.perm(resource):
            continue
        authorized_results.append(result)
        if email_cells:
            for cell in email_cells:
                emails = chrome.format_emails(context.child(resource),
                                              cell['value'])
                result[cell['index']] = cell['value'] = emails
        row['resource'] = resource
        if row_groups:
            row_group = row_groups[-1][1]
        else:
            row_group = []
            row_groups = [(None, row_group)]
        row_group.append(row)

    data.update({'header_groups': header_groups,
                 'row_groups': row_groups,
                 'numrows': numrows})

    if format == 'rss':
        data['context'] = web_context(req, report_resource,
                                      absurls=True)
        return 'report.rss', data, 'application/rss+xml'
    elif format == 'csv':
        filename = 'report_%s.csv' % id if id else 'report.csv'
        self._send_csv(req, cols, authorized_results,
                       mimetype='text/csv', filename=filename)
    elif format == 'tab':
        filename = 'report_%s.tsv' % id if id else 'report.tsv'
        self._send_csv(req, cols, authorized_results, '\t',
                       mimetype='text/tab-separated-values',
                       filename=filename)
    else:
        p = page if max is not None else None
        add_link(req, 'alternate',
                 auth_link(req, report_href(format='rss', page=None)),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        add_link(req, 'alternate', report_href(format='csv', page=p),
                 _('Comma-delimited Text'), 'text/plain')
        add_link(req, 'alternate', report_href(format='tab', page=p),
                 _('Tab-delimited Text'), 'text/plain')
        if 'REPORT_SQL_VIEW' in req.perm('report', id):
            add_link(req, 'alternate',
                     req.href.report(id=id, format='sql'),
                     _('SQL Query'), 'text/plain')

        # reuse the session vars of the query module so that
        # the query navigation links on the ticket can be used to
        # navigate report results as well
        try:
            req.session['query_tickets'] = \
                ' '.join([str(int(row['id']))
                          for rg in row_groups for row in rg[1]])
            req.session['query_href'] = \
                req.session['query_href'] = report_href()
            # Kludge: we have to clear the other query session
            # variables, but only if the above succeeded
            for var in ('query_constraints', 'query_time'):
                if var in req.session:
                    del req.session[var]
        except (ValueError, KeyError):
            pass
        if set(data['args']) - set(['USER']):
            data['show_args_form'] = True
            add_script(req, 'common/js/folding.js')
        if missing_args:
            add_warning(req, _(
                'The following arguments are missing: %(args)s',
                args=", ".join(missing_args)))
    return 'report_view.html', data, None
def process_request(self, req):
    """Render the ticket dependency graph for a ticket or a milestone.

    Sends an image/text representation directly when a ``format`` is
    requested or the path ends in ``/depgraph.png``; otherwise returns
    the interactive ``depgraph.html`` view.
    """
    realm = req.args['realm']
    id_ = req.args['id']

    if not which(self.dot_path):
        raise TracError(_("Path to dot executable is invalid: %(path)s",
                          path=self.dot_path))

    # Urls to generate the depgraph for a ticket is /depgraph/ticketnum
    # Urls to generate the depgraph for a milestone is
    # /depgraph/milestone/milestone_name

    # List of tickets to generate the depgraph.
    if realm == 'milestone':
        # We need to query the list of tickets in the milestone
        query = Query(self.env, constraints={'milestone': [id_]}, max=0)
        tkt_ids = [fields['id'] for fields in query.execute(req)]
    else:
        tid = as_int(id_, None)
        if tid is None:
            raise TracError(tag_("%(id)s is not a valid ticket id.",
                                 id=html.tt(id_)))
        tkt_ids = [tid]

    # The summary argument defines whether we place the ticket id or
    # its summary in the node's label
    label_summary = 0
    if 'summary' in req.args:
        label_summary = int(req.args.get('summary'))

    g = self._build_graph(req, tkt_ids, label_summary=label_summary)
    if req.path_info.endswith('/depgraph.png') or 'format' in req.args:
        format_ = req.args.get('format')
        if format_ == 'text':
            # In case g.__str__ returns unicode, convert it in ascii
            req.send(to_unicode(g).encode('ascii', 'replace'),
                     'text/plain')
        elif format_ == 'debug':
            import pprint
            req.send(
                pprint.pformat(
                    [TicketLinks(self.env, tkt_id)
                     for tkt_id in tkt_ids]
                ),
                'text/plain')
        elif format_ is not None:
            if format_ in self.acceptable_formats:
                req.send(g.render(self.dot_path, format_), 'text/plain')
            else:
                raise TracError(_("The %(format)s format is not allowed.",
                                  format=format_))

        if self.use_gs:
            # Render via ghostscript for anti-aliased PNG output.
            ps = g.render(self.dot_path, 'ps2')
            gs = subprocess.Popen(
                [self.gs_path, '-q', '-dTextAlphaBits=4',
                 '-dGraphicsAlphaBits=4', '-sDEVICE=png16m',
                 '-sOutputFile=%stdout%', '-'],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            img, err = gs.communicate(ps)
            if err:
                self.log.debug('MasterTickets: Error from gs: %s', err)
        else:
            img = g.render(self.dot_path)
        req.send(img, 'image/png')
    else:
        data = {}

        # Add a context link to enable/disable labels in nodes.
        if label_summary:
            add_ctxtnav(req, 'Without labels',
                        req.href(req.path_info, summary=0))
        else:
            add_ctxtnav(req, 'With labels',
                        req.href(req.path_info, summary=1))

        if realm == 'milestone':
            add_ctxtnav(req, 'Back to Milestone: %s' % id_,
                        req.href.milestone(id_))
            data['milestone'] = id_
        else:
            data['ticket'] = id_
            add_ctxtnav(req, 'Back to Ticket #%s' % id_,
                        req.href.ticket(id_))
        data['graph'] = g
        data['graph_render'] = functools.partial(g.render, self.dot_path)
        data['use_gs'] = self.use_gs

        return 'depgraph.html', data, None
# NOTE(review): headerless fragment of a spam-filter karma-evaluation
# routine — `score`, `obvious`, `author`, `ip`, `headers`, `content`,
# `reasons` and `abbrev` are bound earlier, outside this view.
authenticated = req.authname and req.authname != 'anonymous'
rejected = score < self.min_karma
# Record the submission unless it is an "obvious" anonymous reject and
# obvious-entry logging is disabled.
if not self.nolog_obvious or not obvious or authenticated or not rejected:
    LogEntry(self.env, time.time(), req.path_info, author, authenticated,
             ip, headers, content, rejected, score, reasons,
             [req.path_info, req.args]).insert()
else:
    self.log.debug('Skip %s %s %d', author, req.remote_addr, score)
type = "spam" if rejected else "ham"
status = "delete"
# Per-strategy accounting: compare each strategy's karma contribution
# against the overall verdict.
for strategy in self.strategies:
    count = 0
    name = get_strategy_name(strategy)
    for r in reasons:
        if r[0] == name:
            count = as_int(r[1], 0)
            break
    if count:
        # Negative karma means the strategy voted "spam"; "ok" when its
        # vote agrees with the final rejected/accepted outcome.
        spamstatus = count < 0
        self._record_action(status, type,
                            ("ok" if spamstatus == rejected else "error"),
                            strategy, 0)
    else:
        self._record_action(status, type, '', strategy, 0)
self._record_action(status, type, '', '', 0)
# Trim old entries from the log on every evaluation.
LogEntry.purge(self.env, self.purge_age)
if score < self.min_karma:
    self.log.debug('Rejecting submission %r by "%s" (%r) because it '
                   'earned only %d karma points (%d are required).',
                   abbrev, author, req.remote_addr, score, self.min_karma)
    rejects = []
def process_request(self, req):
    """Assemble and render the timeline of events.

    Collects events from all registered providers for the requested
    period, filters them by author and by enabled filters, and renders
    either the RSS feed or the HTML timeline page.
    """
    req.perm('timeline').require('TIMELINE_VIEW')

    format = req.args.get('format')
    maxrows = int(req.args.get('max', 50 if format == 'rss' else 0))
    lastvisit = int(req.session.get('timeline.lastvisit', '0'))

    # indication of new events is unchanged when form is updated by user
    revisit = any(a in req.args for a in ['update', 'from', 'daysback',
                                          'author'])
    if revisit:
        lastvisit = int(req.session.get('timeline.nextlastvisit',
                                        lastvisit))

    # Parse the from date and adjust the timestamp to the last second of
    # the day
    fromdate = today = datetime.now(req.tz)
    yesterday = to_datetime(today.replace(tzinfo=None) -
                            timedelta(days=1), req.tz)
    precisedate = precision = None
    if 'from' in req.args:
        # Acquire from date only from non-blank input
        reqfromdate = req.args['from'].strip()
        if reqfromdate:
            try:
                precisedate = user_time(req, parse_date, reqfromdate)
            except TracError as e:
                # Invalid date: warn the user but fall back to today.
                add_warning(req, e)
            else:
                fromdate = precisedate.astimezone(req.tz)
        precision = req.args.get('precision', '')
        if precision.startswith('second'):
            precision = timedelta(seconds=1)
        elif precision.startswith('minute'):
            precision = timedelta(minutes=1)
        elif precision.startswith('hour'):
            precision = timedelta(hours=1)
        else:
            precision = None
    fromdate = to_datetime(datetime(fromdate.year, fromdate.month,
                                    fromdate.day, 23, 59, 59, 999999),
                           req.tz)

    # Period length: request arg, then session, then configured default;
    # clamped to [0, max_daysback].
    daysback = as_int(req.args.get('daysback'),
                      90 if format == 'rss' else None)
    if daysback is None:
        daysback = as_int(req.session.get('timeline.daysback'), None)
    if daysback is None:
        daysback = self.default_daysback
    daysback = max(0, daysback)
    if self.max_daysback >= 0:
        daysback = min(self.max_daysback, daysback)

    authors = req.args.get('authors')
    if authors is None and format != 'rss':
        authors = req.session.get('timeline.authors')
    authors = (authors or '').strip()

    data = {'fromdate': fromdate, 'daysback': daysback,
            'authors': authors,
            'today': user_time(req, format_date, today),
            'yesterday': user_time(req, format_date, yesterday),
            'precisedate': precisedate, 'precision': precision,
            'events': [], 'filters': [],
            'abbreviated_messages': self.abbreviated_messages,
            'lastvisit': lastvisit}

    available_filters = []
    for event_provider in self.event_providers:
        available_filters += \
            event_provider.get_timeline_filters(req) or []

    # check the request or session for enabled filters, or use default
    filters = [f[0] for f in available_filters if f[0] in req.args]
    if not filters and format != 'rss':
        filters = [f[0] for f in available_filters
                   if req.session.get('timeline.filter.' + f[0]) == '1']
    if not filters:
        filters = [f[0] for f in available_filters
                   if len(f) == 2 or f[2]]

    # save the results of submitting the timeline form to the session
    if 'update' in req.args:
        for filter_ in available_filters:
            key = 'timeline.filter.%s' % filter_[0]
            if filter_[0] in req.args:
                req.session[key] = '1'
            elif key in req.session:
                del req.session[key]

    stop = fromdate
    start = to_datetime(stop.replace(tzinfo=None) -
                        timedelta(days=daysback + 1), req.tz)

    # create author include and exclude sets
    include = set()
    exclude = set()
    for match in self._authors_pattern.finditer(authors):
        name = (match.group(2) or match.group(3) or
                match.group(4)).lower()
        if match.group(1):
            exclude.add(name)
        else:
            include.add(name)

    # gather all events for the given period of time
    events = []
    for provider in self.event_providers:
        try:
            for event in provider.get_timeline_events(req, start, stop,
                                                      filters) or []:
                author = (event[2] or '').lower()
                if (not include or author in include) \
                        and author not in exclude:
                    events.append(self._event_data(provider, event))
        except Exception as e:
            # cope with a failure of that provider
            self._provider_failure(e, req, provider, filters,
                                   [f[0] for f in available_filters])

    # prepare sorted global list
    events = sorted(events, key=lambda e: e['date'], reverse=True)
    if maxrows:
        events = events[:maxrows]
    data['events'] = events

    if format == 'rss':
        rss_context = web_context(req, absurls=True)
        rss_context.set_hints(wiki_flavor='html', shorten_lines=False)
        data['context'] = rss_context
        return 'timeline.rss', data, 'application/rss+xml'
    else:
        # Persist the user's choices for the next visit.
        req.session.set('timeline.daysback', daysback,
                        self.default_daysback)
        req.session.set('timeline.authors', authors, '')
        # store lastvisit
        if events and not revisit:
            lastviewed = to_utimestamp(events[0]['date'])
            req.session['timeline.lastvisit'] = max(lastvisit,
                                                    lastviewed)
            req.session['timeline.nextlastvisit'] = lastvisit
        html_context = web_context(req)
        html_context.set_hints(wiki_flavor='oneliner',
                               shorten_lines=self.abbreviated_messages)
        data['context'] = html_context

    add_stylesheet(req, 'common/css/timeline.css')
    rss_href = req.href.timeline([(f, 'on') for f in filters],
                                 daysback=90, max=50, authors=authors,
                                 format='rss')
    add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
             'application/rss+xml', 'rss')
    Chrome(self.env).add_jquery_ui(req)

    for filter_ in available_filters:
        data['filters'].append({'name': filter_[0],
                                'label': filter_[1],
                                'enabled': filter_[0] in filters})

    # Navigation to the previous/next period of 'daysback' days
    previous_start = fromdate.replace(tzinfo=None) - \
                     timedelta(days=daysback + 1)
    previous_start = format_date(to_datetime(previous_start, req.tz),
                                 format='%Y-%m-%d', tzinfo=req.tz)
    add_link(req, 'prev', req.href.timeline(from_=previous_start,
                                            authors=authors,
                                            daysback=daysback),
             _("Previous Period"))
    if today - fromdate > timedelta(days=0):
        next_start = fromdate.replace(tzinfo=None) + \
                     timedelta(days=daysback + 1)
        next_start = format_date(to_datetime(next_start, req.tz),
                                 format='%Y-%m-%d', tzinfo=req.tz)
        add_link(req, 'next', req.href.timeline(from_=next_start,
                                                authors=authors,
                                                daysback=daysback),
                 _("Next Period"))
    prevnext_nav(req, _("Previous Period"), _("Next Period"))

    return 'timeline.html', data, None
def process_request(self, req):
    """Render the timeline view (or RSS feed) for the requested period.

    Reads the filtering parameters (``from``, ``daysback``, ``authors``,
    ``max``, ``format`` and the per-filter toggles) from the request and
    the session, then collects matching events from every registered
    event provider into ``data['events']``.

    Fixes vs. the previous revision: py3-compatible ``except ... as e``
    (matching the other copy of this handler in this file), loop variable
    renamed ``filter`` -> ``filter_`` to stop shadowing the builtin, and
    ``not author in`` rewritten as ``author not in``.
    """
    req.perm.assert_permission('TIMELINE_VIEW')

    format = req.args.get('format')
    maxrows = int(req.args.get('max', 50 if format == 'rss' else 0))
    lastvisit = int(req.session.get('timeline.lastvisit', '0'))

    # indication of new events is unchanged when form is updated by user
    revisit = any(a in req.args for a in ['update', 'from', 'daysback',
                                          'author'])
    if revisit:
        lastvisit = int(req.session.get('timeline.nextlastvisit',
                                        lastvisit))

    # Parse the from date and adjust the timestamp to the last second of
    # the day
    fromdate = today = datetime.now(req.tz)
    yesterday = to_datetime(today.replace(tzinfo=None) -
                            timedelta(days=1), req.tz)
    precisedate = precision = None
    if 'from' in req.args:
        # Acquire from date only from non-blank input
        reqfromdate = req.args['from'].strip()
        if reqfromdate:
            precisedate = user_time(req, parse_date, reqfromdate)
            fromdate = precisedate.astimezone(req.tz)
        precision = req.args.get('precision', '')
        if precision.startswith('second'):
            precision = timedelta(seconds=1)
        elif precision.startswith('minute'):
            precision = timedelta(minutes=1)
        elif precision.startswith('hour'):
            precision = timedelta(hours=1)
        else:
            precision = None
    # Include events up to the very last microsecond of the selected day.
    fromdate = to_datetime(datetime(fromdate.year, fromdate.month,
                                    fromdate.day, 23, 59, 59, 999999),
                           req.tz)

    # Period length: request arg, then session, then configured default,
    # clamped to [0, max_daysback] (negative max_daysback means no cap).
    daysback = as_int(req.args.get('daysback'),
                      90 if format == 'rss' else None)
    if daysback is None:
        daysback = as_int(req.session.get('timeline.daysback'), None)
    if daysback is None:
        daysback = self.default_daysback
    daysback = max(0, daysback)
    if self.max_daysback >= 0:
        daysback = min(self.max_daysback, daysback)

    authors = req.args.get('authors')
    if authors is None and format != 'rss':
        authors = req.session.get('timeline.authors')
    authors = (authors or '').strip()

    data = {'fromdate': fromdate, 'daysback': daysback,
            'authors': authors,
            'today': user_time(req, format_date, today),
            'yesterday': user_time(req, format_date, yesterday),
            'precisedate': precisedate, 'precision': precision,
            'events': [], 'filters': [],
            'abbreviated_messages': self.abbreviated_messages,
            'lastvisit': lastvisit}

    available_filters = []
    for event_provider in self.event_providers:
        available_filters += event_provider.get_timeline_filters(req) or []

    # check the request or session for enabled filters, or use default
    filters = [f[0] for f in available_filters if f[0] in req.args]
    if not filters and format != 'rss':
        filters = [f[0] for f in available_filters
                   if req.session.get('timeline.filter.' + f[0]) == '1']
    if not filters:
        filters = [f[0] for f in available_filters if len(f) == 2 or f[2]]

    # save the results of submitting the timeline form to the session
    if 'update' in req.args:
        for filter_ in available_filters:
            key = 'timeline.filter.%s' % filter_[0]
            if filter_[0] in req.args:
                req.session[key] = '1'
            elif key in req.session:
                del req.session[key]

    stop = fromdate
    start = to_datetime(stop.replace(tzinfo=None) -
                        timedelta(days=daysback + 1), req.tz)

    # create author include and exclude sets
    include = set()
    exclude = set()
    for match in self._authors_pattern.finditer(authors):
        name = (match.group(2) or match.group(3) or match.group(4)).lower()
        if match.group(1):
            exclude.add(name)
        else:
            include.add(name)

    # gather all events for the given period of time
    events = []
    for provider in self.event_providers:
        try:
            for event in provider.get_timeline_events(req, start, stop,
                                                      filters) or []:
                # Check for 0.10 events: 4-/5-tuples carry the author in
                # slot 2, the newer 6-tuples in slot 4.
                author = (event[2 if len(event) < 6 else 4] or '').lower()
                if (not include or author in include) \
                        and author not in exclude:
                    events.append(self._event_data(provider, event))
        except Exception as e:
            # cope with a failure of that provider
            self._provider_failure(e, req, provider, filters,
                                   [f[0] for f in available_filters])
    # NOTE(review): this copy of the handler ends here (implicitly
    # returning None); the sibling version elsewhere in this file goes on
    # to sort events and render a template — confirm which is current.
def get_user_attribute(env, username=None, authenticated=1, attribute=None,
                       value=None):
    """Return user attributes.

    Each non-``None`` argument becomes an equality constraint on the
    ``session_attribute`` table; the remaining columns are selected.  The
    result maps ``sid -> {authenticated -> {name: value, 'id': {...}}}``
    with stable MD5-based ids per attribute and per account state.  When
    all four columns are constrained, a one-element list holding the
    matching row count is returned instead.
    """

    def _digest(*parts):
        # Deterministic ID for a (sid, authenticated[, name]) combination.
        h = hashlib.md5()
        h.update(''.join(parts).encode('utf-8'))
        return h.hexdigest()

    # Collect WHERE-clause columns and their bind values.
    where_cols = []
    where_vals = []
    if username is not None:
        where_cols.append('sid')
        where_vals.append(username)
    if authenticated is not None:
        where_cols.append('authenticated')
        where_vals.append(as_int(authenticated, 0, min=0, max=1))
    if attribute is not None:
        where_cols.append('name')
        where_vals.append(attribute)
    if value is not None:
        where_cols.append('value')
        where_vals.append(to_unicode(value))

    # Select whatever is not constrained; always fetch 'sid' so rows can
    # be keyed per account.
    select_cols = [col for col in ('sid', 'authenticated', 'name', 'value')
                   if col not in where_cols]
    if not select_cols:
        # No variable left, so only COUNTing is as a sensible task here.
        select_clause = 'COUNT(*)'
    else:
        if 'sid' not in select_cols:
            select_cols.append('sid')
        select_clause = ','.join(select_cols)
    where_clause = ('WHERE ' + '=%s AND '.join(where_cols) + '=%s'
                    if where_cols else '')
    sql = """
        SELECT %s
          FROM session_attribute
        %s
        """ % (select_clause, where_clause)

    result = {}
    for row in env.db_query(sql, tuple(where_vals)):
        if select_clause == 'COUNT(*)':
            return [row[0]]
        # Selected columns plus the constraint constants give a full row.
        record = dict(zip(select_cols, row))
        record.update(zip(where_cols, where_vals))
        sid = record.pop('sid')
        authenticated = record.pop('authenticated')
        name = record['name']
        attr_id = _digest(sid, str(authenticated), name)
        per_sid = result.get(sid)
        if per_sid is None:
            # First attribute seen for this account: create both the
            # per-state bucket and the account-state ID.
            result[sid] = {
                authenticated: {name: record['value'],
                                'id': {name: attr_id}},
                'id': {authenticated: _digest(sid, str(authenticated))}
            }
        elif authenticated in per_sid:
            per_sid[authenticated][name] = record['value']
            per_sid[authenticated]['id'][name] = attr_id
        else:
            # Known account, new authentication state.
            per_sid[authenticated] = {name: record['value'],
                                      'id': {name: attr_id}}
            per_sid['id'][authenticated] = _digest(sid, str(authenticated))
    return result
req.redirect(req.href.report(id, action="edit", error=to_unicode(e))) format = req.args.get("format") if format == "sql": self._send_sql(req, id, title, description, sql) title = "{%i} %s" % (id, title) report_resource = Resource("report", id) req.perm.require("REPORT_VIEW", report_resource) context = web_context(req, report_resource) page = int(req.args.get("page", "1")) default_max = {"rss": self.items_per_page_rss, "csv": 0, "tab": 0}.get(format, self.items_per_page) max = req.args.get("max") limit = as_int(max, default_max, min=0) # explict max takes precedence offset = (page - 1) * limit sort_col = req.args.get("sort", "") asc = req.args.get("asc", 1) asc = bool(int(asc)) # string '0' or '1' to int/boolean def report_href(**kwargs): """Generate links to this report preserving user variables, and sorting and paging variables. """ params = args.copy() if sort_col: params["sort"] = sort_col params["page"] = page if max: