class SQLiteConnector(Component):
    """Database connector for SQLite.

    Database URLs should be of the form:
    {{{
    sqlite:path/to/trac.db
    }}}
    """
    implements(IDatabaseConnector)

    extensions = ListOption('sqlite', 'extensions',
        doc="""Paths to sqlite extensions, relative to Trac environment's
        directory or absolute. (''since 0.12'')""")

    def __init__(self):
        self._version = None      # pysqlite version string, resolved lazily
        self.error = None         # human-readable reason SQLite is unusable
        self._extensions = None   # resolved extension paths, computed lazily

    def get_supported_schemes(self):
        """Yield the supported URL scheme with its priority.

        A negative priority marks the scheme as unusable; the reason is
        stored in `self.error`.
        """
        if not have_pysqlite:
            self.error = _("Cannot load Python bindings for SQLite")
        elif sqlite_version >= (3, 3, 3) and sqlite.version_info[0] == 2 and \
                sqlite.version_info < (2, 0, 7):
            self.error = _("Need at least PySqlite %(version)s or higher",
                           version='2.0.7')
        elif (2, 5, 2) <= sqlite.version_info < (2, 5, 5):
            self.error = _("PySqlite 2.5.2 - 2.5.4 break Trac, please use "
                           "2.5.5 or higher")
        yield ('sqlite', self.error and -1 or 1)

    def get_connection(self, path, log=None, params=None):
        """Return a `SQLiteConnection` to the database at `path`.

        :param params: optional dict of connection parameters; the resolved
            extension paths are stored under its 'extensions' key.
        """
        # FIX: the default used to be a shared mutable dict (`params={}`)
        # which every call mutated below; use a per-call fresh dict instead.
        if params is None:
            params = {}
        if not self._version:
            self._version = get_pkginfo(sqlite).get(
                'version', '%d.%d.%s' % sqlite.version_info)
            self.env.systeminfo.extend([('SQLite', sqlite_version_string),
                                        ('pysqlite', self._version)])
            self.required = True
        # construct list of sqlite extension libraries (resolved only once)
        if self._extensions is None:
            self._extensions = []
            for extpath in self.extensions:
                if not os.path.isabs(extpath):
                    extpath = os.path.join(self.env.path, extpath)
                self._extensions.append(extpath)
        params['extensions'] = self._extensions
        return SQLiteConnection(path, log, params)

    def init_db(self, path, log=None, params=None):
        """Create the database file and populate it with the default schema.

        :raises TracError: if a database already exists at `path`.
        """
        # FIX: same shared-mutable-default hazard as in get_connection().
        if params is None:
            params = {}
        if path != ':memory:':
            # make the directory to hold the database
            if os.path.exists(path):
                raise TracError(_('Database already exists at %(path)s',
                                  path=path))
            db_dir = os.path.dirname(path)  # renamed: don't shadow `dir`
            if not os.path.exists(db_dir):
                os.makedirs(db_dir)
            if isinstance(path, unicode):  # needed with 2.4.0
                path = path.encode('utf-8')
        cnx = sqlite.connect(path, timeout=int(params.get('timeout', 10000)))
        cursor = cnx.cursor()
        from trac.db_default import schema
        for table in schema:
            for stmt in self.to_sql(table):
                cursor.execute(stmt)
        cnx.commit()

    def to_sql(self, table):
        """Return the SQL statements creating `table` for SQLite."""
        return _to_sql(table)

    def alter_column_types(self, table, columns):
        """Yield SQL statements altering the type of one or more columns of
        a table.

        Type changes are specified as a `columns` dict mapping column names
        to `(from, to)` SQL type tuples.  SQLite types sharing a storage
        class are interchangeable, so no statement is ever emitted; any real
        type change is unsupported.
        """
        for name, (from_, to) in sorted(columns.iteritems()):
            if _type_map.get(to, to) != _type_map.get(from_, from_):
                raise NotImplementedError('Conversion from %s to %s is not '
                                          'implemented' % (from_, to))
        return ()

    def backup(self, dest_file):
        """Simple SQLite-specific backup of the database.

        @param dest_file: Destination file basename
        """
        import shutil
        db_str = self.config.get('trac', 'database')
        try:
            # drop any '?key=value' connection parameters from the URL
            db_str = db_str[:db_str.index('?')]
        except ValueError:
            pass
        # db_str[7:] strips the leading 'sqlite:' scheme prefix
        db_name = os.path.join(self.env.path, db_str[7:])
        shutil.copy(db_name, dest_file)
        if not os.path.exists(dest_file):
            raise TracError(_("No destination file created"))
        return dest_file
class LogModule(Component):
    """Renders the revision log (''TracRevisionLog'') of a repository path
    and provides the `log:` wiki link syntax.
    """

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IWikiSyntaxProvider)

    default_log_limit = IntOption(
        'revisionlog', 'default_log_limit', 100,
        """Default value for the limit argument in the TracRevisionLog.
        (''since 0.11'')""")

    graph_colors = ListOption(
        'revisionlog', 'graph_colors',
        ['#cc0', '#0c0', '#0cc', '#00c', '#c0c', '#c00'],
        doc="""Comma-separated list of colors to use for the TracRevisionLog
        graph display. (''since 1.0'')""")

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        # The log view highlights the 'browser' entry in the main navigation.
        return 'browser'

    def get_navigation_items(self, req):
        return []

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['LOG_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        """Match `/log` and `/log/<path>` URLs; the path defaults to '/'."""
        match = re.match(r'/log(/.*)?$', req.path_info)
        if match:
            req.args['path'] = match.group(1) or '/'
            return True

    def process_request(self, req):
        """Render the revision log for the requested path.

        Depending on the `format` request argument the result is the HTML
        log view, an RSS feed, or a plain-text ChangeLog.
        """
        req.perm.require('LOG_VIEW')

        mode = req.args.get('mode', 'stop_on_copy')
        path = req.args.get('path', '/')
        rev = req.args.get('rev')
        stop_rev = req.args.get('stop_rev')
        revs = req.args.get('revs')
        format = req.args.get('format')
        verbose = req.args.get('verbose')
        limit = int(req.args.get('limit') or self.default_log_limit)

        rm = RepositoryManager(self.env)
        reponame, repos, path = rm.get_repository_by_path(path)

        if not repos:
            raise ResourceNotFound(
                _("Repository '%(repo)s' not found", repo=reponame))

        if reponame != repos.reponame:  # Redirect alias
            qs = req.query_string
            req.redirect(req.href.log(repos.reponame or None, path)
                         + ('?' + qs if qs else ''))
        normpath = repos.normalize_path(path)
        # if `revs` parameter is given, then we're restricted to the
        # corresponding revision ranges.
        # If not, then we're considering all revisions since `rev`,
        # on that path, in which case `revranges` will be None.
        revranges = None
        if revs:
            try:
                revranges = Ranges(revs)
                rev = revranges.b
            except ValueError:
                pass
        rev = unicode(repos.normalize_rev(rev))
        display_rev = repos.display_rev

        # The `history()` method depends on the mode:
        # * for ''stop on copy'' and ''follow copies'', it's `Node.history()`
        #   unless explicit ranges have been specified
        # * for ''show only add, delete'' we're using
        #   `Repository.get_path_history()`
        cset_resource = repos.resource.child('changeset')
        show_graph = False
        if mode == 'path_history':
            def history():
                # All changes on the path, filtered by changeset permission.
                for h in repos.get_path_history(path, rev):
                    if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
                        yield h
        elif revranges:
            def history():
                # Walk each requested range newest-first, following copies,
                # and emit a `(path, rev, None)` separator between
                # discontinuous ranges.
                prevpath = path
                expected_next_item = None
                ranges = list(revranges.pairs)
                ranges.reverse()
                for (a, b) in ranges:
                    a = repos.normalize_rev(a)
                    b = repos.normalize_rev(b)
                    while not repos.rev_older_than(b, a):
                        node = get_existing_node(req, repos, prevpath, b)
                        node_history = list(node.get_history(2))
                        p, rev, chg = node_history[0]
                        if repos.rev_older_than(rev, a):
                            break  # simply skip, no separator
                        if 'CHANGESET_VIEW' in req.perm(
                                cset_resource(id=rev)):
                            if expected_next_item:
                                # check whether we're continuing previous
                                # range
                                np, nrev, nchg = expected_next_item
                                if rev != nrev:  # no, we need a separator
                                    yield (np, nrev, None)
                            yield node_history[0]
                        prevpath = node_history[-1][0]  # follow copy
                        b = repos.previous_rev(rev)
                        if len(node_history) > 1:
                            expected_next_item = node_history[-1]
                        else:
                            expected_next_item = None
                if expected_next_item:
                    yield (expected_next_item[0], expected_next_item[1],
                           None)
        else:
            # Graph display only makes sense for the full, non-verbose log
            # of repositories with branching history.
            show_graph = path == '/' and not verbose \
                         and not repos.has_linear_changesets
            def history():
                node = get_existing_node(req, repos, path, rev)
                for h in node.get_history():
                    if 'CHANGESET_VIEW' in req.perm(cset_resource(id=h[1])):
                        yield h

        # -- retrieve history, asking for limit+1 results
        info = []
        depth = 1
        previous_path = normpath
        count = 0
        for old_path, old_rev, old_chg in history():
            if stop_rev and repos.rev_older_than(old_rev, stop_rev):
                break
            old_path = repos.normalize_path(old_path)

            item = {
                'path': old_path, 'rev': old_rev, 'existing_rev': old_rev,
                'change': old_chg, 'depth': depth,
            }

            if old_chg == Changeset.DELETE:
                # the revision at which the now-deleted node last existed
                item['existing_rev'] = repos.previous_rev(old_rev, old_path)
            if not (mode == 'path_history' and old_chg == Changeset.EDIT):
                info.append(item)
            if old_path and old_path != previous_path and \
                    not (mode == 'path_history' and old_path == normpath):
                # a copy/rename was followed: indent one level deeper
                depth += 1
                item['depth'] = depth
                item['copyfrom_path'] = old_path
                if mode == 'stop_on_copy':
                    break
                elif mode == 'path_history':
                    depth -= 1
            if old_chg is None:  # separator entry
                stop_limit = limit
            else:
                count += 1
                stop_limit = limit + 1
            if count >= stop_limit:
                break
            previous_path = old_path
        if info == []:
            node = get_existing_node(req, repos, path, rev)
            if repos.rev_older_than(stop_rev, node.created_rev):
                # FIXME: we should send a 404 error here
                raise TracError(_(
                    "The file or directory '%(path)s' doesn't "
                    "exist at revision %(rev)s or at any previous revision.",
                    path=path, rev=display_rev(rev)), _('Nonexistent path'))

        # Generate graph data
        graph = {}
        if show_graph:
            threads, vertices, columns = \
                make_log_graph(repos, (item['rev'] for item in info))
            graph.update(threads=threads, vertices=vertices, columns=columns,
                         colors=self.graph_colors, line_width=0.04,
                         dot_radius=0.1)
            add_script(req, 'common/js/excanvas.js', ie_if='IE')
            add_script(req, 'common/js/log_graph.js')
            add_script_data(req, graph=graph)

        def make_log_href(path, **args):
            # Build a /log href preserving the current mode/limit/verbosity;
            # omit `rev` when it is the youngest revision.
            link_rev = rev
            if rev == str(repos.youngest_rev):
                link_rev = None
            params = {'rev': link_rev, 'mode': mode, 'limit': limit}
            params.update(args)
            if verbose:
                params['verbose'] = verbose
            return req.href.log(repos.reponame or None, path, **params)

        if format in ('rss', 'changelog'):
            info = [i for i in info if i['change']]  # drop separators
            if info and count > limit:
                del info[-1]
        elif info and count >= limit:
            # stop_limit reached, there _might_ be some more
            next_rev = info[-1]['rev']
            next_path = info[-1]['path']
            next_revranges = None
            if revranges:
                next_revranges = str(revranges.truncate(next_rev))
            if next_revranges or not revranges:
                older_revisions_href = make_log_href(
                    next_path, rev=next_rev, revs=next_revranges)
                add_link(req, 'next', older_revisions_href,
                         _('Revision Log (restarting at %(path)s, '
                           'rev. %(rev)s)',
                           path=next_path, rev=display_rev(next_rev)))
            # only show fully 'limit' results, use `change == None` as a
            # marker
            info[-1]['change'] = None

        revisions = [i['rev'] for i in info]
        changes = get_changes(repos, revisions, self.log)
        extra_changes = {}

        if format == 'changelog':
            # GNU-ChangeLog-style extra data: wrapped message plus the
            # changed files and their actions, per revision.
            for rev in revisions:
                changeset = changes[rev]
                cs = {}
                cs['message'] = wrap(changeset.message, 70,
                                     initial_indent='\t',
                                     subsequent_indent='\t')
                files = []
                actions = []
                for cpath, kind, chg, bpath, brev in \
                        changeset.get_changes():
                    files.append(bpath if chg == Changeset.DELETE else cpath)
                    actions.append(chg)
                cs['files'] = files
                cs['actions'] = actions
                extra_changes[rev] = cs

        data = {
            'context': web_context(req, 'source', path,
                                   parent=repos.resource),
            'reponame': repos.reponame or None, 'repos': repos,
            'path': path, 'rev': rev, 'stop_rev': stop_rev,
            'display_rev': display_rev, 'revranges': revranges,
            'mode': mode, 'verbose': verbose, 'limit': limit,
            'items': info, 'changes': changes,
            'extra_changes': extra_changes, 'graph': graph,
            'wiki_format_messages':
                self.config['changeset'].getbool('wiki_format_messages')
        }

        if format == 'changelog':
            return 'revisionlog.txt', data, 'text/plain'
        elif format == 'rss':
            data['email_map'] = Chrome(self.env).get_email_map()
            data['context'] = web_context(req, 'source', path,
                                          parent=repos.resource,
                                          absurls=True)
            return 'revisionlog.rss', data, 'application/rss+xml'

        # Group items into ranges, splitting on separator entries.
        item_ranges = []
        range = []
        for item in info:
            if item['change'] is None:  # separator
                if range:  # start new range
                    range.append(item)
                    item_ranges.append(range)
                    range = []
            else:
                range.append(item)
        if range:
            item_ranges.append(range)
        data['item_ranges'] = item_ranges

        add_stylesheet(req, 'common/css/diff.css')
        add_stylesheet(req, 'common/css/browser.css')

        path_links = get_path_links(req.href, repos.reponame, path, rev)
        if path_links:
            data['path_links'] = path_links
        if path != '/':
            add_link(req, 'up', path_links[-2]['href'],
                     _('Parent directory'))

        rss_href = make_log_href(path, format='rss', revs=revs,
                                 stop_rev=stop_rev)
        add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
                 'application/rss+xml', 'rss')
        changelog_href = make_log_href(path, format='changelog', revs=revs,
                                       stop_rev=stop_rev)
        add_link(req, 'alternate', changelog_href, _('ChangeLog'),
                 'text/plain')

        add_ctxtnav(req, _('View Latest Revision'),
                    href=req.href.browser(repos.reponame or None, path))
        if 'next' in req.chrome['links']:
            next = req.chrome['links']['next'][0]
            add_ctxtnav(req,
                tag.span(tag.a(_('Older Revisions'), href=next['href']),
                         Markup(' →')))

        return 'revisionlog.html', data, None

    # IWikiSyntaxProvider methods

    REV_RANGE = r"(?:%s|%s)" % (Ranges.RE_STR, ChangesetModule.CHANGESET_ID)
    # int rev ranges or any kind of rev

    def get_wiki_syntax(self):
        """Yield regexp/formatter pairs for the `[1:2]` and `r1:2` log
        shorthands.
        """
        yield (
            # [...] form, starts with optional intertrac: [T... or [trac ...
            r"!?\[(?P<it_log>%s\s*)" % WikiParser.INTERTRAC_SCHEME +
            # <from>:<to> + optional path restriction
            r"(?P<log_revs>%s)(?P<log_path>[/?][^\]]*)?\]" % self.REV_RANGE,
            lambda x, y, z: self._format_link(x, 'log1', y[1:-1], y, z))

        yield (
            # r<from>:<to> form + optional path restriction (no intertrac)
            r"(?:\b|!)r%s\b(?:/[a-zA-Z0-9_/+-]+)?" % Ranges.RE_STR,
            lambda x, y, z: self._format_link(x, 'log2', '@' + y[1:], y))

    def get_link_resolvers(self):
        yield ('log', self._format_link)

    def _format_link(self, formatter, ns, match, label, fullmatch=None):
        """Render a `log:` link (or the bracket/`rN:M` shorthands) as an
        anchor, falling back to a "missing" link with the error as tooltip.
        """
        if ns == 'log1':
            groups = fullmatch.groupdict()
            it_log = groups.get('it_log')
            revs = groups.get('log_revs')
            path = groups.get('log_path') or '/'
            target = '%s%s@%s' % (it_log, path, revs)
            # prepending it_log is needed, as the helper expects it there
            intertrac = formatter.shorthand_intertrac_helper(
                'log', target, label, fullmatch)
            if intertrac:
                return intertrac
            path, query, fragment = formatter.split_link(path)
        else:
            assert ns in ('log', 'log2')
            if ns == 'log':
                match, query, fragment = formatter.split_link(match)
            else:
                query = fragment = ''
                match = ''.join(reversed(match.split('/', 1)))
            path = match
            revs = ''
            if self.LOG_LINK_RE.match(match):
                # split on the first ':' or '@' separator present;
                # `False` entries mark absent separators
                indexes = [sep in match and match.index(sep)
                           for sep in ':@']
                idx = min([i for i in indexes if i is not False])
                path, revs = match[:idx], match[idx + 1:]

        rm = RepositoryManager(self.env)
        try:
            reponame, repos, path = rm.get_repository_by_path(path)
            if not reponame:
                reponame = rm.get_default_repository(formatter.context)
                if reponame is not None:
                    repos = rm.get_repository(reponame)

            if repos:
                revranges = None
                if any(c for c in ':-,' if c in revs):
                    revranges = self._normalize_ranges(repos, path, revs)
                    revs = None
                if 'LOG_VIEW' in formatter.perm:
                    if revranges:
                        href = formatter.href.log(repos.reponame or None,
                                                  path or '/',
                                                  revs=str(revranges))
                    else:
                        try:
                            rev = repos.normalize_rev(revs)
                        except NoSuchChangeset:
                            rev = None
                        href = formatter.href.log(repos.reponame or None,
                                                  path or '/', rev=rev)
                    if query and (revranges or revs):
                        query = '&' + query[1:]
                    return tag.a(label, class_='source',
                                 href=href + query + fragment)
                errmsg = _("No permission to view change log")
            elif reponame:
                errmsg = _("Repository '%(repo)s' not found", repo=reponame)
            else:
                errmsg = _("No default repository defined")
        except TracError, e:
            errmsg = to_unicode(e)

        return tag.a(label, class_='missing source', title=errmsg)
class WatchSubscriber(Component):
    """Allows user to subscribe to ticket or wiki notification on a per
    resource basis.  Watch, Unwatch links are added to wiki pages and
    tickets that the user can select to start watching a resource.
    """

    implements(IRequestFilter, IRequestHandler, IAnnouncementSubscriber,
               ITicketChangeListener, IWikiChangeListener)

    watchable_paths = ListOption('announcer', 'watchable_paths',
        'wiki/*,ticket/*',
        doc='List of URL paths to allow watching. Globs are supported.')
    ctxtnav_names = ListOption('announcer', 'ctxtnav_names',
        "Watch This, Unwatch This",
        doc="Text of context navigation entries. "
            "An empty list removes them from the context navigation bar.")

    # Captures the trailing path info, e.g. '/watch/wiki/WikiStart'
    path_match = re.compile(r'/watch(/.*)')

    # IRequestHandler methods

    def match_request(self, req):
        """Handle /watch/<realm>/<target> when the realm is viewable."""
        m = self.path_match.match(req.path_info)
        if m:
            (path_info, ) = m.groups()
            # FIX: use a name other than `_` so the gettext shortcut is not
            # shadowed in this scope.
            realm, _target = self.path_info_to_realm_target(path_info)
            return "%s_VIEW" % realm.upper() in req.perm
        return False

    def process_request(self, req):
        """Toggle the watch state and redirect back to the resource."""
        match = self.path_match.match(req.path_info)
        (path_info, ) = match.groups()
        realm, target = self.path_info_to_realm_target(path_info)
        req.perm.require('%s_VIEW' % realm.upper())
        self.toggle_watched(req.session.sid, req.session.authenticated,
                            realm, target, req)
        req.redirect(req.href(realm, target))

    def toggle_watched(self, sid, authenticated, realm, target, req=None):
        """Watch the resource if unwatched, and vice versa, queuing a
        confirmation notice on `req` (if given).
        """
        if self.is_watching(sid, authenticated, realm, target):
            self.set_unwatch(sid, authenticated, realm, target)
            self._schedule_notice(req, _('You are no longer receiving '
                'change notifications about this resource.'))
        else:
            self.set_watch(sid, authenticated, realm, target)
            self._schedule_notice(req, _('You are now receiving '
                'change notifications about this resource.'))

    def _schedule_notice(self, req, message):
        # FIX: `req` defaults to None in toggle_watched(); only record the
        # notice when there is a session to attach it to (this used to
        # raise AttributeError on None).
        if req is not None:
            req.session['_announcer_watch_message_'] = message

    def _add_notice(self, req):
        # Pop and display the deferred message from _schedule_notice().
        if '_announcer_watch_message_' in req.session:
            add_notice(req, req.session['_announcer_watch_message_'])
            del req.session['_announcer_watch_message_']

    def is_watching(self, sid, authenticated, realm, target):
        """Return True if the session already watches (realm, target)."""
        klass = self.__class__.__name__
        attrs = SubscriptionAttribute.find_by_sid_class_realm_and_target(
            self.env, sid, authenticated, klass, realm, target)
        return bool(attrs)

    def set_watch(self, sid, authenticated, realm, target):
        """Record a watch subscription attribute for the session."""
        klass = self.__class__.__name__
        SubscriptionAttribute.add(self.env, sid, authenticated, klass,
                                  realm, (target, ))

    def set_unwatch(self, sid, authenticated, realm, target):
        """Delete the watch subscription attribute(s) for the session.

        FIX: the previous `(attr, ) = find(...)` unpacking raised
        ValueError whenever zero or more than one row matched; deleting
        whatever is found is equivalent for the single-row case and safe
        otherwise.
        """
        klass = self.__class__.__name__
        attrs = SubscriptionAttribute.find_by_sid_class_realm_and_target(
            self.env, sid, authenticated, klass, realm, target)
        for attr in attrs:
            SubscriptionAttribute.delete(self.env, attr['id'])

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        self._add_notice(req)
        if req.authname != "anonymous" or 'email' in req.session:
            for pattern in self.watchable_paths:
                realm, target = self.path_info_to_realm_target(
                    req.path_info)
                if fnmatch('%s/%s' % (realm, target), pattern):
                    if '%s_VIEW' % realm.upper() not in req.perm:
                        return (template, data, content_type)
                    self.render_watcher(req)
                    break
        return (template, data, content_type)

    # Internal methods

    def render_watcher(self, req):
        """Add the Watch/Unwatch entry to the context navigation bar."""
        if not self.ctxtnav_names:
            return
        realm, target = self.path_info_to_realm_target(req.path_info)
        sess = req.session
        if self.is_watching(sess.sid, sess.authenticated, realm, target):
            action_name = len(self.ctxtnav_names) >= 2 and \
                          self.ctxtnav_names[1] or 'Unwatch This'
        else:
            action_name = len(self.ctxtnav_names) and \
                          self.ctxtnav_names[0] or 'Watch This'
        add_ctxtnav(req, tag.a(_(action_name),
                               href=req.href.watch(realm, target)))

    def path_info_to_realm_target(self, path_info):
        """Split '/realm/target...' into a normalized (realm, target)."""
        realm = target = None
        g = re.match(r'^/([^/]+)(.*)', path_info)
        if g:
            realm, target = g.groups()
            target = target.strip('/')
        return self.normalize_realm_target(realm, target)

    def normalize_realm_target(self, realm, target):
        """Apply defaults: realm 'wiki', wiki target 'WikiStart'."""
        if not realm:
            realm = 'wiki'
        if not target and realm == 'wiki':
            target = 'WikiStart'
        return realm, target

    # ITicketChangeListener methods

    def ticket_created(*args):
        pass

    def ticket_changed(*args):
        pass

    def ticket_deleted(self, ticket):
        # Forget everyone's watches on the deleted ticket.
        klass = self.__class__.__name__
        SubscriptionAttribute.delete_by_class_realm_and_target(
            self.env, klass, 'ticket', get_target_id(ticket))

    # IWikiChangeListener methods

    def wiki_page_added(*args):
        pass

    def wiki_page_changed(*args):
        pass

    def wiki_page_deleted(self, page):
        # Forget everyone's watches on the deleted page.
        klass = self.__class__.__name__
        SubscriptionAttribute.delete_by_class_realm_and_target(
            self.env, klass, 'wiki', get_target_id(page))

    def wiki_page_version_deleted(*args):
        pass

    # IAnnouncementSubscriber methods

    def matches(self, event):
        """Yield subscription tuples for every watcher of the event's
        target resource.
        """
        klass = self.__class__.__name__
        attrs = SubscriptionAttribute.find_by_class_realm_and_target(
            self.env, klass, event.realm, get_target_id(event.target))
        sids = set(map(lambda x: (x['sid'], x['authenticated']), attrs))
        for i in Subscription.find_by_sids_and_class(self.env, sids, klass):
            yield i.subscription_tuple()

    def description(self):
        return _("notify me when one of my watched wiki or tickets is updated")

    def requires_authentication(self):
        return False
class Phrases(Component):
    """Highlight attentional phrases like `FIXME`.

    Phrases that are highlighted are defined in the `[wikiextras]` section
    in `trac.ini`. Use the `ShowPhrases` macro to show a list of currently
    defined phrases.
    """

    implements(IRequestFilter, ITemplateProvider, IWikiSyntaxProvider,
               IWikiMacroProvider)

    fixme_phrases = ListOption('wikiextras', 'fixme_phrases', 'BUG, FIXME',
        doc="""A list of attentional phrases or single words, separated by
        comma (`,`) that will be highlighted to catch attention.

        Any delimiter `():<>` adjacent to a phrase will not be presented.
        (i.e. do not include any of these delimiters in this list). This
        makes it possible to naturally write, for example, `FIXME:` in a
        wiki text, but view the phrase highlighted without the colon (`:`)
        which would not look natural. Use the `ShowPhrases` macro to show a
        list of currently defined phrases.""")

    todo_phrases = ListOption('wikiextras', 'todo_phrases', 'REVIEW, TODO',
        doc="Analogous to `FIXME`-phrases, but "
            "presentation is less eye-catching.")

    done_phrases = ListOption('wikiextras', 'done_phrases',
        'DONE, DEBUGGED, FIXED, REVIEWED',
        doc="Analogous to `FIXME`-phrases, but "
            "presentation is less eye-catching.")

    custom_phrases_section = ConfigSection('wikiextras-custom-phrases',
        """Custom phrases are configurable by providing associations between
        a CSS class and the list of phrases separated by comma.

        Example:
        {{{#!ini
        [wikiextras-custom-phrases]
        nice = NICE, COOL
        }}}
        """)

    def __init__(self):
        # Maps each phrase -- and its delimited spellings -- to the HTML
        # markup it should render as.
        self.text = {}

        markup_template = '<span class="wikiextras phrase %s">%s</span>'

        def register(css_class, phrases):
            # Register a phrase plus the delimited variants that render the
            # same way with the delimiter dropped: ':X:', '<X>', '(X)', 'X:'.
            for phrase in phrases:
                markup = markup_template % (css_class, phrase)
                self.text[phrase] = markup
                for left, right in ((':', ':'), ('<', '>'), ('(', ')')):
                    self.text[left + phrase + right] = markup
                self.text[phrase + ':'] = markup

        register('fixme', self.fixme_phrases)
        register('todo', self.todo_phrases)
        register('done', self.done_phrases)
        for css_class, phrases in self.custom_phrases_section.options():
            register(css_class, phrases.split(','))

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        # Always load the phrase stylesheet so highlighted wiki text renders.
        add_stylesheet(req, 'wikiextras/css/phrases.css')
        return template, data, content_type

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return [('wikiextras', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        return []

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        # One alternation over all registered spellings; a leading '!'
        # escapes the highlighting.
        pattern = '!?(?:%s)' % prepare_regexp(self.text)
        yield (pattern, self._format_phrase)

    def get_link_resolvers(self):
        return []

    def _format_phrase(self, formatter, match, fullmatch):
        # Look up the pre-rendered markup for the matched spelling.
        return Markup(self.text[match])

    # IWikiMacroProvider methods

    def get_macros(self):
        yield 'ShowPhrases'

    def get_macro_description(self, name):
        return cleandoc("""Renders in a table the list of known phrases that
        are highlighted to catch attention.

        Comment: Any delimiter `():<>` adjacent to a phrase will not be
        presented. This makes it possible to naturally write `FIXME:`, for
        example, but view the phrase highlighted without the colon (`:`)
        which would not look natural. Prefixing a phrase with `!` prevents
        it from being highlighted.
        """)

    def expand_macro(self, formatter, name, content, args=None):
        # Render the fixme/todo/done phrase tables side by side, separated
        # by narrow spacer cells.
        def render(phrase_list):
            return render_table(phrase_list, '1',
                                lambda s: self._format_phrase(formatter, s,
                                                              None))

        tables = [render(p) for p in (self.fixme_phrases, self.todo_phrases,
                                      self.done_phrases)]
        cell_style = 'border:none;text-align:center;vertical-align:top'
        spacer = tag.td(style='width:2em;border:none')
        return tag.table(tag.tr(tag.td(tables[0], style=cell_style), spacer,
                                tag.td(tables[1], style=cell_style), spacer,
                                tag.td(tables[2], style=cell_style)))
class RepositoryAdminPanel(Component):
    """Web admin panel for repository administration."""

    implements(IAdminPanelProvider)

    allowed_repository_dir_prefixes = ListOption(
        'versioncontrol', 'allowed_repository_dir_prefixes', '',
        doc="""Comma-separated list of allowed prefixes for repository
        directories when adding and editing repositories in the repository
        admin panel. If the list is empty, all repository directories are
        allowed. (''since 0.12.1'')""")

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        """Advertise the panel to users with VERSIONCONTROL_ADMIN."""
        if 'VERSIONCONTROL_ADMIN' in req.perm:
            yield ('versioncontrol', _('Version Control'), 'repository',
                   _('Repositories'))

    def render_admin_panel(self, req, category, page, path_info):
        """Render the repository list view, or the detail view when
        `path_info` names a repository; handle the associated POST actions
        (save, add, alias, refresh, remove).  Successful actions redirect
        back to the panel (redirects raise and end the request).
        """
        req.perm.require('VERSIONCONTROL_ADMIN')

        # Retrieve info for all repositories
        rm = RepositoryManager(self.env)
        all_repos = rm.get_all_repositories()
        db_provider = self.env[DbRepositoryProvider]

        if path_info:
            # Detail view
            # '(default)' in the URL maps to the empty repository name
            reponame = not is_default(path_info) and path_info or ''
            info = all_repos.get(reponame)
            if info is None:
                raise TracError(_("Repository '%(repo)s' not found",
                                  repo=path_info))
            if req.method == 'POST':
                if req.args.get('cancel'):
                    req.redirect(req.href.admin(category, page))
                elif db_provider and req.args.get('save'):
                    # Modify repository
                    changes = {}
                    for field in db_provider.repository_attrs:
                        value = normalize_whitespace(req.args.get(field))
                        # 'hidden' is a checkbox: an absent value means
                        # unchecked, so it must be considered a change too
                        if (value is not None or field == 'hidden') \
                                and value != info.get(field):
                            changes[field] = value
                    if 'dir' in changes \
                            and not self._check_dir(req, changes['dir']):
                        # invalid directory: drop all changes
                        changes = {}
                    if changes:
                        db_provider.modify_repository(reponame, changes)
                        add_notice(req, _('Your changes have been saved.'))
                        name = req.args.get('name')
                        resync = tag.tt('trac-admin $ENV repository resync "%s"'
                                        % (name or '(default)'))
                        if 'dir' in changes:
                            msg = tag_('You should now run %(resync)s to '
                                       'synchronize Trac with the repository.',
                                       resync=resync)
                            add_notice(req, msg)
                        elif 'type' in changes:
                            msg = tag_('You may have to run %(resync)s to '
                                       'synchronize Trac with the repository.',
                                       resync=resync)
                            add_notice(req, msg)
                        if name and name != path_info and not 'alias' in info:
                            cset_added = tag.tt('trac-admin $ENV changeset '
                                                'added "%s" $REV'
                                                % (name or '(default)'))
                            msg = tag_('You will need to update your post-commit '
                                       'hook to call %(cset_added)s with the new '
                                       'repository name.',
                                       cset_added=cset_added)
                            add_notice(req, msg)
                    if changes:
                        req.redirect(req.href.admin(category, page))

            Chrome(self.env).add_wiki_toolbars(req)
            data = {'view': 'detail', 'reponame': reponame}

        else:
            # List view
            if req.method == 'POST':
                # Add a repository
                if db_provider and req.args.get('add_repos'):
                    name = req.args.get('name')
                    type_ = req.args.get('type')
                    # Avoid errors when copy/pasting paths
                    dir = normalize_whitespace(req.args.get('dir', ''))
                    if name is None or type_ is None or not dir:
                        add_warning(req, _('Missing arguments to add a '
                                           'repository.'))
                    elif self._check_dir(req, dir):
                        db_provider.add_repository(name, dir, type_)
                        name = name or '(default)'
                        add_notice(req, _('The repository "%(name)s" has been '
                                          'added.', name=name))
                        resync = tag.tt('trac-admin $ENV repository resync '
                                        '"%s"' % name)
                        msg = tag_('You should now run %(resync)s to '
                                   'synchronize Trac with the repository.',
                                   resync=resync)
                        add_notice(req, msg)
                        cset_added = tag.tt('trac-admin $ENV changeset '
                                            'added "%s" $REV' % name)
                        msg = tag_('You should also set up a post-commit hook '
                                   'on the repository to call %(cset_added)s '
                                   'for each committed changeset.',
                                   cset_added=cset_added)
                        add_notice(req, msg)
                        req.redirect(req.href.admin(category, page))

                # Add a repository alias
                elif db_provider and req.args.get('add_alias'):
                    name = req.args.get('name')
                    alias = req.args.get('alias')
                    if name is not None and alias is not None:
                        db_provider.add_alias(name, alias)
                        add_notice(req, _('The alias "%(name)s" has been '
                                          'added.', name=name or '(default)'))
                        req.redirect(req.href.admin(category, page))
                    # only reached when the redirect above did not happen
                    add_warning(req, _('Missing arguments to add an '
                                       'alias.'))

                # Refresh the list of repositories
                elif req.args.get('refresh'):
                    req.redirect(req.href.admin(category, page))

                # Remove repositories
                elif db_provider and req.args.get('remove'):
                    sel = req.args.getlist('sel')
                    if sel:
                        for name in sel:
                            db_provider.remove_repository(name)
                        add_notice(req, _('The selected repositories have '
                                          'been removed.'))
                        req.redirect(req.href.admin(category, page))
                    add_warning(req, _('No repositories were selected.'))

            data = {'view': 'list'}

        # Find repositories that are editable
        db_repos = {}
        if db_provider is not None:
            db_repos = dict(db_provider.get_repositories())

        # Prepare common rendering data
        repositories = dict((reponame,
                             self._extend_info(reponame, info.copy(),
                                               reponame in db_repos))
                            for (reponame, info) in all_repos.iteritems())
        types = sorted([''] + rm.get_supported_types())
        data.update({'types': types, 'default_type': rm.repository_type,
                     'repositories': repositories})

        return 'admin_repositories.html', data

    def _extend_info(self, reponame, info, editable):
        """Extend repository info for rendering."""
        info['name'] = reponame
        if info.get('dir') is not None:
            info['prettydir'] = breakable_path(info['dir']) or ''
        info['hidden'] = as_bool(info.get('hidden'))
        info['editable'] = editable
        if not info.get('alias'):
            try:
                repos = RepositoryManager(self.env).get_repository(reponame)
                youngest_rev = repos.get_youngest_rev()
                info['rev'] = youngest_rev
                info['display_rev'] = repos.display_rev(youngest_rev)
            except Exception:
                # best-effort: an unreachable repository simply shows no rev
                pass
        return info

    def _check_dir(self, req, dir):
        """Check that a repository directory is valid, and add a warning
        message if not.
        """
        if not os.path.isabs(dir):
            add_warning(req, _('The repository directory must be an absolute '
                               'path.'))
            return False
        prefixes = [os.path.join(self.env.path, prefix)
                    for prefix in self.allowed_repository_dir_prefixes]
        if prefixes and not any(is_path_below(dir, prefix)
                                for prefix in prefixes):
            add_warning(req, _('The repository directory must be located '
                               'below one of the following directories: '
                               '%(dirs)s', dirs=', '.join(prefixes)))
            return False
        return True
class TicketComponentOwnerSubscriber(Component):
    """Allows component owners to subscribe to tickets assigned to their
    components.
    """

    implements(IAnnouncementDefaultSubscriber, IAnnouncementSubscriber)

    default_on = BoolOption("announcer", "always_notify_component_owner",
        'true',
        """Whether or not to notify the owner of the ticket's component.
        The user can override this setting in their preferences.
        """)

    default_distributor = ListOption("announcer",
        "always_notify_component_owner_distributor", "email",
        doc="""Comma-separated list of distributors to send the message to
        by default. ex. email, xmpp
        """)

    # IAnnouncementSubscriber methods

    def matches(self, event):
        """Yield subscription tuples for the owner of the ticket's
        component, for ticket creation/change/attachment events.
        """
        if event.realm != "ticket":
            return
        if event.category not in ('created', 'changed', 'attachment added'):
            return

        ticket = event.target
        try:
            component = model.Component(self.env, ticket['component'])
        except Exception:
            # FIX: was a bare `except:` wrapping the whole generator body,
            # which also swallowed GeneratorExit/KeyboardInterrupt.  A
            # missing or invalid component simply produces no subscriptions.
            self.log.debug("Component for ticket (%s) not found",
                           ticket['id'])
            return
        if not component.owner:
            return

        # An owner containing '@' is treated as a plain email address,
        # otherwise as an authenticated session id.
        if re.match(r'^[^@]+@.+', component.owner):
            sid, auth, addr = None, 0, component.owner
        else:
            sid, auth, addr = component.owner, 1, None

        # Default subscription
        for s in self.default_subscriptions():
            yield (s[0], s[1], sid, auth, addr, None, s[2], s[3])
        if sid:
            klass = self.__class__.__name__
            for s in Subscription.find_by_sids_and_class(
                    self.env, ((sid, auth), ), klass):
                yield s.subscription_tuple()

    def description(self):
        return _("notify me when a ticket that belongs to a component "
                 "that I own is created or modified")

    def requires_authentication(self):
        return True

    # IAnnouncementDefaultSubscriber method

    def default_subscriptions(self):
        """Yield the built-in subscription for every configured
        distributor when the option is enabled.
        """
        if self.default_on:
            for d in self.default_distributor:
                yield (self.__class__.__name__, d, 101, 'always')
class ODTExportPlugin(Component):
    """Convert Wiki pages to ODT."""
    implements(IContentConverter)

    img_width = Option('odtexport', 'img_default_width', '8cm')
    img_height = Option('odtexport', 'img_default_height', '6cm')
    img_dpi = IntOption('odtexport', 'dpi', '96')
    get_remote_images = BoolOption('odtexport', 'get_remote_images', True)
    replace_keyword = Option('odtexport', 'replace_keyword',
                             'TRAC-ODT-INSERT')
    wikiversion_keyword = Option('odtexport', 'wikiversion_keyword',
                                 'TRAC-ODT-WIKIVERSION')
    wikiname_keyword = Option('odtexport', 'wikiname_keyword',
                              'TRAC-ODT-WIKINAME')
    timestamp_keyword = Option('odtexport', 'timestamp_keyword',
                               'TRAC-ODT-TIMESTAMP')
    cut_start_keyword = Option('odtexport', 'cut_start_keyword',
                               'TRAC-ODT-CUT-START')
    cut_stop_keyword = Option('odtexport', 'cut_stop_keyword',
                              'TRAC-ODT-CUT-STOP')
    remove_macros = ListOption('odtexport', 'remove_macros',
        "PageOutline, TracGuideToc, TOC, TranslatedPages")

    # IContentConverter methods

    def get_supported_conversions(self):
        yield ('odt', 'OpenDocument', 'odt', 'text/x-trac-wiki',
               'application/vnd.oasis.opendocument.text', 5)

    def convert_content(self, req, input_type, content, output_type):
        """Convert the wiki text `content` to an ODT document.

        Returns an `(odt_data, mime_type)` tuple as required by
        IContentConverter.
        """
        # pylint: disable-msg=W0613
        self.page_name = req.args.get('page', 'WikiStart')
        template = self.get_template_name(content)
        html = self.wiki_to_html(content, req)
        odtfile = ODTFile(self.page_name,
                          req.args.get('version', 'latest'),
                          template,
                          self.env,  # pylint: disable-msg=E1101
                          options={
                              "img_width": self.img_width,
                              "img_height": self.img_height,
                              "img_dpi": self.img_dpi,
                              "get_remote_images": self.get_remote_images,
                              "replace_keyword": self.replace_keyword,
                              "wikiversion_keyword":
                                  self.wikiversion_keyword,
                              "wikiname_keyword": self.wikiname_keyword,
                              "timestamp_keyword": self.timestamp_keyword,
                              "cut_start_keyword": self.cut_start_keyword,
                              "cut_stop_keyword": self.cut_stop_keyword,
                          })
        odtfile.open()
        odtfile.import_xhtml(html)
        newdoc = odtfile.save()
        return (newdoc, "application/vnd.oasis.opendocument.text")

    def get_template_name(self, wikitext):
        """Return the template requested by an `[[OdtTemplate(...)]]`
        macro in `wikitext`, defaulting to `wikipage.odt`.
        """
        # Raw string: the pattern is full of backslash escapes.
        template_macro = re.search(r'\[\[OdtTemplate\(([^)]+)\)\]\]',
                                   wikitext)
        if template_macro:
            tpl = template_macro.group(1)
            if tpl.endswith(".odt"):
                return tpl
            return "%s.odt" % tpl
        return "wikipage.odt"

    def wiki_to_html(self, wikitext, req):
        """Render `wikitext` to tidied XHTML suitable for ODT import."""
        self.env.log.debug(
            'start function wiki_to_html')  # pylint: disable-msg=E1101

        # Remove some macros (TOC is better handled in ODT itself)
        for macro in self.remove_macros:
            wikitext = re.sub(r'\[\[%s(\([^)]*\))?\]\]' % macro, "",
                              wikitext)

        # Now convert wiki to HTML
        out = StringIO()
        context = Context.from_request(req, absurls=True)
        Formatter(self.env,  # pylint: disable-msg=E1101
                  context('wiki', self.page_name)).format(wikitext, out)
        html = Markup(out.getvalue())
        html = html.encode("utf-8", 'replace')

        # Clean up the HTML
        # Remove external link icon
        html = re.sub('<span class="icon">.</span>', '', html)
        tidy_options = dict(output_xhtml=1, add_xml_decl=1, indent=1,
                            tidy_mark=0, input_encoding='utf8',
                            output_encoding='utf8', doctype='auto',
                            wrap=0, char_encoding='utf8')
        html = tidy.parseString(html, **tidy_options)
        # Replace nbsp with entity (the previous code had degraded into a
        # no-op replace of a space by a space):
        # http://www.mail-archive.com/[email protected]/msg03670.html
        html = str(html).replace("&nbsp;", "&#160;")
        # Tidy creates newlines after <pre> (by indenting)
        html = re.sub(r'<pre([^>]*)>\n', r'<pre\1>', html)
        return html
class TicketSystem(Component):
    """Core of the ticket subsystem: field definitions, ticket
    permissions, `#123` wiki-link syntax and the `ticket` resource realm.
    """

    implements(IPermissionRequestor, IWikiSyntaxProvider, IResourceManager,
               ITicketManipulator)

    change_listeners = ExtensionPoint(ITicketChangeListener)
    milestone_change_listeners = ExtensionPoint(IMilestoneChangeListener)

    realm = 'ticket'

    ticket_custom_section = ConfigSection('ticket-custom',
        """In this section, you can define additional fields for tickets.
        See TracTicketsCustomFields for more details.""")

    action_controllers = OrderedExtensionsOption('ticket', 'workflow',
        ITicketActionController, default='ConfigurableTicketWorkflow',
        include_missing=False,
        doc="""Ordered list of workflow controllers to use for ticket
        actions.
        """)

    restrict_owner = BoolOption('ticket', 'restrict_owner', 'false',
        """Make the owner field of tickets use a drop-down menu.
        Be sure to understand the performance implications before
        activating this option. See
        [TracTickets#Assign-toasDrop-DownList Assign-to as Drop-Down List].

        Please note that e-mail addresses are '''not''' obfuscated in the
        resulting drop-down menu, so this option should not be used if
        e-mail addresses must remain protected.
        """)

    default_version = Option('ticket', 'default_version', '',
        """Default version for newly created tickets.""")

    default_type = Option('ticket', 'default_type', 'defect',
        """Default type for newly created tickets.""")

    default_priority = Option('ticket', 'default_priority', 'major',
        """Default priority for newly created tickets.""")

    default_milestone = Option('ticket', 'default_milestone', '',
        """Default milestone for newly created tickets.""")

    default_component = Option('ticket', 'default_component', '',
        """Default component for newly created tickets.""")

    default_severity = Option('ticket', 'default_severity', '',
        """Default severity for newly created tickets.""")

    default_summary = Option('ticket', 'default_summary', '',
        """Default summary (title) for newly created tickets.""")

    default_description = Option('ticket', 'default_description', '',
        """Default description for newly created tickets.""")

    default_keywords = Option('ticket', 'default_keywords', '',
        """Default keywords for newly created tickets.""")

    default_owner = Option('ticket', 'default_owner', '< default >',
        """Default owner for newly created tickets. The component owner
        is used when set to the value `< default >`.
        """)

    default_cc = Option('ticket', 'default_cc', '',
        """Default cc: list for newly created tickets.""")

    default_resolution = Option('ticket', 'default_resolution', 'fixed',
        """Default resolution for resolving (closing) tickets.""")

    allowed_empty_fields = ListOption('ticket', 'allowed_empty_fields',
        'milestone, version',
        doc="""Comma-separated list of `select` fields that can have an
        empty value. (//since 1.1.2//)""")

    max_comment_size = IntOption('ticket', 'max_comment_size', 262144,
        """Maximum allowed comment size in characters.""")

    max_description_size = IntOption('ticket', 'max_description_size',
        262144,
        """Maximum allowed description size in characters.""")

    max_summary_size = IntOption('ticket', 'max_summary_size', 262144,
        """Maximum allowed summary size in characters.
        (//since 1.0.2//)""")

    def __init__(self):
        self.log.debug('action controllers for ticket workflow: %r',
                       [c.__class__.__name__
                        for c in self.action_controllers])

    # Public API

    def get_available_actions(self, req, ticket):
        """Returns a sorted list of available actions"""
        # The list should not have duplicates; when several controllers
        # offer the same action, the highest weight wins.
        actions = {}
        for controller in self.action_controllers:
            weighted_actions = \
                controller.get_ticket_actions(req, ticket) or []
            for weight, action in weighted_actions:
                if action in actions:
                    actions[action] = max(actions[action], weight)
                else:
                    actions[action] = weight
        all_weighted_actions = [(weight, action)
                                for action, weight in actions.items()]
        return [x[1] for x in sorted(all_weighted_actions, reverse=True)]

    def get_all_status(self):
        """Returns a sorted list of all the states all of the action
        controllers know about."""
        valid_states = set()
        for controller in self.action_controllers:
            valid_states.update(controller.get_all_status() or [])
        return sorted(valid_states)

    def get_ticket_field_labels(self):
        """Produce a (name,label) mapping from `get_ticket_fields`."""
        labels = {f['name']: f['label'] for f in self.get_ticket_fields()}
        labels['attachment'] = _("Attachment")
        return labels

    def get_ticket_fields(self):
        """Returns list of fields available for tickets.

        Each field is a dict with at least the 'name', 'label' (localized)
        and 'type' keys.
        It may in addition contain the 'custom' key, the 'optional' and the
        'options' keys. When present 'custom' and 'optional' are always
        `True`.
        """
        fields = copy.deepcopy(self.fields)
        label = 'label'  # workaround gettext extraction bug
        for f in fields:
            if not f.get('custom'):
                f[label] = gettext(f[label])
        return fields

    def reset_ticket_fields(self):
        """Invalidate ticket field cache."""
        del self.fields

    @cached
    def fields(self):
        """Return the list of fields available for tickets."""
        from trac.ticket import model

        fields = TicketFieldList()

        # Basic text fields
        fields.append({'name': 'summary', 'type': 'text',
                       'label': N_('Summary')})
        fields.append({'name': 'reporter', 'type': 'text',
                       'label': N_('Reporter')})

        # Owner field, by default text but can be changed dynamically
        # into a drop-down depending on configuration (restrict_owner=true)
        fields.append({'name': 'owner', 'type': 'text',
                       'label': N_('Owner')})

        # Description
        fields.append({'name': 'description', 'type': 'textarea',
                       'format': 'wiki', 'label': N_('Description')})

        # Default select and radio fields
        selects = [('type', N_('Type'), model.Type),
                   ('status', N_('Status'), model.Status),
                   ('priority', N_('Priority'), model.Priority),
                   ('milestone', N_('Milestone'), model.Milestone),
                   ('component', N_('Component'), model.Component),
                   ('version', N_('Version'), model.Version),
                   ('severity', N_('Severity'), model.Severity),
                   ('resolution', N_('Resolution'), model.Resolution)]
        for name, label, cls in selects:
            options = [val.name for val in cls.select(self.env)]
            if not options:
                # Fields without possible values are treated as if they
                # didn't exist
                continue
            field = {'name': name, 'type': 'select', 'label': label,
                     'value': getattr(self, 'default_' + name, ''),
                     'options': options}
            if name in ('status', 'resolution'):
                field['type'] = 'radio'
                field['optional'] = True
            elif name in self.allowed_empty_fields:
                field['optional'] = True
            fields.append(field)

        # Advanced text fields
        fields.append({'name': 'keywords', 'type': 'text',
                       'format': 'list', 'label': N_('Keywords')})
        fields.append({'name': 'cc', 'type': 'text', 'format': 'list',
                       'label': N_('Cc')})

        # Date/time fields
        fields.append({'name': 'time', 'type': 'time',
                       'format': 'relative', 'label': N_('Created')})
        fields.append({'name': 'changetime', 'type': 'time',
                       'format': 'relative', 'label': N_('Modified')})

        # Custom fields are appended last; duplicates of built-in names
        # are skipped with a warning.
        for field in self.custom_fields:
            if field['name'] in [f['name'] for f in fields]:
                self.log.warning('Duplicate field name "%s" (ignoring)',
                                 field['name'])
                continue
            fields.append(field)

        return fields

    # Names that cannot be used for custom fields because they clash with
    # query/report parameters or built-in ticket attributes.
    reserved_field_names = ['report', 'order', 'desc', 'group',
                            'groupdesc', 'col', 'row', 'format', 'max',
                            'page', 'verbose', 'comment', 'or', 'id',
                            'time', 'changetime', 'owner', 'reporter',
                            'cc', 'summary', 'description', 'keywords']

    def get_custom_fields(self):
        """Return a deep copy of the cached custom field definitions."""
        return copy.deepcopy(self.custom_fields)

    @cached
    def custom_fields(self):
        """Return the list of custom ticket fields available for
        tickets."""
        fields = TicketFieldList()
        config = self.ticket_custom_section
        # Options without a '.' define fields; dotted options are field
        # attributes (label, order, options, ...).
        for name in [option for option, value in config.options()
                     if '.' not in option]:
            field = {
                'name': name,
                'custom': True,
                'type': config.get(name),
                'order': config.getint(name + '.order', 0),
                'label': config.get(name + '.label')
                         or name.replace("_", " ").strip().capitalize(),
                'value': config.get(name + '.value', '')
            }
            if field['type'] == 'select' or field['type'] == 'radio':
                field['options'] = config.getlist(name + '.options',
                                                  sep='|')
                if not field['options']:
                    continue
                if '' in field['options'] or \
                        field['name'] in self.allowed_empty_fields:
                    field['optional'] = True
                    if '' in field['options']:
                        field['options'].remove('')
            elif field['type'] == 'checkbox':
                field['value'] = '1' if as_bool(field['value']) else '0'
            elif field['type'] == 'text':
                field['format'] = config.get(name + '.format', 'plain')
                field['max_size'] = config.getint(name + '.max_size', 0)
            elif field['type'] == 'textarea':
                field['format'] = config.get(name + '.format', 'plain')
                field['max_size'] = config.getint(name + '.max_size', 0)
                field['height'] = config.getint(name + '.rows')
            elif field['type'] == 'time':
                field['format'] = config.get(name + '.format', 'datetime')

            if field['name'] in self.reserved_field_names:
                self.log.warning('Field name "%s" is a reserved name '
                                 '(ignoring)', field['name'])
                continue
            if not re.match('^[a-zA-Z][a-zA-Z0-9_]+$', field['name']):
                self.log.warning('Invalid name for custom field: "%s" '
                                 '(ignoring)', field['name'])
                continue

            fields.append(field)

        fields.sort(key=lambda f: (f['order'], f['name']))
        return fields

    def get_field_synonyms(self):
        """Return a mapping from field name synonyms to field names.
        The synonyms are supposed to be more intuitive for custom
        queries."""
        # i18n TODO - translated keys
        return {'created': 'time', 'modified': 'changetime'}

    def eventually_restrict_owner(self, field, ticket=None):
        """Restrict given owner field to be a list of users having
        the TICKET_MODIFY permission (for the given ticket)
        """
        if self.restrict_owner:
            field['type'] = 'select'
            field['options'] = self.get_allowed_owners(ticket)
            field['optional'] = True

    def get_allowed_owners(self, ticket=None):
        """Returns a list of permitted ticket owners (those possessing the
        TICKET_MODIFY permission). Returns `None` if the option
        `[ticket]` `restrict_owner` is `False`.

        If `ticket` is not `None`, fine-grained permission checks are used
        to determine the allowed owners for the specified resource.

        :since: 1.0.3
        """
        if self.restrict_owner:
            allowed_owners = []
            for user in PermissionSystem(self.env) \
                    .get_users_with_permission('TICKET_MODIFY'):
                if not ticket or \
                        'TICKET_MODIFY' in PermissionCache(
                            self.env, user, ticket.resource):
                    allowed_owners.append(user)
            allowed_owners.sort()
            return allowed_owners

    # ITicketManipulator methods

    def prepare_ticket(self, req, ticket, fields, actions):
        # Intentionally a no-op: this manipulator only validates.
        pass

    def validate_ticket(self, req, ticket):
        """Yield `(field, message)` pairs for every invalid field value.

        Checks select-field values, description/summary/custom-field
        lengths and parses time-field input.
        """
        # Validate select fields for known values.
        for field in ticket.fields:
            if 'options' not in field:
                continue
            name = field['name']
            if name == 'status':
                continue
            if name in ticket and name in ticket._old:
                value = ticket[name]
                if value:
                    if value not in field['options']:
                        yield name, _('"%(value)s" is not a valid value',
                                      value=value)
                elif not field.get('optional', False):
                    yield name, _("field cannot be empty")

        # Validate description length.
        if len(ticket['description'] or '') > self.max_description_size:
            yield 'description', \
                  _("Must be less than or equal to %(num)s "
                    "characters", num=self.max_description_size)

        # Validate summary length.
        if not ticket['summary']:
            yield 'summary', _("Tickets must contain a summary.")
        elif len(ticket['summary'] or '') > self.max_summary_size:
            yield 'summary', _("Must be less than or equal to %(num)s "
                               "characters", num=self.max_summary_size)

        # Validate custom field length.
        for field in ticket.custom_fields:
            field_attrs = ticket.fields.by_name(field)
            max_size = field_attrs.get('max_size', 0)
            if 0 < max_size < len(ticket[field] or ''):
                label = field_attrs.get('label')
                yield label or field, \
                      _("Must be less than or equal to "
                        "%(num)s characters", num=max_size)

        # Validate time field content.
        for field in ticket.time_fields:
            value = ticket[field]
            if field in ticket.custom_fields and \
                    field in ticket._old and \
                    not isinstance(value, datetime):
                field_attrs = ticket.fields.by_name(field)
                format = field_attrs.get('format')
                try:
                    ticket[field] = user_time(req, parse_date, value,
                                              hint=format) \
                                    if value else None
                except TracError as e:
                    # Degrade TracError to warning.
                    ticket[field] = value
                    label = field_attrs.get('label')
                    yield label or field, to_unicode(e)

    def validate_comment(self, req, comment):
        # Validate comment length
        if len(comment or '') > self.max_comment_size:
            yield _("Must be less than or equal to %(num)s characters",
                    num=self.max_comment_size)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['TICKET_APPEND', 'TICKET_CREATE', 'TICKET_CHGPROP',
                'TICKET_VIEW', 'TICKET_EDIT_CC',
                'TICKET_EDIT_DESCRIPTION', 'TICKET_EDIT_COMMENT',
                ('TICKET_MODIFY', ['TICKET_APPEND', 'TICKET_CHGPROP']),
                ('TICKET_ADMIN', ['TICKET_CREATE', 'TICKET_MODIFY',
                                  'TICKET_VIEW', 'TICKET_EDIT_CC',
                                  'TICKET_EDIT_DESCRIPTION',
                                  'TICKET_EDIT_COMMENT'])]

    # IWikiSyntaxProvider methods

    def get_link_resolvers(self):
        return [('bug', self._format_link),
                ('issue', self._format_link),
                ('ticket', self._format_link),
                ('comment', self._format_comment_link)]

    def get_wiki_syntax(self):
        yield (
            # matches #... but not &#... (HTML entity)
            r"!?(?<!&)#"
            # optional intertrac shorthand #T... + digits
            r"(?P<it_ticket>%s)%s" % (WikiParser.INTERTRAC_SCHEME,
                                      Ranges.RE_STR),
            lambda x, y, z: self._format_link(x, 'ticket', y[1:], y, z))

    def _format_link(self, formatter, ns, target, label, fullmatch=None):
        """Render a `ticket:` / `bug:` / `issue:` link (single id or
        id range) as an anchor, honoring TICKET_VIEW."""
        intertrac = formatter.shorthand_intertrac_helper(ns, target, label,
                                                         fullmatch)
        if intertrac:
            return intertrac
        try:
            link, params, fragment = formatter.split_link(target)
            r = Ranges(link)
            if len(r) == 1:
                num = r.a
                ticket = formatter.resource(self.realm, num)
                from trac.ticket.model import Ticket
                if Ticket.id_is_valid(num) and \
                        'TICKET_VIEW' in formatter.perm(ticket):
                    # TODO: attempt to retrieve ticket view directly,
                    #       something like: t = Ticket.view(num)
                    for type, summary, status, resolution in \
                            self.env.db_query("""
                            SELECT type, summary, status, resolution
                            FROM ticket WHERE id=%s
                            """, (str(num),)):
                        description = self.format_summary(
                            summary, status, resolution, type)
                        title = '#%s: %s' % (num, description)
                        href = formatter.href.ticket(num) \
                               + params + fragment
                        return tag.a(label, title=title, href=href,
                                     class_='%s ticket' % status)
            else:
                # A range of tickets links to a query over those ids.
                ranges = str(r)
                if params:
                    params = '&' + params[1:]
                label_wrap = label.replace(',', u',\u200b')
                ranges_wrap = ranges.replace(',', u', ')
                return tag.a(label_wrap,
                             title=_("Tickets %(ranges)s",
                                     ranges=ranges_wrap),
                             href=formatter.href.query(id=ranges) + params)
        except ValueError:
            pass
        return tag.a(label, class_='missing ticket')

    def _format_comment_link(self, formatter, ns, target, label):
        """Render a `comment:N` or `comment:N:ticket:M` link."""
        resource = None
        if ':' in target:
            elts = target.split(':')
            if len(elts) == 3:
                cnum, realm, id = elts
                if cnum != 'description' and cnum and \
                        not cnum[0].isdigit():
                    realm, id, cnum = elts  # support old comment: style
                id = as_int(id, None)
                if realm in ('bug', 'issue'):
                    realm = 'ticket'
                resource = formatter.resource(realm, id)
        else:
            # Bare `comment:N` refers to the current resource.
            resource = formatter.resource
            cnum = target

        if resource and resource.id and resource.realm == self.realm and \
                cnum and (cnum.isdigit() or cnum == 'description'):
            href = title = class_ = None
            if self.resource_exists(resource):
                from trac.ticket.model import Ticket
                ticket = Ticket(self.env, resource.id)
                if cnum != 'description' and \
                        not ticket.get_change(cnum):
                    title = _("ticket comment does not exist")
                    class_ = 'missing ticket'
                elif 'TICKET_VIEW' in formatter.perm(resource):
                    href = formatter.href.ticket(resource.id) + \
                           "#comment:%s" % cnum
                    if resource.id != formatter.resource.id:
                        summary = self.format_summary(
                            ticket['summary'], ticket['status'],
                            ticket['resolution'], ticket['type'])
                        if cnum == 'description':
                            title = _("Description for #%(id)s: "
                                      "%(summary)s", id=resource.id,
                                      summary=summary)
                        else:
                            title = _("Comment %(cnum)s for #%(id)s: "
                                      "%(summary)s", cnum=cnum,
                                      id=resource.id, summary=summary)
                        class_ = ticket['status'] + ' ticket'
                    else:
                        title = _("Description") \
                                if cnum == 'description' \
                                else _("Comment %(cnum)s", cnum=cnum)
                        class_ = 'ticket'
                else:
                    title = _("no permission to view ticket")
                    class_ = 'forbidden ticket'
            else:
                title = _("ticket does not exist")
                class_ = 'missing ticket'
            return tag.a(label, class_=class_, href=href, title=title)
        return label

    # IResourceManager methods

    def get_resource_realms(self):
        yield self.realm

    def get_resource_description(self, resource, format=None,
                                 context=None, **kwargs):
        if format == 'compact':
            return '#%s' % resource.id
        elif format == 'summary':
            from trac.ticket.model import Ticket
            ticket = Ticket(self.env, resource.id)
            args = [ticket[f] for f in ('summary', 'status', 'resolution',
                                        'type')]
            return self.format_summary(*args)
        return _("Ticket #%(shortname)s", shortname=resource.id)

    def format_summary(self, summary, status=None, resolution=None,
                       type=None):
        """Build a one-line "type: summary (status: resolution)"
        description of a ticket."""
        summary = shorten_line(summary)
        if type:
            summary = type + ': ' + summary
        if status:
            if status == 'closed' and resolution:
                status += ': ' + resolution
            return "%s (%s)" % (summary, status)
        else:
            return summary

    def resource_exists(self, resource):
        """
        >>> from trac.test import EnvironmentStub
        >>> from trac.resource import Resource, resource_exists
        >>> env = EnvironmentStub()

        >>> resource_exists(env, Resource('ticket', 123456))
        False

        >>> from trac.ticket.model import Ticket
        >>> t = Ticket(env)
        >>> int(t.insert())
        1
        >>> resource_exists(env, t.resource)
        True
        """
        try:
            id_ = int(resource.id)
        except (TypeError, ValueError):
            return False
        if self.env.db_query("SELECT id FROM ticket WHERE id=%s",
                             (id_,)):
            if resource.version is None:
                return True
            # A versioned ticket resource exists when it has at least
            # `version` distinct change times.
            revcount = self.env.db_query("""
                SELECT count(DISTINCT time) FROM ticket_change
                WHERE ticket=%s
                """, (id_,))
            return revcount[0][0] >= resource.version
        else:
            return False
class SilverCityRenderer(Component):
    """Syntax highlighting based on SilverCity."""

    implements(ISystemInfoProvider, IHTMLPreviewRenderer)

    silvercity_modes = ListOption('mimeviewer', 'silvercity_modes', '',
        doc="""List of additional MIME types known by SilverCity.
        For each, a tuple `mimetype:mode:quality` has to be
        specified, where `mimetype` is the MIME type, `mode` is the
        corresponding SilverCity mode to be used for the conversion
        and `quality` is the quality ratio associated to this
        conversion. That can also be used to override the default
        quality ratio used by the SilverCity render, which is 3
        (''since 0.10'').""")

    expand_tabs = True
    returns_source = True

    # Cleanup patterns, compiled once instead of on every render() call:
    # trailing <br/> tags and the wrapper spans SilverCity emits for
    # default-styled text.
    _br_re = re.compile(r'<br\s*/?>$', re.MULTILINE)
    _span_default_re = re.compile(
        r'<span class="\w+_default">(.*?)</span>', re.DOTALL)

    def __init__(self):
        self._types = None  # lazily built MIME type -> mode mapping

    # ISystemInfoProvider methods

    def get_system_info(self):
        if have_silvercity:
            yield 'SilverCity', get_pkginfo(SilverCity).get('version', '?')
            # TODO: the above works only if setuptools was used to build
            # SilverCity, which is not yet the case by default for 0.9.7.
            # I've not been able to find an alternative way to get version.

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        if not have_silvercity:
            return 0
        # Extend default MIME type to mode mappings with configured ones;
        # compare against the None sentinel so an (unlikely) empty mapping
        # is not rebuilt on every call.
        if self._types is None:
            self._types = {}
            self._types.update(types)
            self._types.update(
                Mimeview(self.env).configured_modes_mapping('silvercity'))
        return self._types.get(mimetype, (None, 0))[1]

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Return the highlighted `content` as a list of Markup lines."""
        try:
            mimetype = mimetype.split(';', 1)[0]
            typelang = self._types[mimetype]
            lang = typelang[0]
            module = getattr(SilverCity, lang)
            generator = getattr(module, lang + "HTMLGenerator")
            try:
                # Optional third tuple element carries lexer properties.
                allprops = typelang[2]
                propset = SilverCity.PropertySet()
                for p in allprops.keys():
                    propset[p] = allprops[p]
            except IndexError:
                pass
        except (KeyError, AttributeError):
            # Call form instead of the legacy `raise Exception, err`
            # statement; identical behavior, also valid syntax on Python 3.
            raise Exception("No SilverCity lexer found for mime-type "
                            "'%s'." % mimetype)

        # SilverCity does not like unicode strings
        content = content.encode('utf-8')

        # SilverCity generates extra empty line against some types of
        # the line such as comment or #include with CRLF. So we
        # standardize to LF end-of-line style before call.
        content = CRLF_RE.sub('', content)

        buf = StringIO()
        generator().generate_html(buf, content)

        html = self._span_default_re.sub(
            r'\1', self._br_re.sub('', buf.getvalue()))

        # Convert the output back to a unicode string
        html = html.decode('utf-8')

        # SilverCity generates _way_ too many non-breaking spaces...
        # We don't need them anyway, so replace them by normal spaces
        # (the previous code had degraded into a no-op space-for-space
        # replacement).
        return [Markup(line)
                for line in html.replace('&nbsp;', ' ').splitlines()]
class TasklistPlugin(QueryModule):
    """Query-module subclass that presents open tickets carrying a
    custom "action item" field as a personal task list under
    `/tasklist`.
    """
    implements(ITemplateProvider, ITemplateStreamFilter)

    ticket_manipulators = ExtensionPoint(ITicketManipulator)

    field_name = Option('tasklist', 'tasklist_field',
                        default='action_item')
    default_query = Option('tasklist', 'default_query',
        default='status!=closed&owner=$USER',
        doc='The default tasklist query for authenticated users.')
    default_anonymous_query = Option('tasklist',
        'default_anonymous_query',
        default='status!=closed&cc~=$USER',
        doc='The default tasklist query for anonymous users.')
    default_cols = ListOption('tasklist', 'default_cols',
        default=['id', 'summary', 'priority'],
        doc='The default list of columns to show in the tasklist.')

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'tasklist'

    def get_navigation_items(self, req):
        if req.perm.has_permission('TICKET_VIEW'):
            yield ('mainnav', 'tasklist',
                   tag.a('Tasklist', href=req.href.tasklist()))

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info.startswith('/tasklist')

    def process_request(self, req):
        """Handle `/tasklist`: run the (default or requested) query and
        render it with the action-item column substituted for the
        summary."""
        req.perm.assert_permission('TICKET_VIEW')

        # The plugin is unusable without its custom field.
        if not self.env.config.has_option('ticket-custom',
                                          self.field_name):
            raise TracError(
                'Configuration error: the custom ticket field "%s" has not been defined '
                'in the [ticket-custom] section of trac.ini. See the documentation '
                'for more info on configuring the TaskListPlugin.'
                % self.field_name)

        constraints = self._get_constraints(req)
        if not constraints and not 'order' in req.args:
            # If no constraints are given in the URL, use the default
            # ones.
            if req.authname and req.authname != 'anonymous':
                qstring = self.default_query
                user = req.authname
            else:
                email = req.session.get('email')
                name = req.session.get('name')
                qstring = self.default_anonymous_query
                user = email or name or None

            if user:
                qstring = qstring.replace('$USER', user)
            self.log.debug('TasklistPlugin: Using default query: %s',
                           qstring)
            constraints = Query.from_string(self.env,
                                            qstring).constraints
            # Ensure no field constraints that depend on $USER are used
            # if we have no username.
            for field, vals in constraints.items():
                for val in vals:
                    if val.endswith('$USER'):
                        del constraints[field]

        cols = req.args.get('col')
        if not cols:
            cols = self.default_cols
        # NOTE(review): append happens before the basestring check below
        # — if req.args ever returns a single string here this raises
        # AttributeError; looks like the two statements were meant the
        # other way around. TODO confirm.
        cols.append(self.field_name)
        if isinstance(cols, basestring):
            cols = [cols]
        form_cols = copy.copy(cols)
        # Since we don't show 'id' or the tasklist_field as an option
        # to the user, we need to re-insert it here.
        if cols and 'id' not in cols:
            cols.insert(0, 'id')
        if cols and self.field_name not in cols:
            cols.insert(0, self.field_name)

        rows = req.args.get('row', [])
        if isinstance(rows, basestring):
            rows = [rows]

        # `q` is only used to build redirect URLs; `query` is rendered.
        q = Query(self.env, constraints=constraints, cols=cols)
        query = Query(self.env, req.args.get('report'),
                      constraints, cols, req.args.get('order'),
                      'desc' in req.args, req.args.get('group'),
                      'groupdesc' in req.args, 'verbose' in req.args,
                      rows,
                      req.args.get('limit'))

        if 'update' in req.args:
            # Reset session vars
            for var in ('query_constraints', 'query_time',
                        'query_tickets'):
                if var in req.session:
                    del req.session[var]
            req.redirect(q.get_href(req.href).replace('/query',
                                                      '/tasklist'))

        if 'add' in req.args:
            req.perm.require('TICKET_CREATE')
            t = Ticket(self.env)
            # Only users with TICKET_MODIFY may set the owner directly.
            if req.method == 'POST' and 'field_owner' in req.args and \
                    'TICKET_MODIFY' not in req.perm:
                del req.args['field_owner']
            self._populate(req, t)
            reporter_id = req.args.get('field_reporter') or \
                          get_reporter_id(req, 'author')
            t.values['reporter'] = reporter_id
            valid = None
            valid = self._validate_ticket(req, t)
            if valid:
                t.insert()
                # Notify
                try:
                    tn = TicketNotifyEmail(self.env)
                    tn.notify(t, newticket=True)
                except Exception, e:
                    self.log.exception(
                        "Failure sending notification on creation of "
                        "ticket #%s: %s" % (t.id, e))
            req.redirect(q.get_href(req.href).replace('/query',
                                                      '/tasklist'))

        template, data, mime_type = self.display_html(req, query)
        # We overlap the query session href var so that if a ticket is
        # entered from the tasklist the "Back to Query" link will
        # come back to the tasklist instead of the query module.
        query_href = req.session['query_href']
        req.session['query_href'] = query_href.replace('/query',
                                                       '/tasklist')
        data['title'] = 'Task List'
        data['all_columns'].remove(self.field_name)
        #_pprint(data['tickets'])
        # Show the action item in place of the summary (unless it is the
        # '--' placeholder), keeping the real summary as the title.
        for ticket in data['tickets']:
            summary = ticket['summary']
            action = ticket[self.field_name]
            ticket['title'] = summary
            ticket['summary'] = action != '--' and action or summary
            continue
        # Drop the action-item column header after fixing up hrefs.
        # NOTE(review): if no header matches field_name, `del_index` is
        # unbound and the `del` below raises NameError — presumably the
        # earlier cols.insert guarantees a match; verify.
        for i, header in enumerate(data['headers']):
            header['href'] = header['href'].replace('/query', '/tasklist')
            if header['name'] == self.field_name:
                del_index = i
                continue
        del data['headers'][del_index]
        data['ticket_fields'] = self._get_ticket_fields(data)
        add_stylesheet(req, 'tasklist/css/tasklist.css')
        return 'tasklist.html', data, mime_type
class MenuManagerModule(Component):
    """Rebuilds Trac's navigation menus (mainnav/metanav/ctxtnav) into
    configurable, possibly nested menus driven by `[menu-custom]` /
    per-menu config sections.
    """
    implements(IRequestFilter, ITemplateProvider)

    managed_menus = ListOption('menu-custom', 'managed_menus',
        'mainnav,metanav',
        doc="""List of menus to be controlled by the Menu Manager""")
    serve_ui_files = BoolOption('menu-custom', 'serve_ui_files', True)

    # ITemplateProvider methods

    def get_templates_dirs(self):
        return []

    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('tracmenus', resource_filename(__name__, 'htdocs'))]

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        """Replace each managed menu in req.chrome['nav'] with the
        restructured version, keeping the originals in 'nav_orig'."""
        # Guard: already processed this request.
        if 'nav_orig' in req.chrome:
            return template, data, content_type
        req.chrome['nav_orig'] = req.chrome['nav'].copy()
        # ctxtnav entries are plain labels; wrap them in dicts so they
        # can be processed like the other menus.
        if 'ctxtnav' in self.managed_menus and 'ctxtnav' in req.chrome:
            req.chrome['nav_orig']['ctxtnav'] = [
                dict(name='ctxtnav_' + str(idx), label=ctx_label)
                for idx, ctx_label in enumerate(req.chrome['ctxtnav'])]
        for menu_name in self.managed_menus:
            req.chrome['nav'][menu_name] = self._get_menu(
                req, menu_name, req.chrome['nav_orig'])
            if menu_name == 'ctxtnav':
                # Unwrap back to plain labels for the ctxtnav renderer.
                req.chrome['ctxtnav'] = [
                    ctxt_item.get('label')
                    for ctxt_item in req.chrome['nav'][menu_name]]
        if self.serve_ui_files:
            add_script(req, 'tracmenus/js/superfish.js')
            add_script(req, 'tracmenus/js/tracmenus.js')
            add_script(req, 'tracmenus/js/jquery.hoverIntent.minified.js')
            add_stylesheet(req, 'tracmenus/css/tracmenus.css')
        return template, data, content_type

    def _get_menu(self, req, menu_name, nav_orig):
        """Build the (possibly nested) menu `menu_name` by merging the
        original nav items with the configured ones.

        Returns the list of top-level menu nodes; children are attached
        to their parent's 'label' markup as nested <ul>/<li> elements.
        """
        config_menu, config_options = self._get_config_menus(req,
                                                            menu_name)
        menu_orig = nav_orig.get(menu_name, [])
        hide_if_no_children = []
        menu_result = []
        if 'inherit' in config_options:
            menu_orig += nav_orig.get(config_options['inherit'], [])
        tree_menu = {}
        # Iterate items (original + configured) by configured 'order';
        # unknown items sort last (999).
        for option in sorted(
                menu_orig + [{'name': key}
                             for key in config_menu.keys()],
                key=lambda x: int(
                    config_menu.get(x['name'], {}).get('order', 999))):
            name = option['name']
            # Skip duplicates, disabled items (unless currently active),
            # items whose path_info filter failed, and items for which
            # the user lacks any required permission.
            if 'visited' in tree_menu.get(name, []) \
                    or (config_menu.get(name, {}).get('enabled',
                                                      True)==False
                        and not 'active' in option)\
                    or config_menu.get(name, {}).get('if_path_info',
                                                     True)==False \
                    or False in [req.perm.has_permission(perm)
                                 for perm in config_menu.get(
                                     name, {}).get('perm', [])]:
                continue
            tree_node = tree_menu.setdefault(name, {})
            tree_node.update(option.copy())
            # A configured label overrides the original one.
            if 'label' in option and 'label' in config_menu.get(name, []):
                del config_menu[name]['label']
            tree_node.update(
                config_menu.get(name, {'parent_name': 'unassigned'}))
            if tree_node.get('hide_if_no_children'):
                hide_if_no_children.append(tree_node)
            tree_node['label'] = html(
                tree_node.setdefault('label', html.a(name)))
            tree_node['visited'] = True
            if tree_node.get('href'):
                # Mark active when path and query string match the
                # current request.
                tree_node_href = urlsplit(tree_node['href'])
                tree_node.setdefault(
                    'active',
                    tree_node_href[2] == req.path_info and
                    tree_node_href[3] in req.environ['QUERY_STRING'])
            # Children registered before this parent was seen were
            # parked in '_tmp_children'; attach them now.
            if '_tmp_children' in tree_node:
                tree_node['children'] = html.ul()
                tree_node['label'].append(tree_node['children'])
                tree_node['children'].children.extend(
                    tree_node['_tmp_children'])
                del tree_node['_tmp_children']
            if (tree_node['parent_name']=='unassigned'
                    and not 'unassigned' in config_menu) \
                    or tree_node['parent_name']=='top':
                menu_result.append(tree_node)
                continue
            tree_node['parent'] = tree_menu.setdefault(
                tree_node['parent_name'], {})
            child_node = html.li(
                class_=tree_node.get('active') == True and 'active'
                       or None)
            tree_node['outter_html'] = child_node
            child_node.children = [tree_node['label']]
            if 'label' in tree_node['parent']:
                # Parent already built: append directly to its <ul>.
                if not 'children' in tree_node['parent']:
                    tree_node['parent']['children'] = html.ul()
                    tree_node['parent']['label'].append(
                        tree_node['parent']['children'])
                tree_node['parent']['children'].append(child_node)
            else:
                # Parent not built yet: park the child until it is.
                tree_node['parent'].setdefault(
                    '_tmp_children', []).append(child_node)
        # Remove nodes flagged hide_if_no_children that ended up empty.
        for hide_node in hide_if_no_children:
            if not hide_node.get('children'):
                if hide_node['parent_name'] == 'top':
                    pos = menu_result.index(hide_node)
                    del menu_result[pos]
                else:
                    pos = hide_node['parent']['children'] \
                        .children.index(hide_node['outter_html'])
                    del hide_node['parent']['children'].children[pos]
        return menu_result

    def _get_config_menus(self, req, menu_name):
        """Parse the `[menu_name]` config section.

        Returns `(menu, options)` where `menu` maps item name to its
        property dict and `options` holds section-level settings
        (currently only 'inherit').
        """
        new_menu_option = lambda name: dict(name=name, href='#',
                                            enabled=False,
                                            parent_name='top')
        menu, options = {}, {}
        for option, value in self.config[menu_name].options():
            # Option keys look like "<item>.<property>"; a bare "<item>"
            # key means its 'enabled' flag.
            item_parts = option.split('.', 1)
            name, prop_name = item_parts[0], \
                len(item_parts) > 1 and item_parts[1] or 'enabled'
            if name == 'inherit':
                options[name] = value
                continue
            menu.setdefault(name, new_menu_option(name))
            if prop_name == 'parent':
                menu[name]['parent_name'] = value
            elif prop_name == 'enabled':
                menu[name][prop_name] = self.config[menu_name].getbool(
                    option, True)
            elif prop_name == 'href':
                value = value.replace('$PATH_INFO', req.path_info)
                href = value.startswith('/') and \
                       (req.href().rstrip('/') + value) or value
                menu[name]['label'] = menu[name].setdefault(
                    'label', html.a())(href=href)
                menu[name][prop_name] = value
            elif prop_name == 'label':
                menu[name].setdefault('label', html.a(href='#'))(value)
            elif prop_name == 'path_info':
                menu[name]['if_path_info'] = re.match(
                    value, req.path_info) and True or False
            elif prop_name == 'enabled':
                # NOTE(review): dead branch — duplicates the 'enabled'
                # case handled above (with default True), so this
                # default-False variant can never be reached.
                menu[name][prop_name] = self.config[menu_name].getbool(
                    option, False)
            elif prop_name == 'hide_if_no_children':
                menu[name][prop_name] = self.config[menu_name].getbool(
                    option, False)
            elif prop_name == 'perm':
                menu[name][prop_name] = self.config[menu_name].getlist(
                    option, default=[], sep=',')
            else:
                menu[name][prop_name] = value
        # Perform checks for invalid configuration
        for name in menu:
            # There won't be an href if there isn't a label
            if 'label' not in menu[name]:
                menu[name]['enabled'] = False
        return menu, options
class SQLiteConnector(Component):
    """Database connector for SQLite.

    Database URLs should be of the form:
    {{{
    sqlite:path/to/trac.db
    }}}
    """
    implements(IDatabaseConnector)

    # Set to True the first time a connection is handed out; lets the
    # environment know this connector is actually in use.
    required = False

    extensions = ListOption('sqlite', 'extensions',
        doc="""Paths to [https://sqlite.org/loadext.html sqlite extensions].
        The paths may be absolute or relative to the Trac environment.
        (''since 0.12'')
        """)

    # Class-level cache for the single shared in-memory connection
    # (':memory:' databases vanish when their connection closes, so one
    # connection is kept alive and reused).
    memory_cnx = None

    def __init__(self):
        # Human-readable reason why SQLite support is unavailable, or None.
        self.error = None

    # IDatabaseConnector methods

    def get_supported_schemes(self):
        """Yield the ('sqlite', priority) scheme tuple.

        Priority is -1 (unusable, with `self.error` set) when the SQLite
        or PySqlite version is below the module-level minimums, else 1.
        """
        if sqlite_version < min_sqlite_version:
            self.error = _("SQLite version is %(version)s. Minimum required "
                           "version is %(min_version)s.",
                           version=sqlite_version_string,
                           min_version='%d.%d.%d' % min_sqlite_version)
        elif pysqlite_version < min_pysqlite_version:
            self.error = _("Need at least PySqlite %(version)s or higher",
                           version='%d.%d.%d' % min_pysqlite_version)
        yield 'sqlite', -1 if self.error else 1

    def get_connection(self, path, log=None, params={}):
        """Return a `SQLiteConnection` to the database at `path`.

        For ':memory:' databases a single cached connection is reused
        (and recreated if it was closed); file databases get a fresh
        connection per call.
        """
        self.required = True
        # Copy before adding the 'extensions' key: the original code
        # mutated `params` in place, which corrupted the shared mutable
        # default dict (and any caller-owned dict) across calls.
        params = dict(params)
        params['extensions'] = self._extensions
        if path == ':memory:':
            try:
                self.memory_cnx.cursor()
            except (AttributeError, sqlite.DatabaseError):
                # memory_cnx is None or database connection closed.
                self.memory_cnx = SQLiteConnection(path, log, params)
            return self.memory_cnx
        else:
            return SQLiteConnection(path, log, params)

    def get_exceptions(self):
        """Return the module exposing the DB-API exception classes."""
        return sqlite

    def init_db(self, path, schema=None, log=None, params={}):
        """Create the database at `path` and populate it with `schema`.

        Falls back to `trac.db_default.schema` when `schema` is None.
        Raises TracError if a file database already exists at `path`.
        """
        def insert_schema(cursor, schema):
            # Execute the CREATE statements for every table in the schema.
            if schema is None:
                from trac.db_default import schema
            for table in schema:
                for stmt in self.to_sql(table):
                    cursor.execute(stmt)

        if path != ':memory:':
            # make the directory to hold the database
            if self.db_exists(path):
                raise TracError(_("Database already exists at %(path)s",
                                  path=path))
            dir = os.path.dirname(path)
            if not os.path.exists(dir):
                os.makedirs(dir)
            if isinstance(path, unicode):  # needed with 2.4.0
                path = path.encode('utf-8')
            # this direct connect will create the database if needed
            cnx = sqlite.connect(path, isolation_level=None,
                                 timeout=int(params.get('timeout', 10000)))
            with closing(cnx.cursor()) as cursor:
                _set_journal_mode(cursor, params.get('journal_mode'))
                _set_synchronous(cursor, params.get('synchronous'))
                insert_schema(cursor, schema)
            cnx.isolation_level = 'DEFERRED'
        else:
            cnx = self.get_connection(path, log, params)
            with closing(cnx.cursor()) as cursor:
                insert_schema(cursor, schema)
        cnx.commit()

    def destroy_db(self, path, log=None, params={}):
        """Delete the database file at `path` (no-op for ':memory:').

        A missing file is silently ignored; other OS errors propagate.
        """
        if path != ':memory:':
            if not os.path.isabs(path):
                path = os.path.join(self.env.path, path)
            try:
                os.remove(path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

    def db_exists(self, path, log=None, params={}):
        """Return True if a database file exists at `path`."""
        return os.path.exists(path)

    def to_sql(self, table):
        """Return the SQL statements creating `table` (module helper)."""
        return _to_sql(table)

    def alter_column_types(self, table, columns):
        """Yield SQL statements altering the type of one or more columns of
        a table.

        Type changes are specified as a `columns` dict mapping column names
        to `(from, to)` SQL type tuples.  SQLite is dynamically typed, so
        no statements are needed as long as both types map to the same
        affinity; anything else is unsupported.
        """
        for name, (from_, to) in sorted(columns.iteritems()):
            if _type_map.get(to, to) != _type_map.get(from_, from_):
                raise NotImplementedError("Conversion from %s to %s is not "
                                          "implemented" % (from_, to))
        return ()

    def backup(self, dest_file):
        """Simple SQLite-specific backup of the database.

        @param dest_file: Destination file basename
        """
        import shutil
        db_str = self.config.get('trac', 'database')
        try:
            # Strip any '?key=value' connection parameters.
            db_str = db_str[:db_str.index('?')]
        except ValueError:
            pass
        # Skip the leading 'sqlite:' scheme prefix (7 characters).
        db_name = os.path.join(self.env.path, db_str[7:])
        shutil.copy(db_name, dest_file)
        if not os.path.exists(dest_file):
            raise TracError(_("No destination file created"))
        return dest_file

    def get_system_info(self):
        """Yield (name, version) pairs for the system info page."""
        yield 'SQLite', sqlite_version_string
        yield 'pysqlite', pysqlite_version_string

    @lazy
    def _extensions(self):
        # Resolve configured extension paths, making relative paths
        # relative to the Trac environment directory.  Computed once.
        _extensions = []
        for extpath in self.extensions:
            if not os.path.isabs(extpath):
                extpath = os.path.join(self.env.path, extpath)
            _extensions.append(extpath)
        return _extensions
class CloudModule(Component):
    """Orchestrates AWS cloud resources via Chef.

    Leans heavily on boto and PyChef and borrowed much from the
    built-in report component.
    """

    implements(ITemplateProvider, INavigationContributor,
               IPermissionRequestor, IRequestFilter, IRequestHandler)

    # trac.ini options
    nav_label = Option('cloud', 'nav_label', _('Cloud'),
        _("Top nav label."))

    aws_key = Option('cloud', 'aws_key', '',
        _("AWS/S3 access key."))

    aws_secret = Option('cloud', 'aws_secret', '',
        _("AWS/S3 secret."))

    aws_keypair = Option('cloud', 'aws_keypair', '',
        _("AWS/S3 keypair name."))

    aws_keypair_pem = Option('cloud', 'aws_keypair_pem', '',
        _("AWS/EC2 keypair file path."))

    aws_username = Option('cloud', 'aws_username', 'ubuntu',
        _("AWS/EC2 ssh username."))

    aws_security_groups = Option('cloud', 'aws_security_groups', 'default',
        _("AWS/EC2 security groups comma-separated list of strings."))

    rds_username = Option('cloud', 'rds_username', '',
        _("AWS/RDS master username."))

    # Fixed copy-paste defect: the help text previously duplicated the
    # rds_username description ("master username").
    rds_password = Option('cloud', 'rds_password', '',
        _("AWS/RDS master password."))

    chef_base_path = Option('cloud', 'chef_base_path', '',
        _("Directory where .chef configs can be found."))

    chef_boot_run_list = ListOption('cloud', 'chef_boot_run_list', [],
        _("If set, used instead of the role(s) to bootstrap instances."))

    chef_boot_sudo = BoolOption('cloud', 'chef_boot_sudo', True,
        _("Whether the chef knife bootstrap should be run as sudo."))

    chef_boot_version = Option('cloud', 'chef_boot_version', '',
        _("Version of chef-client to install."))

    jabber_server = Option('cloud', 'jabber_server', '',
        _("Jabber server."))

    jabber_port = Option('cloud', 'jabber_port', '',
        _("Jabber port."))

    jabber_username = Option('cloud', 'jabber_username', '',
        _("Jabber username."))

    jabber_password = Option('cloud', 'jabber_password', '',
        _("Jabber password."))

    jabber_channel = Option('cloud', 'jabber_channel', '',
        _("Jabber channel."))

    default_resource = Option('cloud', 'default_resource', '',
        _("Name of the AWS resource to show if not provided in url."))

    items_per_page = IntOption('cloud', 'items_per_page', 100,
        _("Number of items displayed per page in cloud reports by default"))

    items_per_page_rss = IntOption('cloud', 'items_per_page_rss', 0,
        _("Number of items displayed in the rss feeds for cloud reports"))

    # NOTE: Each droplet's [cloud.*] config is retrieved during its init.

    field_handlers = ExtensionPoint(IFieldHandler)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        """Return the static resource directory for this plugin."""
        from pkg_resources import resource_filename  #@UnresolvedImport
        return [('cloud', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        """Return the template directory for this plugin."""
        from pkg_resources import resource_filename  #@UnresolvedImport
        return [resource_filename(__name__, 'templates')]

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'cloud'

    def get_navigation_items(self, req):
        """Add the 'Cloud' main-nav entry for users with CLOUD_VIEW."""
        if 'CLOUD_VIEW' in req.perm:
            yield ('mainnav', 'cloud',
                   tag.a(self.nav_label, href=req.href.cloud()))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare CLOUD_* actions, with CLOUD_ADMIN implying them all."""
        actions = ['CLOUD_CREATE', 'CLOUD_DELETE', 'CLOUD_MODIFY',
                   'CLOUD_VIEW']
        return actions + [('CLOUD_ADMIN', actions)]

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        # Attach the droplet JS on every /cloud page.
        if req.path_info.startswith('/cloud'):
            add_script(req, 'cloud/droplet.js')
        return template, data, content_type

    # IRequestHandler methods

    def match_request(self, req):
        """Match /cloud, /cloud/<droplet_name> and /cloud/<name>/<id>."""
        match = re.match(r'/cloud(?:/([^/]+)(?:/([\w.\-]+))?)?$',
                         req.path_info)
        if match:
            if match.group(1):
                req.args['droplet_name'] = match.group(1)
            if match.group(2):
                req.args['id'] = match.group(2)
            return True

    def process_request(self, req):
        """Route cloud requests to the matching droplet's CRUD handlers."""
        req.perm.require('CLOUD_VIEW')

        # setup cloud droplets (lazily, once per component instance)
        if not hasattr(self, 'droplets'):
            # setup chefapi and cloudapi
            chefapi = Chef(self.chef_base_path,
                           self.aws_keypair_pem,
                           self.aws_username,
                           self.chef_boot_run_list,
                           self.chef_boot_sudo,
                           self.chef_boot_version,
                           self.log)
            cloudapi = Aws(self.aws_key,
                           self.aws_secret,
                           self.aws_keypair,
                           self.aws_security_groups,
                           self.rds_username,
                           self.rds_password,
                           self.log)

            # instantiate each droplet (singletons)
            self.droplets = {}
            self.titles = Droplet.titles(self.env)
            for _order, droplet_name, _title in self.titles:
                self.droplets[droplet_name] = Droplet.new(
                    self.env, droplet_name, chefapi, cloudapi,
                    self.field_handlers, self.log)

        # ensure at least one droplet exists
        if not self.droplets:
            raise ResourceNotFound(
                _("No cloud resources found in trac.ini."),
                _('Missing Cloud Resource'))

        droplet_name = req.args.get('droplet_name', '')
        id = req.args.get('id', '')
        action = req.args.get('action', 'view')
        file = req.args.get('file', '')

        # Redirect to the configured (or first) resource when none given.
        if not droplet_name:
            droplet_name = self.default_resource
            if not droplet_name:
                _order, droplet_name, _title = self.titles[0]
            req.redirect(req.href.cloud(droplet_name))

        # check for valid kind
        if droplet_name not in self.droplets:
            raise ResourceNotFound(
                _("Cloud resource '%(droplet_name)s' does not exist.",
                  droplet_name=droplet_name),
                _('Invalid Cloud Resource'))

        # retrieve the droplet
        droplet = self.droplets[droplet_name]

        # route the request
        if req.method == 'POST':
            # NOTE(review): each POST action is expected to redirect (and
            # so never fall through); `template` is only set on GET paths.
            if 'cancel' in req.args:
                req.redirect(req.href.cloud(droplet_name, id))
            elif action == 'new':
                droplet.create(req)
            elif action == 'delete':
                droplet.delete(req, id)
            elif action == 'edit':
                droplet.save(req, id)
            elif action == 'audit' or 'audit' in req.args:
                droplet.audit(req, id)
            elif action == 'execute' or 'execute' in req.args:
                droplet.execute(req, id)
        else:  # req.method == 'GET':
            if action in ('edit', 'new'):
                template, data, content_type = droplet.render_edit(req, id)
                Chrome(self.env).add_wiki_toolbars(req)
            elif action == 'delete':
                template, data, content_type = droplet.render_delete(req, id)
            elif action == 'progress':
                template, data, content_type = droplet.render_progress(
                    req, file)
            elif id == '':
                template, data, content_type = droplet.render_grid(req)
                if content_type:  # i.e. alternate format
                    return template, data, content_type
            else:
                template, data, content_type = droplet.render_view(req, id)
                if content_type:  # i.e. alternate format
                    return template, data, content_type

        # add contextual nav
        for _order, droplet_name, title in self.titles:
            add_ctxtnav(req, title, href=req.href.cloud(droplet_name))

        add_stylesheet(req, 'common/css/report.css')  # reuse css
        return template, data, None
class IrcLogsView(Component):
    """Browse daily IRC log files as Trac pages, with a calendar and feeds.

    Log files are discovered on disk via the configured `file_format`
    pattern; each day's file is parsed line-by-line with `_line_re`.
    """
    implements(INavigationContributor, ITemplateProvider, IRequestHandler, \
               IPermissionRequestor)

    # URL scheme: /irclogs[/YYYY[/MM[/DD]]][/feed[/N]][/]
    _url_re = re.compile(
        r'^/irclogs(/(?P<year>\d{4})(/(?P<month>\d{2})'
        r'(/(?P<day>\d{2}))?)?)?(/(?P<feed>feed)(/(?P<feed_count>\d+?))?)?/?$')

    # TODO: make the line format somewhat configurable
    # Uncomment the following line if using a pipe as a divider and a space
    # between the date adn time.  Make sure to comment out the existing
    # _line_re.
    # _line_re = re.compile('%s %s \| (%s)$' % (
    #
    # Matches "YYYY-MM-DDTHH:MM:SS <payload>" where payload is one of:
    #   "<nick> text"  (channel message),
    #   "* nick text"  (action),
    #   "*** nick text" (server notice).
    _line_re = re.compile('%sT%s (%s)$' % (
        r'(?P<date>\d{4}-\d{2}-\d{2})',
        r'(?P<time>\d{2}:\d{2}:\d{2})',
        '|'.join([
            r'(<(?P<c_nickname>.*?)> (?P<c_text>.*?))',
            r'(\* (?P<a_nickname>.*?) (?P<a_text>.*?))',
            r'(\*\*\* (?P<s_nickname>.*?) (?P<s_text>.*?))'
        ])))

    charset = Option('irclogs', 'charset', 'utf-8',
                     doc='Channel charset')
    file_format = Option('irclogs', 'file_format', '#channel.%Y-%m-%d.log',
                         doc='Format of a logfile for a given day. Must '
                             'include %Y, %m and %d. Example: '
                             '#channel.%Y-%m-%d.log')
    path = Option('irclogs', 'path', '',
                  doc='The path where the irc logfiles are')
    navbutton = Option('irclogs', 'navigation_button', '',
                       doc="""If not empty an button with this value as
                       caption is added to the navigation bar, pointing to
                       the irc plugin""")
    prefix = Option('irclogs', 'prefix', '',
                    doc='IRC Channel name')
    search_db_path = Option('irclogs', 'search_db_path', '/tmp/irclogs.idx',
                            doc="""A path to the directory where the search
                            index resides. Example: /tmp/irclogs.idx""")
    hidden_users = ListOption('irclogs', 'hidden_users', '',
                              doc='A list of users that should be hidden by '
                                  'default')

    # ITemplateProvider methods

    def get_templates_dirs(self):
        """Return the plugin's template directory."""
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    def get_htdocs_dirs(self):
        """Return the plugin's static resource directory."""
        from pkg_resources import resource_filename
        return [('irclogs', resource_filename(__name__, 'htdocs'))]

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        # Only claims the nav slot when a nav button is configured.
        if self.navbutton.strip():
            return 'irclogs'

    def get_navigation_items(self, req):
        """Yield the optional main-nav entry for users with IRCLOGS_VIEW."""
        if req.perm.has_permission('IRCLOGS_VIEW'):
            title = self.navbutton.strip()
            if title:
                yield 'mainnav', 'irclogs', html.a(title,
                                                   href=req.href.irclogs())

    # IPermissionHandler methods

    def get_permission_actions(self):
        return ['IRCLOGS_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        """Match the /irclogs URL space and copy regex groups to req.args.

        Unmatched optional groups become None entries in req.args, which
        process_request relies on to pick the view mode.
        """
        m = self._url_re.search(req.path_info)
        if m is None:
            return False
        req.args.update(m.groupdict())
        return True

    def _to_unicode(self, iterable):
        # Decode each raw log line with the configured channel charset.
        for line in iterable:
            yield to_unicode(line, self.charset)

    def _get_file_re(self):
        """Build a regex that matches log filenames for `file_format`,
        capturing year/month/day groups."""
        return re.compile(r'^%s$' % re.escape(self.file_format).replace(
            '\\%Y', '(?P<year>\d{4})').replace(
            '\\%m', '(?P<month>\d{2})').replace('\\%d', '(?P<day>\d{2})'))

    def _get_filename(self, year, month, day):
        """Return the full path of the log file for a given date."""
        return os.path.join(
            self.path,
            self.file_format.replace('%Y', str(year)).replace(
                '%m', str(month)).replace('%d', str(day)))

    def _render_lines(self, iterable, tz=None):
        """Parse decoded log lines into dicts for the template.

        Each result dict carries date/time (localized to `tz` when given),
        message mode ('channel'/'action'/'server'), nickname, text, a
        stable per-nick CSS class, and a 'hidden_user' flag for nicks in
        the `hidden_users` option.  Non-matching lines are skipped.
        """
        # `dummy` stands in for groupdict() when the regex didn't match,
        # so `d` is simply empty and the for/else below skips the line.
        dummy = lambda: {}
        result = []
        for line in iterable:
            d = getattr(self._line_re.search(line), 'groupdict', dummy)()
            # Find which of the three payload variants matched; the loop's
            # `else` runs only when none did (line skipped).
            for mode in ('channel', 'action', 'server'):
                prefix = mode[0]
                text = d.get('%s_text' % prefix)
                if not text is None:
                    nick = d['%s_nickname' % prefix]
                    break
            else:
                continue
            if nick in self.hidden_users:
                hidden = "hidden_user"
            else:
                hidden = ""
            if not tz is None:
                # Convert the server-local timestamp to the user's tz and
                # also produce a UTC representation for the template.
                utc = pytz.utc
                server_dt = self._get_tz_datetime(d['date'], d['time'])
                local_dt = tz.normalize(server_dt.astimezone(tz))
                local_time = local_dt.strftime("%H:%M:%S")
                local_date = local_dt.strftime("%Y-%m-%d")
                utc_dt = utc.normalize(server_dt.astimezone(utc)). \
                    strftime("UTC%Y-%m-%dT%H:%M:%S")
            else:
                local_date = d['date']
                local_time = d['time']
                utc_dt = d['time']
            result.append({
                'date': local_date,
                'hidden_user': hidden,
                'time': local_time,
                'utc_dt': utc_dt,
                'mode': mode,
                'text': text,
                'nickname': nick,
                # Hash the nick into one of 8 color classes.
                'nickcls': 'nick-%d' % (sum(ord(c) for c in nick) % 8),
            })
        return result

    def _generate_calendar(self, req, entries):
        """Build the month-calendar widget data for the requested date.

        `entries` maps year -> month -> day -> True for days that have a
        log file; days present get an href, the requested day is flagged
        'today'.  Returns a dict with weeks plus prev/next navigation.
        """
        if not req.args['year'] is None:
            year = int(req.args['year'])
        else:
            year = datetime.now().year
        if not req.args['month'] is None:
            month = int(req.args['month'])
        else:
            month = datetime.now().month
        if not req.args['day'] is None:
            today = int(req.args['day'])
        else:
            today = -1
        this_month_entries = entries.get(year, {}).get(month, {})
        weeks = []
        for week in calendar.monthcalendar(year, month):
            w = []
            for day in week:
                if not day:
                    # monthcalendar pads weeks with 0 for out-of-month days.
                    w.append({'empty': True})
                else:
                    w.append({
                        'caption': day,
                        'href': req.href('irclogs', year, '%02d' % month,
                                         '%02d' % day),
                        'today': day == today,
                        'has_log': day in this_month_entries
                    })
            weeks.append(w)
        # Compute next-month navigation target (rolling over the year).
        next_month_year = year
        next_month = int(month) + 1
        if next_month > 12:
            next_month_year += 1
            next_month = 1
        if today > -1:
            next_month_href = req.href('irclogs', next_month_year,
                                       '%02d' % next_month, '%02d' % today)
        else:
            next_month_href = req.href('irclogs', next_month_year,
                                       '%02d' % next_month)
        # Compute previous-month navigation target (rolling back the year).
        prev_month_year = year
        prev_month = int(month) - 1
        if prev_month < 1:
            prev_month_year -= 1
            prev_month = 12
        if today > -1:
            prev_month_href = req.href('irclogs', prev_month_year,
                                       '%02d' % prev_month, '%02d' % today)
        else:
            prev_month_href = req.href('irclogs', prev_month_year,
                                       '%02d' % prev_month)
        return {
            'weeks': weeks,
            'year': {
                'caption': year,
                'href': req.href('irclogs', year)
            },
            'month': {
                'caption': month_name[month],
                'href': req.href('irclogs', year, '%02d' % month)
            },
            'next_year': {
                'caption': str(year + 1),
                'href': req.href('irclogs', year + 1)
            },
            'prev_year': {
                'caption': str(year - 1),
                'href': req.href('irclogs', year - 1)
            },
            'next_month': {
                'caption': '%02d' % next_month,
                'href': next_month_href
            },
            'prev_month': {
                'caption': '%02d' % prev_month,
                'href': prev_month_href
            },
        }

    def _get_tz_datetime(self, date, time):
        # Parse "YYYY-MM-DDTHH:MM:SS" into a datetime in the server's tz.
        return datetime(*strptime(date + "T" + time,
                                  "%Y-%m-%dT%H:%M:%S")[0:6]). \
            replace(tzinfo=localtz)

    def process_request(self, req):
        """Render the calendar, month/day listings, or one day's log.

        View mode is driven by which of year/month/day req.args are set
        (they are None for unmatched URL groups); the 'feed' arg switches
        to the RSS template with at most `feed_count` lines.
        """
        req.perm.assert_permission('IRCLOGS_VIEW')
        add_stylesheet(req, 'irclogs/style.css')
        add_stylesheet(req, 'irclogs/datePicker.css')
        add_script(req, 'irclogs/date.js')
        add_script(req, 'irclogs/jquery.datePicker.js')
        add_script(req, 'irclogs/irclogs.js')
        file_re = self._get_file_re()
        context = {}
        entries = {}
        context['cal'] = self._generate_calendar(req, entries)
        # list all log files to know what dates are available
        try:
            files = os.listdir(self.path)
        except OSError, e:
            code, message = e
            context['error'] = True
            context['message'] = '%s: %s' % (message, e.filename)
            return 'irclogs.html', context, None
        if len(files) == 0:
            context['error'] = True
            context['message'] = 'No logs exist yet. ' \
                                 'Contact your system administrator.'
            return 'irclogs.html', context, None
        files.sort()
        first_found = True
        for fn in files:
            m = file_re.search(fn)
            if m is None:
                continue
            d = m.groupdict()
            # Record each found day in the nested year/month/day dict.
            y = entries.setdefault(int(d['year']), {})
            m = y.setdefault(int(d['month']), {})
            m[int(d['day'])] = True
            if first_found is True:
                # Earliest available date (files are sorted) for the picker.
                context['start_date'] = '%s/%s/%s' % (d['month'], d['day'],
                                                      d['year'])
                first_found = False
        # default to today if no date is selected
        # or build lists of available dates if no date is given
        if req.args['year'] is None:
            today = datetime.now()
            req.args['year'] = today.year
            req.args['month'] = '%02d' % today.month
            req.args['day'] = '%02d' % today.day
        elif req.args['month'] is None:
            months = entries.get(int(req.args['year']), {}).keys()
            months.sort()
            context['months'] = [{
                'caption': month_name[m],
                'href': req.href('irclogs', req.args['year'], '%02d' % m)
            } for m in months]
            context['year'] = req.args['year']
            context['viewmode'] = 'months'
        elif req.args['day'] is None:
            year = entries.get(int(req.args['year']), {})
            days = year.get(int(req.args['month']), {}).keys()
            days.sort()
            context['days'] = [{
                'caption': d,
                'href': req.href('irclogs', req.args['year'],
                                 req.args['month'], '%02d' % d)
            } for d in days]
            context['year'] = req.args['year']
            context['month'] = month_name[int(req.args['month'])]
            context['viewmode'] = 'days'
        # generate calendar according to log files found
        # if day is given, read logfile and build irc log for display
        if req.args['day'] is not None:
            logfile = self._get_filename(req.args['year'], req.args['month'],
                                         req.args['day'])
            context['day'] = req.args['day']
            context['month'] = req.args['month']
            context['month_name'] = month_name[int(req.args['month'])]
            context['year'] = req.args['year']
            context['viewmode'] = 'day'
            context['current_date'] = '%s/%s/%s' % (
                req.args['month'], req.args['day'], req.args['year'])
            context['int_month'] = int(req.args['month']) - 1
            if not os.path.exists(logfile):
                context['missing'] = True
            else:
                context['missing'] = False
                f = file(logfile)
                try:
                    context['lines'] = self._render_lines(
                        self._to_unicode(f), req.tz)
                finally:
                    f.close()
        # handle if display type is html or an external feed
        if req.args['feed'] is not None:
            if not context['missing']:
                context['lines'] = context['lines'] \
                    [:int(req.args.get('feed_count',10))]
            return 'irclogs_feed.html', context, None
        else:
            return 'irclogs.html', context, None
class PygmentsRenderer(Component):
    """HTML renderer for syntax highlighting based on Pygments."""

    implements(ISystemInfoProvider, IHTMLPreviewRenderer,
               IPreferencePanelProvider, IRequestHandler, ITemplateProvider)

    is_valid_default_handler = False

    pygments_lexer_options = ConfigSection('pygments-lexer',
        """Configure Pygments [%(url)s lexer] options.

        For example, to set the
        [%(url)s#lexers-for-php-and-related-languages PhpLexer] options
        `startinline` and `funcnamehighlighting`:
        {{{#!ini
        [pygments-lexer]
        php.startinline = True
        php.funcnamehighlighting = True
        }}}

        The lexer name is derived from the class name, with `Lexer`
        stripped from the end. The lexer //short names// can also be used
        in place of the lexer name.
        """, doc_args={'url': 'http://pygments.org/docs/lexers/'})

    default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
        """The default style to use for Pygments syntax highlighting.""")

    pygments_modes = ListOption('mimeviewer', 'pygments_modes', '',
        doc="""List of additional MIME types known by Pygments.

        For each, a tuple `mimetype:mode:quality` has to be specified,
        where `mimetype` is the MIME type, `mode` is the corresponding
        Pygments mode to be used for the conversion and `quality` is the
        quality ratio associated to this conversion. That can also be used
        to override the default quality ratio used by the Pygments render.
        """)

    expand_tabs = True
    returns_source = True

    # Default conversion quality for every Pygments-handled MIME type.
    QUALITY_RATIO = 7

    # Sample document rendered on the preferences panel so users can
    # preview each style.
    EXAMPLE = """<!DOCTYPE html>
<html lang="en">
  <head>
    <title>Hello, world!</title>
    <script>
      jQuery(document).ready(function($) {
        $("h1").fadeIn("slow");
      });
    </script>
  </head>
  <body>
    <h1>Hello, world!</h1>
  </body>
</html>"""

    # ISystemInfoProvider methods

    def get_system_info(self):
        """Yield the installed Pygments version for the About page."""
        version = get_pkginfo(pygments).get('version')
        # if installed from source, fallback to the hardcoded version info
        if not version and hasattr(pygments, '__version__'):
            version = pygments.__version__
        yield 'Pygments', version

    # IHTMLPreviewRenderer methods

    def get_extra_mimetypes(self):
        """Yield (mimetype, aliases) for every MIME type Pygments knows."""
        for _, aliases, _, mimetypes in get_all_lexers():
            for mimetype in mimetypes:
                yield mimetype, aliases

    def get_quality_ratio(self, mimetype):
        """Return the conversion quality for `mimetype`, 0 if unknown."""
        # Extend default MIME type to mode mappings with configured ones
        try:
            return self._types[mimetype][1]
        except KeyError:
            return 0

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Render `content` as highlighted HTML.

        Adds the user's preferred Pygments stylesheet to the request and
        dispatches to the lexer registered for `mimetype` (parameters such
        as '; charset=...' are stripped first).
        """
        req = context.req
        style = req.session.get('pygments_style', self.default_style)
        add_stylesheet(req, '/pygments/%s.css' % style)
        try:
            if len(content) > 0:
                mimetype = mimetype.split(';', 1)[0]
                language = self._types[mimetype][0]
                return self._generate(language, content, context)
        except (KeyError, ValueError):
            raise Exception("No Pygments lexer found for mime-type '%s'."
                            % mimetype)

    # IPreferencePanelProvider methods

    def get_preference_panels(self, req):
        yield 'pygments', _('Syntax Highlighting')

    def render_preference_panel(self, req, panel):
        """Show the style-picker panel; on POST, persist the chosen style
        in the session and redirect back."""
        styles = list(get_all_styles())

        if req.method == 'POST':
            style = req.args.get('style')
            if style and style in styles:
                req.session['pygments_style'] = style
                add_notice(req, _("Your preferences have been saved."))
            req.redirect(req.href.prefs(panel or None))

        # Load every style's stylesheet so the preview can switch live.
        for style in sorted(styles):
            add_stylesheet(req, '/pygments/%s.css' % style,
                           title=style.title())
        output = self._generate('html', self.EXAMPLE)
        return 'prefs_pygments.html', {
            'output': output,
            'selection': req.session.get('pygments_style',
                                         self.default_style),
            'styles': styles
        }

    # IRequestHandler methods

    def match_request(self, req):
        """Match /pygments/<style>.css requests."""
        match = re.match(r'/pygments/([-\w]+)\.css', req.path_info)
        if match:
            req.args['style'] = match.group(1)
            return True

    def process_request(self, req):
        """Serve the generated CSS for one Pygments style.

        Honors If-Modified-Since against the style module's file mtime,
        answering 304 when the client's copy is current.
        """
        style = req.args['style']
        try:
            style_cls = get_style_by_name(style)
        except ValueError as e:
            raise HTTPNotFound(e)

        parts = style_cls.__module__.split('.')
        filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
        mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
        last_modified = http_date(mtime)
        if last_modified == req.get_header('If-Modified-Since'):
            req.send_response(304)
            req.end_headers()
            return

        formatter = HtmlFormatter(style=style_cls)
        content = u'\n\n'.join([
            formatter.get_style_defs('div.code pre'),
            formatter.get_style_defs('table.code td')
        ]).encode('utf-8')
        req.send_response(200)
        req.send_header('Content-Type', 'text/css; charset=utf-8')
        req.send_header('Last-Modified', last_modified)
        req.send_header('Content-Length', len(content))
        req.write(content)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        return [resource_filename('trac.mimeview', 'templates')]

    # Internal methods

    @lazy
    def _lexer_alias_name_map(self):
        # Map every lexer alias to its canonical name (first alias, or the
        # lexer name when it has no aliases).  Computed once.
        lexer_alias_name_map = {}
        for lexer_name, aliases, _, _ in get_all_lexers():
            name = aliases[0] if aliases else lexer_name
            for alias in aliases:
                lexer_alias_name_map[alias] = name
        return lexer_alias_name_map

    @lazy
    def _lexer_options(self):
        # Parse the [pygments-lexer] config section into a per-lexer dict
        # of options; keys must be '<lexer>.<option>' (others are ignored).
        lexer_options = {}
        for key, lexer_option_value in self.pygments_lexer_options.options():
            try:
                lexer_name_or_alias, lexer_option_name = key.split('.')
            except ValueError:
                pass
            else:
                lexer_name = self._lexer_alias_to_name(lexer_name_or_alias)
                lexer_option = {lexer_option_name: lexer_option_value}
                lexer_options.setdefault(lexer_name, {}).update(lexer_option)
        return lexer_options

    @lazy
    def _types(self):
        # MIME type -> (lexer name, quality) map: Pygments' own registry,
        # one compatibility alias, then trac.ini overrides on top.
        types = {}
        for lexer_name, aliases, _, mimetypes in get_all_lexers():
            name = aliases[0] if aliases else lexer_name
            for mimetype in mimetypes:
                types[mimetype] = (name, self.QUALITY_RATIO)

        # Pygments < 1.4 doesn't know application/javascript
        if 'application/javascript' not in types:
            js_entry = types.get('text/javascript')
            if js_entry:
                types['application/javascript'] = js_entry

        types.update(Mimeview(self.env).configured_modes_mapping('pygments'))
        return types

    def _generate(self, language, content, context=None):
        """Tokenize `content` with the lexer for `language` and emit the
        highlighted Genshi stream."""
        lexer_name = self._lexer_alias_to_name(language)
        lexer_options = {'stripnl': False}
        lexer_options.update(self._lexer_options.get(lexer_name, {}))
        if context:
            lexer_options.update(context.get_hint('lexer_options', {}))
        lexer = get_lexer_by_name(lexer_name, **lexer_options)
        return GenshiHtmlFormatter().generate(lexer.get_tokens(content))

    def _lexer_alias_to_name(self, alias):
        # Resolve a short alias to the canonical lexer name (identity for
        # unknown aliases).
        return self._lexer_alias_name_map.get(alias, alias)
class TagInputAutoComplete(TagTemplateProvider):
    """[opt] Provides auto-complete functionality for tag input fields.

    This module is based on KeywordSuggestModule from KeywordSuggestPlugin
    0.5dev.
    """

    implements(IRequestFilter, ITemplateStreamFilter)

    field_opt = Option('tags', 'complete_field', 'keywords',
        "Ticket field to which a drop-down tag list should be attached.")

    help_opt = Option('tags', 'ticket_help', None,
        "If specified, 'keywords' label on ticket view will be turned into a "
        "link to this URL.")

    helpnewwindow_opt = BoolOption('tags', 'ticket_help_newwindow', False,
        "If true and keywords_help specified, wiki page will open in a new "
        "window. Default is false.")

    # Needs to be reimplemented, refs th:#8141.
    #mustmatch = BoolOption('tags', 'complete_mustmatch', False,
    #    "If true, input fields accept values from the word list only.")

    matchcontains_opt = BoolOption('tags', 'complete_matchcontains', True,
        "Include partial matches in suggestion list. Default is true.")

    separator_opt = Option('tags', 'separator', ' ',
        "Character(s) to use as separators between tags. Default is a "
        "single whitespace.")

    sticky_tags_opt = ListOption('tags', 'complete_sticky_tags', '', ',',
        doc="A list of comma separated values available for input.")

    def __init__(self):
        # Cache whether the tags subsystem is active for this environment.
        self.tags_enabled = self.env.is_enabled(TagSystem)

    @property
    def separator(self):
        # Strip quoting so a separator of "' '" in trac.ini means a space.
        return self.separator_opt.strip('\'') or ' '

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        """Load jQuery-UI resources on ticket and (optionally) wiki pages."""
        if template is not None and \
                (req.path_info.startswith('/ticket/') or
                 req.path_info.startswith('/newticket') or
                 (self.tags_enabled and
                  req.path_info.startswith('/wiki/'))):
            # In Trac 1.0 and later, jQuery-UI is included from the core.
            if trac_version >= '1.0':
                Chrome(self.env).add_jquery_ui(req)
            else:
                add_script(req, 'tags/js/jquery-ui-1.8.16.custom.min.js')
                add_stylesheet(req, 'tags/css/jquery-ui-1.8.16.custom.css')
        return template, data, content_type

    # ITemplateStreamFilter method

    def filter_stream(self, req, method, filename, stream, data):
        """Inject the auto-complete JavaScript into ticket/wiki templates."""
        if not (filename == 'ticket.html' or
                (self.tags_enabled and filename == 'wiki_edit.html')):
            return stream

        keywords = self._get_keywords_string(req)
        if not keywords:
            self.log.debug(
                "No keywords found. TagInputAutoComplete is disabled.")
            return stream

        matchfromstart = '"^" +'
        if self.matchcontains_opt:
            matchfromstart = ''

        js = """
            jQuery(document).ready(function($) {
                var keywords = [ %(keywords)s ]
                var sep = '%(separator)s'.trim() + ' '
                function split( val ) {
                    return val.split( /%(separator)s\s*|\s+/ );
                }
                function extractLast( term ) {
                    return split( term ).pop();
                }
                $('%(field)s')
                    // don't navigate away from field on tab when selecting
                    // an item
                    .bind( "keydown", function( event ) {
                        if ( event.keyCode === $.ui.keyCode.TAB &&
                             $( this ).data( "autocomplete" ).menu.active ) {
                            event.preventDefault();
                        }
                    })
                    .autocomplete({
                        delay: 0,
                        minLength: 0,
                        source: function( request, response ) {
                            // delegate back to autocomplete, but extract
                            // the last term
                            response( $.ui.autocomplete.filter(
                                keywords, extractLast( request.term ) ) );
                        },
                        focus: function() {
                            // prevent value inserted on focus
                            return false;
                        },
                        select: function( event, ui ) {
                            var terms = split( this.value );
                            // remove the current input
                            terms.pop();
                            // add the selected item
                            terms.push( ui.item.value );
                            // add placeholder to get the comma-and-space at
                            // the end
                            terms.push( "" );
                            this.value = terms.join( sep );
                            return false;
                        }
                    });
            });"""

        # Inject transient part of JavaScript into ticket.html template.
        if req.path_info.startswith('/ticket/') or \
                req.path_info.startswith('/newticket'):
            js_ticket = js % {'field': '#field-' + self.field_opt,
                              'keywords': keywords,
                              'matchfromstart': matchfromstart,
                              'separator': self.separator}
            stream = stream | Transformer('.//head')\
                .append(builder.script(Markup(js_ticket),
                                       type='text/javascript'))

            # Turn keywords field label into link to an arbitrary resource.
            if self.help_opt:
                link = self._get_help_link(req)
                if self.helpnewwindow_opt:
                    link = builder.a(href=link, target='blank')
                else:
                    link = builder.a(href=link)
                xpath = '//label[@for="field-keywords"]/text()'
                stream = stream | Transformer(xpath).wrap(link)

        # Inject transient part of JavaScript into wiki.html template.
        elif self.tags_enabled and req.path_info.startswith('/wiki/'):
            js_wiki = js % {'field': '#tags',
                            'keywords': keywords,
                            'matchfromstart': matchfromstart,
                            'separator': self.separator}
            stream = stream | Transformer('.//head')\
                .append(builder.script(Markup(js_wiki),
                                       type='text/javascript'))
        return stream

    # Private methods

    def _get_keywords_string(self, req):
        """Return all available tags as a single-quoted, comma-joined
        JavaScript array body, or '' when there are none."""
        keywords = set(self.sticky_tags_opt)  # prevent duplicates
        if self.tags_enabled:
            # Use TagsPlugin >= 0.7 performance-enhanced API.
            tags = TagSystem(self.env).get_all_tags(req)
            keywords.update(tags.keys())

        if keywords:
            keywords = sorted(keywords)
            keywords = ','.join(("'%s'" % javascript_quote(_keyword)
                                 for _keyword in keywords))
        else:
            keywords = ''
        return keywords

    def _get_help_link(self, req):
        """Resolve the `ticket_help` option into a URL for the keywords
        label.

        Accepts an absolute in-environment path, a 'realm:id' reference, or
        (by default) a wiki page name with an optional '#anchor'.
        """
        link = resource_id = None
        if self.help_opt.startswith('/'):
            # Assume valid URL to arbitrary resource inside
            #   of the current Trac environment.
            link = req.href(self.help_opt)
        if not link and ':' in self.help_opt:
            realm, resource_id = self.help_opt.split(':', 1)
            # Validate realm-like prefix against resource realm list,
            #   but exclude 'wiki' to allow deferred page creation.
            rsys = ResourceSystem(self.env)
            # Fixed: `set('wiki')` built the char set {'w','i','k','i'},
            # so the 'wiki' realm was never actually excluded.
            if realm in set(rsys.get_known_realms()) - set(['wiki']):
                mgr = rsys.get_resource_manager(realm)
                # Handle optional IResourceManager method gracefully.
                try:
                    if mgr.resource_exists(Resource(realm, resource_id)):
                        link = mgr.get_resource_url(resource_id, req.href)
                except AttributeError:
                    # Assume generic resource URL build rule.
                    link = req.href(realm, resource_id)
        if not link:
            if not resource_id:
                # Assume wiki page name for backwards-compatibility.
                resource_id = self.help_opt
            # Preserve anchor without 'path_safe' arg (since Trac 0.12.2dev).
            if '#' in resource_id:
                path, anchor = resource_id.split('#', 1)
                if hasattr(unicode_quote_plus, "safe"):
                    # Use method for query string quoting (since Trac
                    # 0.13dev).
                    anchor = unicode_quote_plus(anchor, safe="?!~*'()")
                else:
                    anchor = unicode_quote_plus(anchor)
                link = '#'.join([req.href.wiki(path), anchor])
            else:
                # Fixed: the previous code unconditionally quoted and
                # joined the anchor, crashing on the (anchor is None) path.
                link = req.href.wiki(resource_id)
        return link
class BrowserModule(Component):
    """Repository browser.

    Renders the repository index, directory listings and file previews,
    serves raw/zip downloads, and provides the `source:`, `browser:` and
    `export:` wiki link resolvers as well as the `RepositoryIndex` macro.
    """

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IWikiSyntaxProvider, IHTMLPreviewAnnotator, IWikiMacroProvider)

    property_renderers = ExtensionPoint(IPropertyRenderer)

    realm = RepositoryManager.source_realm

    downloadable_paths = ListOption('browser', 'downloadable_paths',
                                    '/trunk, /branches/*, /tags/*',
        doc="""List of repository paths that can be downloaded.

        Leave this option empty if you want to disable all downloads,
        otherwise set it to a comma-separated list of authorized paths (those
        paths are glob patterns, i.e. "*" can be used as a wild card). In a
        multi-repository environment, the path must be qualified with the
        repository name if the path does not point to the default repository
        (e.g. /reponame/trunk). Note that a simple prefix matching is
        performed on the paths, so aliases won't get automatically resolved.
        """)

    color_scale = BoolOption('browser', 'color_scale', True,
        doc="""Enable colorization of the ''age'' column.

        This uses the same color scale as the source code annotation:
        blue is older, red is newer.
        """)

    NEWEST_COLOR = (255, 136, 136)

    newest_color = Option('browser', 'newest_color', repr(NEWEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding
        to the newest color, for the color scale used in ''blame'' or
        the browser ''age'' column if `color_scale` is enabled.
        """)

    OLDEST_COLOR = (136, 136, 255)

    oldest_color = Option('browser', 'oldest_color', repr(OLDEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding
        to the oldest color, for the color scale used in ''blame'' or
        the browser ''age'' column if `color_scale` is enabled.
        """)

    intermediate_point = Option('browser', 'intermediate_point', '',
        doc="""If set to a value between 0 and 1 (exclusive), this will be the
        point chosen to set the `intermediate_color` for interpolating
        the color value.
        """)

    intermediate_color = Option('browser', 'intermediate_color', '',
        doc="""(r,g,b) color triple to use for the color corresponding
        to the intermediate color, if two linear interpolations are used
        for the color scale (see `intermediate_point`).
        If not set, the intermediate color between `oldest_color` and
        `newest_color` will be used.
        """)

    render_unsafe_content = BoolOption('browser', 'render_unsafe_content',
                                       'false',
        """Whether raw files should be rendered in the browser, or only made
        downloadable.

        Pretty much any file may be interpreted as HTML by the browser,
        which allows a malicious user to create a file containing cross-site
        scripting attacks.

        For open repositories where anyone can check-in a file, it is
        recommended to leave this option disabled.""")

    hidden_properties = ListOption('browser', 'hide_properties', 'svk:merge',
        doc="""Comma-separated list of version control properties to hide from
        the repository browser.
        """)

    # public methods

    def get_custom_colorizer(self):
        """Returns a converter for values from [0.0, 1.0] to a RGB triple."""

        def interpolate(old, new, value):
            # Provides a linearly interpolated color triple for `value`
            # which must be a floating point value between 0.0 and 1.0
            return tuple([int(b + (a - b) * value) for a, b in zip(new, old)])

        def parse_color(rgb, default):
            # Get three ints out of a `rgb` string or return `default`
            try:
                t = tuple([int(v) for v in re.split(r'(\d+)', rgb)[1::2]])
                return t if len(t) == 3 else default
            except ValueError:
                return default

        newest_color = parse_color(self.newest_color, self.NEWEST_COLOR)
        oldest_color = parse_color(self.oldest_color, self.OLDEST_COLOR)
        try:
            intermediate = float(self.intermediate_point)
        except ValueError:
            intermediate = None
        if intermediate:
            # Two linear segments: oldest -> intermediate -> newest.
            intermediate_color = parse_color(self.intermediate_color, None)
            if not intermediate_color:
                intermediate_color = tuple([(a + b) / 2 for a, b in
                                            zip(newest_color, oldest_color)])

            def colorizer(value):
                if value <= intermediate:
                    value = value / intermediate
                    return interpolate(oldest_color, intermediate_color,
                                       value)
                else:
                    value = (value - intermediate) / (1.0 - intermediate)
                    return interpolate(intermediate_color, newest_color,
                                       value)
        else:
            # Single linear interpolation between oldest and newest.
            def colorizer(value):
                return interpolate(oldest_color, newest_color, value)
        return colorizer

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'browser'

    def get_navigation_items(self, req):
        # Only show the "Browse Source" entry if at least one repository
        # is viewable by the current user.
        rm = RepositoryManager(self.env)
        if any(repos.is_viewable(req.perm)
               for repos in rm.get_real_repositories()):
            yield ('mainnav', 'browser',
                   tag.a(_('Browse Source'), href=req.href.browser()))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['BROWSER_VIEW', 'FILE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        """Match /export, /browser and /file URLs.

        /file is a legacy URL and is permanently redirected to /browser;
        /export carries the revision as the first path element.
        """
        match = re.match(r'/(export|browser|file)(/.*)?$', req.path_info)
        if match:
            mode, path = match.groups()
            if mode == 'export':
                if path and '/' in path:
                    path_elts = path.split('/', 2)
                    if len(path_elts) != 3:
                        return False
                    path = path_elts[2]
                    req.args['rev'] = path_elts[1]
                    req.args['format'] = 'raw'
            elif mode == 'file':
                req.redirect(req.href.browser(path,
                                              rev=req.args.get('rev'),
                                              format=req.args.get('format')),
                             permanent=True)
            req.args['path'] = path or '/'
            return True

    def process_request(self, req):
        """Render the repository index, a directory listing or a file view,
        depending on the requested path and format."""
        presel = req.args.get('preselected')
        if presel and (presel + '/').startswith(req.href.browser() + '/'):
            req.redirect(presel)

        path = req.args.get('path', '/')
        rev = req.args.get('rev', '')
        if rev.lower() in ('', 'head'):
            rev = None
        format = req.args.get('format')
        order = req.args.get('order', 'name').lower()
        desc = 'desc' in req.args

        rm = RepositoryManager(self.env)
        all_repositories = rm.get_all_repositories()
        reponame, repos, path = rm.get_repository_by_path(path)

        # Repository index
        show_index = not reponame and path == '/'
        if show_index:
            if repos and (as_bool(all_repositories[''].get('hidden'))
                          or not repos.is_viewable(req.perm)):
                repos = None

        if not repos and reponame:
            raise ResourceNotFound(_("Repository '%(repo)s' not found",
                                     repo=reponame))

        if reponame and reponame != repos.reponame:  # Redirect alias
            qs = req.query_string
            req.redirect(req.href.browser(repos.reponame or None, path)
                         + ('?' + qs if qs else ''))
        reponame = repos.reponame if repos else None

        # Find node for the requested path/rev
        context = web_context(req)
        node = None
        changeset = None
        display_rev = lambda rev: rev
        if repos:
            try:
                if rev:
                    rev = repos.normalize_rev(rev)
                # If `rev` is `None`, we'll try to reuse `None` consistently,
                # as a special shortcut to the latest revision.
                rev_or_latest = rev or repos.youngest_rev
                node = get_existing_node(req, repos, path, rev_or_latest)
            except NoSuchChangeset as e:
                raise ResourceNotFound(e, _('Invalid changeset number'))
            if node:
                try:
                    # use changeset instance to retrieve branches and tags
                    changeset = repos.get_changeset(node.rev)
                except NoSuchChangeset:
                    pass

            context = context.child(repos.resource.child(self.realm, path,
                                                        version=rev_or_latest))
            display_rev = repos.display_rev

        # Prepare template data
        path_links = get_path_links(req.href, reponame, path, rev,
                                    order, desc)

        repo_data = dir_data = file_data = None
        if show_index:
            repo_data = self._render_repository_index(
                                    context, all_repositories, order, desc)
        if node:
            if not node.is_viewable(req.perm):
                raise PermissionError('BROWSER_VIEW' if node.isdir else
                                      'FILE_VIEW', node.resource, self.env)
            if node.isdir:
                if format in ('zip',):  # extension point here...
                    self._render_zip(req, context, repos, node, rev)
                    # not reached
                dir_data = self._render_dir(req, repos, node, rev, order,
                                            desc)
            elif node.isfile:
                file_data = self._render_file(req, context, repos, node, rev)

        if not repos and not (repo_data and repo_data['repositories']):
            # If no viewable repositories, check permission instead of
            # repos.is_viewable()
            req.perm.require('BROWSER_VIEW')
            if show_index:
                raise ResourceNotFound(_("No viewable repositories"))
            else:
                raise ResourceNotFound(_("No node %(path)s", path=path))

        quickjump_data = properties_data = None
        if node and not req.is_xhr:
            properties_data = self.render_properties(
                'browser', context, node.get_properties())
            quickjump_data = list(repos.get_quickjump_entries(rev))

        data = {
            'context': context, 'reponame': reponame, 'repos': repos,
            'repoinfo': all_repositories.get(reponame or ''),
            'path': path, 'rev': node and node.rev, 'stickyrev': rev,
            'display_rev': display_rev, 'changeset': changeset,
            'created_path': node and node.created_path,
            'created_rev': node and node.created_rev,
            'properties': properties_data,
            'path_links': path_links,
            'order': order, 'desc': 1 if desc else None,
            'repo': repo_data, 'dir': dir_data, 'file': file_data,
            'quickjump_entries': quickjump_data,
            'wiki_format_messages':
                self.config['changeset'].getbool('wiki_format_messages'),
        }
        if req.is_xhr:  # render and return the content only
            return 'dir_entries.html', data

        if dir_data or repo_data:
            add_script(req, 'common/js/expand_dir.js')
            add_script(req, 'common/js/keyboard_nav.js')

        # Links for contextual navigation
        if node:
            if node.isfile:
                prev_rev = repos.previous_rev(rev=node.created_rev,
                                              path=node.created_path)
                if prev_rev:
                    href = req.href.browser(reponame,
                                            node.created_path, rev=prev_rev)
                    add_link(req, 'prev', href,
                             _('Revision %(num)s', num=display_rev(prev_rev)))
                if rev is not None:
                    add_link(req, 'up', req.href.browser(reponame,
                                                         node.created_path))
                next_rev = repos.next_rev(rev=node.created_rev,
                                          path=node.created_path)
                if next_rev:
                    href = req.href.browser(reponame, node.created_path,
                                            rev=next_rev)
                    add_link(req, 'next', href,
                             _('Revision %(num)s', num=display_rev(next_rev)))
                prevnext_nav(req, _('Previous Revision'), _('Next Revision'),
                             _('Latest Revision'))
            else:
                if path != '/':
                    add_link(req, 'up', path_links[-2]['href'],
                             _('Parent directory'))
                add_ctxtnav(req, tag.a(_('Last Change'),
                            href=req.href.changeset(node.created_rev,
                                                    reponame,
                                                    node.created_path)))
            if node.isfile:
                annotate = data['file']['annotate']
                if annotate:
                    add_ctxtnav(req, _('Normal'),
                                title=_('View file without annotations'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev))
                if annotate != 'blame':
                    add_ctxtnav(req, _('Blame'),
                                title=_('Annotate each line with the last '
                                        'changed revision '
                                        '(this can be time consuming...)'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev,
                                                      annotate='blame'))
            add_ctxtnav(req, _('Revision Log'),
                        href=req.href.log(reponame, path, rev=rev))
            path_url = repos.get_path_url(path, rev)
            if path_url:
                if path_url.startswith('//'):
                    path_url = req.scheme + ':' + path_url
                add_ctxtnav(req, _('Repository URL'), href=path_url)

        add_stylesheet(req, 'common/css/browser.css')
        return 'browser.html', data

    # Internal methods

    def _render_repository_index(self, context, all_repositories, order,
                                 desc):
        """Build the data dict for the repository index view."""
        # Color scale for the age column
        timerange = custom_colorizer = None
        if self.color_scale:
            custom_colorizer = self.get_custom_colorizer()

        rm = RepositoryManager(self.env)
        repositories = []
        for reponame, repoinfo in all_repositories.iteritems():
            if not reponame or as_bool(repoinfo.get('hidden')):
                continue
            try:
                repos = rm.get_repository(reponame)
            except TracError as err:
                entry = (reponame, repoinfo, None, None,
                         exception_to_unicode(err), None)
            else:
                if repos:
                    if not repos.is_viewable(context.perm):
                        continue
                    try:
                        youngest = repos.get_changeset(repos.youngest_rev)
                    except NoSuchChangeset:
                        youngest = None
                    if self.color_scale and youngest:
                        if not timerange:
                            timerange = TimeRange(youngest.date)
                        else:
                            timerange.insert(youngest.date)
                    raw_href = self._get_download_href(context.href, repos,
                                                       None, None)
                    entry = (reponame, repoinfo, repos, youngest, None,
                             raw_href)
                else:
                    entry = (reponame, repoinfo, None, None, u"\u2013", None)
            if entry[4] is not None:  # Check permission in case of error
                root = Resource('repository', reponame).child(self.realm, '/')
                if 'BROWSER_VIEW' not in context.perm(root):
                    continue
            repositories.append(entry)

        # Ordering of repositories
        if order == 'date':
            def repo_order(args):
                reponame, repoinfo, repos, youngest, err, href = args
                return (youngest.date if youngest else to_datetime(0),
                        embedded_numbers(reponame.lower()))
        elif order == 'author':
            def repo_order(args):
                reponame, repoinfo, repos, youngest, err, href = args
                return (youngest.author.lower() if youngest else '',
                        embedded_numbers(reponame.lower()))
        else:
            def repo_order(args):
                reponame, repoinfo, repos, youngest, err, href = args
                return embedded_numbers(reponame.lower())

        repositories = sorted(repositories, key=repo_order, reverse=desc)

        return {'repositories' : repositories,
                'timerange': timerange, 'colorize_age': custom_colorizer}

    def _render_dir(self, req, repos, node, rev, order, desc):
        """Build the data dict for a directory listing."""
        req.perm(node.resource).require('BROWSER_VIEW')
        download_href = self._get_download_href

        # Entries metadata
        class entry(object):
            # Snapshot the node attributes we need, plus the download link.
            _copy = 'name rev created_rev kind isdir path content_length' \
                    .split()
            __slots__ = _copy + ['raw_href']

            def __init__(self, node):
                for f in entry._copy:
                    setattr(self, f, getattr(node, f))
                self.raw_href = download_href(req.href, repos, node, rev)

        entries = [entry(n) for n in node.get_entries()
                   if n.is_viewable(req.perm)]
        changes = get_changes(repos, [i.created_rev for i in entries],
                              self.log)

        if rev:
            newest = repos.get_changeset(rev).date
        else:
            newest = datetime_now(req.tz)

        # Color scale for the age column
        timerange = custom_colorizer = None
        if self.color_scale:
            timerange = TimeRange(newest)
            max_s = req.args.get('range_max_secs')
            min_s = req.args.get('range_min_secs')
            parent_range = [timerange.from_seconds(int(s))
                            for s in [max_s, min_s] if s]
            this_range = [c.date for c in changes.values() if c]
            for dt in this_range + parent_range:
                timerange.insert(dt)
            custom_colorizer = self.get_custom_colorizer()

        # Ordering of entries
        if order == 'date':
            def file_order(a):
                return (changes[a.created_rev].date,
                        embedded_numbers(a.name.lower()))
        elif order == 'size':
            def file_order(a):
                return (a.content_length,
                        embedded_numbers(a.name.lower()))
        elif order == 'author':
            def file_order(a):
                return (changes[a.created_rev].author.lower(),
                        embedded_numbers(a.name.lower()))
        else:
            def file_order(a):
                return embedded_numbers(a.name.lower())

        # Directories always sort before files, regardless of direction.
        dir_order = 1 if desc else -1

        def browse_order(a):
            return dir_order if a.isdir else 0, file_order(a)
        entries = sorted(entries, key=browse_order, reverse=desc)

        # ''Zip Archive'' alternate link
        zip_href = self._get_download_href(req.href, repos, node, rev)
        if zip_href:
            add_link(req, 'alternate', zip_href, _('Zip Archive'),
                     'application/zip', 'zip')

        return {'entries': entries, 'changes': changes,
                'timerange': timerange, 'colorize_age': custom_colorizer,
                'range_max_secs': (timerange and
                                   timerange.to_seconds(timerange.newest)),
                'range_min_secs': (timerange and
                                   timerange.to_seconds(timerange.oldest)),
                }

    def _iter_nodes(self, node):
        """Depth-first iteration over `node` and all its descendants."""
        stack = [node]
        while stack:
            node = stack.pop()
            yield node
            if node.isdir:
                stack.extend(sorted(node.get_entries(),
                                    key=lambda x: x.name,
                                    reverse=True))

    def _render_zip(self, req, context, repos, root_node, rev=None):
        """Send a zip archive of `root_node` and its children."""
        if not self.is_path_downloadable(repos, root_node.path):
            raise TracError(_("Path not available for download"))
        req.perm(context.resource).require('FILE_VIEW')
        root_path = root_node.path.rstrip('/')
        if root_path:
            archive_name = root_node.name
        else:
            archive_name = repos.reponame or 'repository'
        filename = '%s-%s.zip' % (archive_name, root_node.rev)
        render_zip(req, filename, repos, root_node, self._iter_nodes)

    def _render_file(self, req, context, repos, node, rev=None):
        """Send the file raw (format=raw/txt) or build the preview data."""
        req.perm(node.resource).require('FILE_VIEW')

        mimeview = Mimeview(self.env)

        # MIME type detection
        with content_closing(node.get_processed_content()) as content:
            chunk = content.read(CHUNK_SIZE)
            mime_type = node.content_type
            if not mime_type or mime_type == 'application/octet-stream':
                mime_type = mimeview.get_mimetype(node.name, chunk) or \
                            mime_type or 'text/plain'

            # Eventually send the file directly
            format = req.args.get('format')
            if format in ('raw', 'txt'):
                req.send_response(200)
                req.send_header('Content-Type',
                                'text/plain' if format == 'txt'
                                             else mime_type)
                req.send_header('Last-Modified',
                                http_date(node.last_modified))
                if rev is None:
                    req.send_header('Pragma', 'no-cache')
                    req.send_header('Cache-Control', 'no-cache')
                    req.send_header('Expires',
                                    'Fri, 01 Jan 1999 00:00:00 GMT')
                if not self.render_unsafe_content:
                    # Force browser to download files instead of rendering
                    # them, since they might contain malicious code enabling
                    # XSS attacks
                    req.send_header('Content-Disposition', 'attachment')
                req.end_headers()
                # Note: don't pass an iterable instance to RequestDone,
                # instead call req.write() with each chunk here to avoid
                # SEGVs (#11805)
                while chunk:
                    req.write(chunk)
                    chunk = content.read(CHUNK_SIZE)
                raise RequestDone

        # The changeset corresponding to the last change on `node`
        # is more interesting than the `rev` changeset.
        changeset = repos.get_changeset(node.created_rev)

        # add ''Plain Text'' alternate link if needed
        if not is_binary(chunk) and mime_type != 'text/plain':
            plain_href = req.href.browser(repos.reponame or None,
                                          node.path, rev=rev, format='txt')
            add_link(req, 'alternate', plain_href, _('Plain Text'),
                     'text/plain')

        # add ''Original Format'' alternate link (always)
        raw_href = req.href.export(rev or repos.youngest_rev,
                                   repos.reponame or None, node.path)
        add_link(req, 'alternate', raw_href, _('Original Format'),
                 mime_type)

        self.log.debug("Rendering preview of node %s@%s with "
                       "mime-type %s", node.name, rev, mime_type)

        add_stylesheet(req, 'common/css/code.css')

        annotations = ['lineno']
        annotate = req.args.get('annotate')
        if annotate:
            annotations.insert(0, annotate)
        with content_closing(node.get_processed_content()) as content:
            preview_data = mimeview.preview_data(context, content,
                                                 node.get_content_length(),
                                                 mime_type,
                                                 node.created_path,
                                                 raw_href,
                                                 annotations=annotations,
                                                 force_source=bool(annotate))
        return {
            'changeset': changeset,
            'size': node.content_length,
            'preview': preview_data,
            'annotate': annotate,
        }

    def _get_download_href(self, href, repos, node, rev):
        """Return the URL for downloading a file, or a directory as a ZIP."""
        if node is not None and node.isfile:
            return href.export(rev or 'HEAD', repos.reponame or None,
                               node.path)
        path = '' if node is None else node.path.strip('/')
        if self.is_path_downloadable(repos, path):
            return href.browser(repos.reponame or None, path,
                                rev=rev or repos.youngest_rev, format='zip')

    # public methods

    def is_path_downloadable(self, repos, path):
        """Return whether `path` matches one of `downloadable_paths`."""
        if repos.reponame:
            path = repos.reponame + '/' + path
        return any(fnmatchcase(path, dp.strip('/'))
                   for dp in self.downloadable_paths)

    def render_properties(self, mode, context, props):
        """Prepare rendering of a collection of properties."""
        return filter(None, [self.render_property(name, mode, context, props)
                             for name in sorted(props)])

    def render_property(self, name, mode, context, props):
        """Renders a node property to HTML."""
        if name in self.hidden_properties:
            return
        candidates = []
        for renderer in self.property_renderers:
            quality = renderer.match_property(name, mode)
            if quality > 0:
                candidates.append((quality, renderer))
        candidates.sort(reverse=True)
        # Try renderers in decreasing quality order, falling through to the
        # next one when a renderer raises.
        for (quality, renderer) in candidates:
            try:
                rendered = renderer.render_property(name, mode, context,
                                                    props)
                if not rendered:
                    return rendered
                if isinstance(rendered, RenderedProperty):
                    value = rendered.content
                else:
                    value = rendered
                    rendered = None
                prop = {'name': name, 'value': value, 'rendered': rendered}
                return prop
            except Exception as e:
                self.log.warning('Rendering failed for property %s with '
                                 'renderer %s: %s', name,
                                 renderer.__class__.__name__,
                                 exception_to_unicode(e, traceback=True))

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        return []

    def get_link_resolvers(self):
        """TracBrowser link resolvers.

        `source:` and `browser:`
         * simple paths (/dir/file)
         * paths at a given revision (/dir/file@234)
         * paths with line number marks (/dir/file@234:10,20-30)
         * paths with line number anchor (/dir/file@234#L100)
        Marks and anchor can be combined.
        The revision must be present when specifying line numbers.
        In the few cases where it would be redundant (e.g. for tags), the
        revision number itself can be omitted: /tags/v10/file@100-110#L99
        """
        return [('repos', self._format_browser_link),
                ('export', self._format_export_link),
                ('source', self._format_browser_link),
                ('browser', self._format_browser_link)]

    def _format_export_link(self, formatter, ns, export, label):
        """Render an `export:` link (raw download of a file at a rev)."""
        export, query, fragment = formatter.split_link(export)
        if ':' in export:
            rev, path = export.split(':', 1)
        elif '@' in export:
            path, rev = export.split('@', 1)
        else:
            rev, path = None, export
        node, raw_href, title = self._get_link_info(path, rev,
                                                    formatter.href,
                                                    formatter.perm)
        if raw_href:
            return tag.a(label, class_='export', href=raw_href + fragment,
                         title=title)
        return tag.a(label, class_='missing export')

    def _format_browser_link(self, formatter, ns, path, label):
        """Render a `source:`/`browser:` link, with optional raw/zip icon."""
        path, query, fragment = formatter.split_link(path)
        rev = marks = None
        match = self.PATH_LINK_RE.match(path)
        if match:
            path, rev, marks = match.groups()
        href = formatter.href
        src_href = href.browser(path, rev=rev, marks=marks) + query \
                   + fragment
        node, raw_href, title = self._get_link_info(path, rev,
                                                    formatter.href,
                                                    formatter.perm)
        if not node:
            return tag.a(label, class_='missing source')
        link = tag.a(label, class_='source', href=src_href)
        if raw_href:
            link = tag(link, tag.a(u'\u200b', href=raw_href + fragment,
                                   title=title,
                                   class_='trac-rawlink' if node.isfile
                                          else 'trac-ziplink'))
        return link

    PATH_LINK_RE = re.compile(r"([^@#:]*)"     # path
                              r"[@:]([^#:]+)?" # rev
                              r"(?::(\d+(?:-\d+)?(?:,\d+(?:-\d+)?)*))?"
                                               # marks
                              )

    def _get_link_info(self, path, rev, href, perm):
        """Resolve `path`@`rev` to (node, raw download href, link title)."""
        rm = RepositoryManager(self.env)
        node = raw_href = title = None
        try:
            reponame, repos, npath = rm.get_repository_by_path(path)
            node = get_allowed_node(repos, npath, rev, perm)
            if node is not None:
                raw_href = self._get_download_href(href, repos, node, rev)
                title = _("Download") if node.isfile \
                        else _("Download as Zip archive")
        except TracError:
            pass
        return node, raw_href, title

    # IHTMLPreviewAnnotator methods

    def get_annotation_type(self):
        return 'blame', _('Rev'), _('Revision in which the line changed')

    def get_annotation_data(self, context):
        """Cache the annotation data corresponding to each revision."""
        return BlameAnnotator(self.env, context)

    def annotate_row(self, context, row, lineno, line, blame_annotator):
        blame_annotator.annotate(row, lineno)

    # IWikiMacroProvider methods

    def get_macros(self):
        yield "RepositoryIndex"

    def get_macro_description(self, name):
        description = cleandoc_("""
        Display the list of available repositories.

        Can be given the following named arguments:

          ''format''::
            Select the rendering format:
            - ''compact'' produces a comma-separated list of repository prefix
              names (default)
            - ''list'' produces a description list of repository prefix names
            - ''table'' produces a table view, similar to the one visible in
              the ''Browse View'' page
          ''glob''::
            Do a glob-style filtering on the repository names (defaults to
            '*')
          ''order''::
            Order repositories by the given column (one of "name", "date" or
            "author")
          ''desc''::
            When set to 1, order by descending order
        """)
        return 'messages', description

    def expand_macro(self, formatter, name, content):
        """Expand the `RepositoryIndex` macro in 'table', 'list' or
        'compact' format."""
        args, kwargs = parse_args(content)
        format = kwargs.get('format', 'compact')
        glob = kwargs.get('glob', '*')
        order = kwargs.get('order')
        desc = as_bool(kwargs.get('desc', 0))

        rm = RepositoryManager(self.env)
        all_repos = dict(rdata for rdata in rm.get_all_repositories().items()
                         if fnmatchcase(rdata[0], glob))

        if format == 'table':
            repo = self._render_repository_index(formatter.context,
                                                 all_repos, order, desc)

            add_stylesheet(formatter.req, 'common/css/browser.css')
            wiki_format_messages = self.config['changeset'] \
                                       .getbool('wiki_format_messages')
            data = {'repo': repo, 'order': order,
                    'desc': 1 if desc else None,
                    'reponame': None, 'path': '/', 'stickyrev': None,
                    'wiki_format_messages': wiki_format_messages}
            return Chrome(self.env).render_fragment(formatter.context.req,
                                                    'repository_index.html',
                                                    data)

        def get_repository(reponame):
            try:
                return rm.get_repository(reponame)
            except TracError:
                return

        all_repos = [(reponame, get_repository(reponame))
                     for reponame in all_repos]
        all_repos = sorted(((reponame, repos)
                            for reponame, repos in all_repos
                            if repos
                            and not as_bool(repos.params.get('hidden'))
                            and repos.is_viewable(formatter.perm)),
                           reverse=desc)

        def repolink(reponame, repos):
            label = reponame or _('(default)')
            return Markup(tag.a(label,
                          title=_('View repository %(repo)s', repo=label),
                          href=formatter.href.browser(repos.reponame
                                                      or None)))

        if format == 'list':
            return tag.dl([
                tag(tag.dt(repolink(reponame, repos)),
                    tag.dd(repos.params.get('description')))
                for reponame, repos in all_repos])
        else:  # compact
            return Markup(', ').join(repolink(reponame, repos)
                                     for reponame, repos in all_repos)
class TicketFormatter(AnnouncerTemplateProvider):
    """Formats ticket announcement events as plaintext or HTML email
    bodies, rendered through Genshi templates."""

    implements(IAnnouncementFormatter)

    ticket_email_header_fields = ListOption('announcer',
            'ticket_email_header_fields',
            'owner, reporter, milestone, priority, severity',
            doc="""Comma-separated list of fields to appear in tickets.
            Use * to include all headers.
            """)

    ticket_link_with_comment = BoolOption('announcer',
            'ticket_link_with_comment', 'false',
            """Include last change anchor in the ticket URL.""")

    def styles(self, transport, realm):
        """Yield the output styles supported for ticket events."""
        if realm == "ticket":
            yield "text/plain"
            yield "text/html"

    def alternative_style_for(self, transport, realm, style):
        """Fall back to plaintext for any non-plaintext style."""
        if realm == "ticket" and style != 'text/plain':
            return "text/plain"

    def format(self, transport, realm, style, event):
        """Render `event` in the requested `style`; None for other realms."""
        if realm == "ticket":
            if style == "text/plain":
                return self._format_plaintext(event)
            elif style == "text/html":
                return self._format_html(event)

    def _ticket_link(self, ticket):
        """Absolute URL of the ticket, optionally anchored at the last
        comment (see `ticket_link_with_comment`)."""
        ticket_link = self.env.abs_href('ticket', ticket.id)
        if not self.ticket_link_with_comment:
            return ticket_link
        cnum = self._ticket_last_comment(ticket)
        if cnum is not None:
            ticket_link += "#comment:%s" % str(cnum)
        return ticket_link

    def _ticket_last_comment(self, ticket):
        """Return the highest numeric comment number, or None if the
        ticket has no numbered comments."""
        cnum = -1

        for entry in ticket.get_changelog():
            (time, author, field, oldvalue, newvalue, permanent) = entry
            if field != 'comment':
                continue
            # The comment number is stored in `oldvalue`; skip
            # non-numeric entries (e.g. replies like '3.4').
            n = as_int(oldvalue, None)
            if n is None:
                continue
            if cnum < n:
                cnum = n

        if cnum == -1:
            return None
        else:
            return cnum

    def _format_plaintext(self, event):
        """Render the event through ticket_email_plaintext.txt."""
        ticket = event.target
        short_changes = {}
        long_changes = {}
        # Split changes into short (one-line) and long (multi-line) ones;
        # long values are wrapped and indented for the text template.
        changed_items = [(field, to_unicode(old_value))
                         for field, old_value in event.changes.items()]
        for field, old_value in changed_items:
            new_value = to_unicode(ticket[field])
            if '\n' in new_value or '\n' in old_value:
                long_changes[field.capitalize()] = \
                    '\n'.join(lineup(wrap(new_value, cols=67).split('\n')))
            else:
                short_changes[field.capitalize()] = (old_value, new_value)
        data = dict(
            ticket=ticket,
            author=event.author,
            comment=event.comment,
            fields=self._header_fields(ticket),
            category=event.category,
            ticket_link=self._ticket_link(ticket),
            project_name=self.env.project_name,
            project_desc=self.env.project_description,
            project_link=self.env.project_url or self.env.abs_href(),
            has_changes=short_changes or long_changes,
            long_changes=long_changes,
            short_changes=short_changes,
            attachment=event.attachment
        )
        chrome = Chrome(self.env)
        dirs = []
        for provider in chrome.template_providers:
            dirs += provider.get_templates_dirs()
        templates = TemplateLoader(dirs, variable_lookup='lenient')
        template = templates.load('ticket_email_plaintext.txt',
                                  cls=NewTextTemplate)
        if template:
            stream = template.generate(**data)
            return stream.render('text')

    def _header_fields(self, ticket):
        """Ticket fields to show as headers, filtered by the
        `ticket_email_header_fields` option ('*' means all)."""
        headers = self.ticket_email_header_fields
        fields = TicketSystem(self.env).get_ticket_fields()
        if len(headers) and headers[0].strip() != '*':
            def _filter(i):
                return i['name'] in headers
            fields = filter(_filter, fields)
        return fields

    def _format_html(self, event):
        """Render the event through ticket_email_mimic.html; multi-line
        changes are shown as unified diffs inside <pre> blocks."""
        ticket = event.target
        attachment = event.attachment
        short_changes = {}
        long_changes = {}
        for field, old_value in event.changes.items():
            new_value = ticket[field]
            if (new_value and '\n' in new_value) or \
                    (old_value and '\n' in old_value):
                long_changes[field.capitalize()] = HTML(
                    "<pre>\n%s\n</pre>" % (
                        '\n'.join(
                            diff_cleanup(
                                difflib.unified_diff(
                                    wrap(old_value, cols=60).split('\n'),
                                    wrap(new_value, cols=60).split('\n'),
                                    lineterm='', n=3
                                )
                            )
                        )
                    )
                )
            else:
                short_changes[field.capitalize()] = (old_value, new_value)

        def wiki_to_html(event, wikitext):
            # Render wiki markup with a mocked request, since there is no
            # real HTTP request in the announcement pipeline.
            if wikitext is None:
                return ""
            try:
                req = Mock(
                    href=Href(self.env.abs_href()),
                    abs_href=self.env.abs_href,
                    authname=event.author,
                    perm=MockPerm(),
                    chrome=dict(
                        warnings=[],
                        notices=[]
                    ),
                    args={}
                )
                resource = Resource(event.realm, event.target.id)
                context = web_context(req, resource)
                formatter = HtmlFormatter(self.env, context, wikitext)
                return formatter.generate(True)
            except Exception, e:
                self.log.error("Failed to render %s", repr(wikitext))
                self.log.error(exception_to_unicode(e, traceback=True))
                raise

        description = wiki_to_html(event, ticket['description'])
        if attachment:
            comment = wiki_to_html(event, attachment.description)
        else:
            comment = wiki_to_html(event, event.comment)
        data = dict(
            ticket=ticket,
            description=description,
            author=event.author,
            fields=self._header_fields(ticket),
            comment=comment,
            category=event.category,
            ticket_link=self._ticket_link(ticket),
            project_name=self.env.project_name,
            project_desc=self.env.project_description,
            project_link=self.env.project_url or self.env.abs_href(),
            has_changes=short_changes or long_changes,
            long_changes=long_changes,
            short_changes=short_changes,
            attachment=event.attachment,
            attachment_link=self.env.abs_href('attachment/ticket',
                                              ticket.id)
        )
        chrome = Chrome(self.env)
        dirs = []
        for provider in chrome.template_providers:
            dirs += provider.get_templates_dirs()
        templates = TemplateLoader(dirs, variable_lookup='lenient')
        template = templates.load('ticket_email_mimic.html',
                                  cls=MarkupTemplate)
        if template:
            stream = template.generate(**data)
            return stream.render()
class CalendarPopUp(Component):
    """Injects a JavaScript calendar date-picker into selected templates.

    The templates to decorate and the input-field ids to watch are
    configured through the `[calendarpopup]` section.
    """
    implements(ITemplateProvider, ITemplateStreamFilter)

    insert_into = ListOption(
        'calendarpopup', 'files',
        'ticket.html,milestone_edit.html,admin_milestones.html',
        doc='List of files that calendarpopup should handle.')

    watch_ids = ListOption(
        'calendarpopup', 'ids',
        'field-due_assign,field-due_close,duedate=MM/dd/yy',
        doc=
        'List of input ID\'s that should show calendarpopup. If date format of input field differs from yyyy/MM/dd, define it like duedate=MM/dd/yy'
    )

    ### methods for ITemplateProvider

    def get_htdocs_dirs(self):
        """Expose this plugin's static resources under 'calendarpopup'."""
        from pkg_resources import resource_filename
        return [('calendarpopup', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        return []

    def post_process_request(self, req, template, data, content_type):
        """Do any post-processing the request might need; typically
        adding values to the template `data` dictionary, or changing
        template or mime type.

        `data` may be update in place.

        Always returns a tuple of (template, data, content_type), even if
        unchanged.

        Note that `template`, `data`, `content_type` will be `None` if:
         - called when processing an error page
         - the default request handler did not return any result

        (Since 0.11)
        """
        return (template, data, content_type)

    def pre_process_request(self, req, handler):
        """Called after initial handler selection, and can be used to change
        the selected handler or redirect request.

        Always returns the request handler, even if unchanged.
        """
        return handler

    ## ITemplateStreamFilter

    def filter_stream(self, req, method, filename, stream, data):
        """Inject the calendar pop-up markup and script into templates
        listed in the `files` option.

        Watched ids come from the `ids` option; each entry is either a
        bare input id (default format yyyy/MM/dd) or an `id=format` pair.
        """
        # Only decorate templates explicitly listed in the option.
        found = False
        for pattern in self.insert_into:
            if filename == pattern:
                add_stylesheet(req, 'calendarpopup/css/CalendarPopUp.css')
                add_script(req, 'calendarpopup/js/CalendarPopUp.js')
                found = True
                break
        if not found:
            return stream

        # Build the parallel JS arrays of ids and date formats.
        # Collect into lists and join once, instead of quadratic string
        # concatenation.
        ids = []
        formats = []
        for element in self.watch_ids:
            if '=' in element:
                # `id=format` pair. Split only on the first '=' so a value
                # containing a second '=' cannot raise ValueError on unpack
                # (the original `split('=', 2)` crashed on such input).
                one, two = element.split('=', 1)
                if one and two:
                    ids.append('"%s"' % one)
                    formats.append('"%s"' % two)
            elif element:
                # Bare id: use the default date format.
                ids.append('"%s"' % element)
                formats.append('"yyyy/MM/dd"')

        insertDIV = Element(
            'div',
            id="CalendarPopUpDiv",
            style=
            "position:absolute;visibility:hidden;background-color:white;layer-background-color:white;"
        )
        insertScript = Element(
            'script', type="text/javascript"
        )('var calendarPopUpArrayOfIDs = new Array(%s); '
          'var calendarPopUpArrayOfIDsFormat = new Array(%s)'
          % (', '.join(ids), ', '.join(formats)))

        return stream | Transformer('//div[@id="footer"]').after(
            insertDIV) | Transformer('body').before(insertScript)
class JoinableGroupSubscriber(Component):
    """Allows users to subscribe to groups as defined by the system
    administrator.

    Any ticket with the said group listed in the cc field will trigger
    announcements to users in the group.
    """

    implements(IAnnouncementPreferenceProvider, IAnnouncementSubscriber)

    joinable_groups = ListOption(
        'announcer', 'joinable_groups', [],
        doc="""Joinable groups represent 'opt-in' groups that users may
        freely join.

        Enter a list of groups (without @) seperated by commas.  The name
        of the groups should be a simple alphanumeric string. By adding
        the group name preceeded by @ (such as @sec for the sec group)
        to the CC field of a ticket, everyone in that group will receive
        an announcement when that ticket is changed.
        """)

    # IAnnouncementSubscriber methods

    def matches(self, event):
        """Yield subscription tuples for members of every @group in CC."""
        if event.realm != 'ticket':
            return
        if event.category not in ('changed', 'created', 'attachment added'):
            return

        klass = self.__class__.__name__
        sids = set()
        cc = event.target['cc'] or ''
        # CC entries may be separated by whitespace or commas; group
        # references are marked with a leading '@'.
        for chunk in re.split(r'\s|,', cc):
            chunk = chunk.strip()
            if chunk and chunk.startswith('@'):
                grp = chunk[1:]
                attrs = SubscriptionAttribute.find_by_class_realm_and_target(
                    self.env, klass, 'ticket', grp)
                sids.update((attr['sid'], attr['authenticated'])
                            for attr in attrs)

        for sub in Subscription.find_by_sids_and_class(self.env, sids, klass):
            yield sub.subscription_tuple()

    def description(self):
        return _("notify me on ticket changes in one of my subscribed groups")

    def requires_authentication(self):
        return False

    # IAnnouncementPreferenceProvider methods

    def get_announcement_preference_boxes(self, req):
        # Anonymous users without a stored email have no way to receive
        # announcements, so no preference box is shown to them.
        if req.authname == "anonymous" and 'email' not in req.session:
            return
        if self.joinable_groups:
            yield "joinable_groups", _("Group Subscriptions")

    def render_announcement_preference_box(self, req, panel):
        """Render (and on POST, persist) the user's group memberships."""
        klass = self.__class__.__name__

        if req.method == "POST":
            @self.env.with_transaction()
            def do_update(db):
                # Replace all stored memberships with the checked boxes.
                SubscriptionAttribute.delete_by_sid_and_class(
                    self.env, req.session.sid, req.session.authenticated,
                    klass, db)

                def _map(value):
                    # Form fields look like "joinable_group_<name>"; keep
                    # only names whose checkbox is actually enabled.
                    g = re.match(r'^joinable_group_(.*)', value)
                    if g and istrue(req.args.get(value)):
                        return g.group(1)

                groups = set(filter(None, map(_map, req.args.keys())))
                SubscriptionAttribute.add(
                    self.env, req.session.sid, req.session.authenticated,
                    klass, 'ticket', groups, db)

        attrs = [a['target'] for a in
                 SubscriptionAttribute.find_by_sid_and_class(
                     self.env, req.session.sid, req.session.authenticated,
                     klass)
                 if a['target']]
        data = dict(joinable_groups={})
        for group in self.joinable_groups:
            # True when subscribed, None (not False) otherwise, as the
            # template expects.
            data['joinable_groups'][group] = True if group in attrs else None
        return "prefs_announcer_joinable_groups.html", data
class RepositoryManager(Component):
    """Version control system manager."""

    implements(IRequestFilter, IResourceManager, IRepositoryProvider)

    connectors = ExtensionPoint(IRepositoryConnector)
    providers = ExtensionPoint(IRepositoryProvider)
    change_listeners = ExtensionPoint(IRepositoryChangeListener)

    repositories_section = ConfigSection(
        'repositories',
        """One of the alternatives for registering new repositories is to
        populate the `[repositories]` section of the `trac.ini`.

        This is especially suited for setting up convenience aliases,
        short-lived repositories, or during the initial phases of an
        installation.

        See [TracRepositoryAdmin#Intrac.ini TracRepositoryAdmin] for
        details about the format adopted for this section and the rest of
        that page for the other alternatives.

        (''since 0.12'')""")

    repository_type = Option(
        'trac', 'repository_type', 'svn',
        """Default repository connector type. (''since 0.10'')

        This is also used as the default repository type for repositories
        defined in [[TracIni#repositories-section repositories]] or using
        the "Repositories" admin panel. (''since 0.12'')
        """)

    repository_dir = Option(
        'trac', 'repository_dir', '',
        """Path to the default repository. This can also be a relative
        path (''since 0.11'').

        This option is deprecated, and repositories should be defined in
        the [TracIni#repositories-section repositories] section, or using
        the "Repositories" admin panel. (''since 0.12'')""")

    repository_sync_per_request = ListOption(
        'trac', 'repository_sync_per_request', '(default)',
        doc="""List of repositories that should be synchronized on every
        page request.

        Leave this option empty if you have set up post-commit hooks
        calling `trac-admin $ENV changeset added` on all your repositories
        (recommended). Otherwise, set it to a comma-separated list of
        repository names. Note that this will negatively affect
        performance, and will prevent changeset listeners from receiving
        events from the repositories specified here.

        (''since 0.12'')""")

    def __init__(self):
        # Per-thread cache of Repository instances, keyed by thread id;
        # see get_repository() / shutdown().
        self._cache = {}
        self._lock = threading.Lock()
        # Lazily-built caches, invalidated by reload_repositories().
        self._connectors = None
        self._all_repositories = None

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        """Synchronize the configured repositories on each request,
        except for requests handled by Chrome (static resources)."""
        from trac.web.chrome import Chrome, add_warning
        if handler is not Chrome(self.env):
            for reponame in self.repository_sync_per_request:
                start = time.time()
                if is_default(reponame):
                    reponame = ''
                try:
                    repo = self.get_repository(reponame)
                    if repo:
                        repo.sync()
                    else:
                        self.log.warning(
                            "Unable to find repository '%s' for "
                            "synchronization", reponame or '(default)')
                        continue
                except TracError as e:
                    # Known Trac-level failure: warn the user only.
                    add_warning(
                        req,
                        _(
                            "Can't synchronize with repository \"%(name)s\" "
                            "(%(error)s). Look in the Trac log for more "
                            "information.",
                            name=reponame or '(default)',
                            error=to_unicode(e)))
                except Exception as e:
                    # Unexpected failure: warn the user and log a full
                    # traceback with mitigation advice.
                    add_warning(
                        req,
                        _(
                            "Failed to sync with repository \"%(name)s\": "
                            "%(error)s; repository information may be out of "
                            "date. Look in the Trac log for more information "
                            "including mitigation strategies.",
                            name=reponame or '(default)',
                            error=to_unicode(e)))
                    self.log.error(
                        "Failed to sync with repository \"%s\"; You may be "
                        "able to reduce the impact of this issue by "
                        "configuring [trac] repository_sync_per_request; see "
                        "http://trac.edgewall.org/wiki/TracRepositoryAdmin"
                        "#ExplicitSync for more detail: %s",
                        reponame or '(default)',
                        exception_to_unicode(e, traceback=True))
                self.log.info("Synchronized '%s' repository in %0.2f seconds",
                              reponame or '(default)', time.time() - start)
        return handler

    def post_process_request(self, req, template, data, content_type):
        # No post-processing needed; pass the response through unchanged.
        return (template, data, content_type)

    # IResourceManager methods

    def get_resource_realms(self):
        yield 'changeset'
        yield 'source'
        yield 'repository'

    def get_resource_description(self, resource, format=None, **kwargs):
        """Return a human-readable description for changeset, source and
        repository resources."""
        if resource.realm == 'changeset':
            parent = resource.parent
            reponame = parent and parent.id
            id = resource.id
            if reponame:
                return _("Changeset %(rev)s in %(repo)s",
                         rev=id, repo=reponame)
            else:
                return _("Changeset %(rev)s", rev=id)
        elif resource.realm == 'source':
            parent = resource.parent
            reponame = parent and parent.id
            id = resource.id
            version = ''
            if format == 'summary':
                # Summary format spells out the node kind and version.
                repos = self.get_repository(reponame)
                node = repos.get_node(resource.id, resource.version)
                if node.isdir:
                    kind = _("directory")
                elif node.isfile:
                    kind = _("file")
                if resource.version:
                    version = _(" at version %(rev)s", rev=resource.version)
            else:
                kind = _("path")
                if resource.version:
                    version = '@%s' % resource.version
            in_repo = _(" in %(repo)s", repo=reponame) if reponame else ''
            # TRANSLATOR: file /path/to/file.py at version 13 in reponame
            return _('%(kind)s %(id)s%(at_version)s%(in_repo)s',
                     kind=kind, id=id, at_version=version, in_repo=in_repo)
        elif resource.realm == 'repository':
            if not resource.id:
                return _("Default repository")
            return _("Repository %(repo)s", repo=resource.id)

    def get_resource_url(self, resource, href, **kwargs):
        """Build the URL for changeset, source and repository resources."""
        if resource.realm == 'changeset':
            parent = resource.parent
            return href.changeset(resource.id, parent and parent.id or None)
        elif resource.realm == 'source':
            parent = resource.parent
            return href.browser(parent and parent.id or None, resource.id,
                                rev=resource.version or None)
        elif resource.realm == 'repository':
            return href.browser(resource.id or None)

    def resource_exists(self, resource):
        """Check whether the changeset/node/repository actually exists in
        the corresponding repository."""
        if resource.realm == 'repository':
            reponame = resource.id
        else:
            reponame = resource.parent.id
        repos = self.env.get_repository(reponame)
        if not repos:
            return False
        if resource.realm == 'changeset':
            try:
                repos.get_changeset(resource.id)
                return True
            except NoSuchChangeset:
                return False
        elif resource.realm == 'source':
            try:
                repos.get_node(resource.id, resource.version)
                return True
            except NoSuchNode:
                return False
        elif resource.realm == 'repository':
            return True

    # IRepositoryProvider methods

    def get_repositories(self):
        """Retrieve repositories specified in TracIni.

        The `[repositories]` section can be used to specify a list of
        repositories.
        """
        repositories = self.repositories_section
        reponames = {}
        # eventually add pre-0.12 default repository
        if self.repository_dir:
            reponames[''] = {'dir': self.repository_dir}
        # first pass to gather the <name>.dir entries
        for option in repositories:
            if option.endswith('.dir'):
                reponames[option[:-4]] = {}
        # second pass to gather aliases
        for option in repositories:
            alias = repositories.get(option)
            if '.' not in option:  # Support <alias> = <repo> syntax
                option += '.alias'
            if option.endswith('.alias') and alias in reponames:
                reponames.setdefault(option[:-6], {})['alias'] = alias
        # third pass to gather the <name>.<detail> entries
        for option in repositories:
            if '.' in option:
                name, detail = option.rsplit('.', 1)
                if name in reponames and detail != 'alias':
                    reponames[name][detail] = repositories.get(option)

        for reponame, info in reponames.iteritems():
            yield (reponame, info)

    # Public API methods

    def get_supported_types(self):
        """Return the list of supported repository types."""
        # Connectors advertising a negative priority are disabled.
        types = set(type_ for connector in self.connectors
                    for (type_, prio) in connector.get_supported_types() or []
                    if prio >= 0)
        return list(types)

    def get_repositories_by_dir(self, directory):
        """Retrieve the repositories based on the given directory.

        :param directory: the key for identifying the repositories.
        :return: list of `Repository` instances.
        """
        # Normalize with a trailing separator so prefix matching only
        # matches whole path components.
        directory = os.path.join(os.path.normcase(directory), '')
        repositories = []
        for reponame, repoinfo in self.get_all_repositories().iteritems():
            dir = repoinfo.get('dir')
            if dir:
                dir = os.path.join(os.path.normcase(dir), '')
                if dir.startswith(directory):
                    repos = self.get_repository(reponame)
                    if repos:
                        repositories.append(repos)
        return repositories

    def get_repository_id(self, reponame):
        """Return a unique id for the given repository name.

        This will create and save a new id if none is found.

        Note: this should probably be renamed as we're dealing
        exclusively with *db* repository ids here.
        """
        with self.env.db_transaction as db:
            # The repository table stores (id, name, value) attribute
            # rows; the row with name='name' holds the repository name.
            for id, in db(
                    "SELECT id FROM repository WHERE name='name' AND value=%s",
                (reponame, )):
                return id

            id = db("SELECT COALESCE(MAX(id), 0) FROM repository")[0][0] + 1
            db("INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
               (id, 'name', reponame))
            return id

    def get_repository(self, reponame):
        """Retrieve the appropriate `Repository` for the given
        repository name.

        :param reponame: the key for specifying the repository.
                         If no name is given, take the default
                         repository.
        :return: if no corresponding repository was defined,
                 simply return `None`.
        """
        reponame = reponame or ''
        repoinfo = self.get_all_repositories().get(reponame, {})
        if 'alias' in repoinfo:
            # Resolve one level of aliasing to the real repository.
            reponame = repoinfo['alias']
            repoinfo = self.get_all_repositories().get(reponame, {})
        rdir = repoinfo.get('dir')
        if not rdir:
            return None
        rtype = repoinfo.get('type') or self.repository_type

        # get a Repository for the reponame (use a thread-level cache)
        with self.env.db_transaction:  # prevent possible deadlock, see #4465
            with self._lock:
                tid = threading._get_ident()
                if tid in self._cache:
                    repositories = self._cache[tid]
                else:
                    repositories = self._cache[tid] = {}
                repos = repositories.get(reponame)
                if not repos:
                    if not os.path.isabs(rdir):
                        rdir = os.path.join(self.env.path, rdir)
                    connector = self._get_connector(rtype)
                    repos = connector.get_repository(rtype, rdir,
                                                     repoinfo.copy())
                    repositories[reponame] = repos
                return repos

    def get_repository_by_path(self, path):
        """Retrieve a matching `Repository` for the given `path`.

        :param path: the eventually scoped repository-scoped path
        :return: a `(reponame, repos, path)` triple, where `path` is
                 the remaining part of `path` once the `reponame` has
                 been truncated, if needed.
        """
        matches = []
        path = path.strip('/') + '/' if path else '/'
        for reponame in self.get_all_repositories().keys():
            stripped_reponame = reponame.strip('/') + '/'
            if path.startswith(stripped_reponame):
                matches.append((len(stripped_reponame), reponame))
        if matches:
            # Prefer the longest (most specific) matching repository name.
            matches.sort()
            length, reponame = matches[-1]
            path = path[length:]
        else:
            reponame = ''
        return (reponame, self.get_repository(reponame),
                path.rstrip('/') or '/')

    def get_default_repository(self, context):
        """Recover the appropriate repository from the current context.

        Lookup the closest source or changeset resource in the context
        hierarchy and return the name of its associated repository.
        """
        while context:
            if context.resource.realm in ('source', 'changeset'):
                return context.resource.parent.id
            context = context.parent

    def get_all_repositories(self):
        """Return a dictionary of repository information, indexed by
        name."""
        if not self._all_repositories:
            all_repositories = {}
            for provider in self.providers:
                for reponame, info in provider.get_repositories() or []:
                    if reponame in all_repositories:
                        self.log.warn("Discarding duplicate repository '%s'",
                                      reponame)
                    else:
                        info['name'] = reponame
                        if 'id' not in info:
                            info['id'] = self.get_repository_id(reponame)
                        all_repositories[reponame] = info
            self._all_repositories = all_repositories
        return self._all_repositories

    def get_real_repositories(self):
        """Return a set of all real repositories (i.e. excluding
        aliases)."""
        repositories = set()
        for reponame in self.get_all_repositories():
            try:
                repos = self.get_repository(reponame)
                if repos is not None:
                    repositories.add(repos)
            except TracError:
                pass  # Skip invalid repositories
        return repositories

    def reload_repositories(self):
        """Reload the repositories from the providers."""
        with self._lock:
            # FIXME: trac-admin doesn't reload the environment
            self._cache = {}
            self._all_repositories = None
        self.config.touch()  # Force environment reload

    def notify(self, event, reponame, revs):
        """Notify repositories and change listeners about repository
        events.

        The supported events are the names of the methods defined in the
        `IRepositoryChangeListener` interface.
        """
        self.log.debug("Event %s on repository '%s' for changesets %r",
                       event, reponame or '(default)', revs)

        # Notify a repository by name, and all repositories with the same
        # base, or all repositories by base or by repository dir
        repos = self.get_repository(reponame)
        repositories = []
        if repos:
            base = repos.get_base()
        else:
            # No repository by that name; interpret the argument as a
            # directory and look up repositories stored below it.
            dir = os.path.abspath(reponame)
            repositories = self.get_repositories_by_dir(dir)
            if repositories:
                base = None
            else:
                base = reponame
        if base:
            repositories = [
                r for r in self.get_real_repositories()
                if r.get_base() == base
            ]
        if not repositories:
            self.log.warn("Found no repositories matching '%s' base.",
                          base or reponame)
            return
        for repos in sorted(repositories, key=lambda r: r.reponame):
            repos.sync()
            for rev in revs:
                args = []
                if event == 'changeset_modified':
                    # Listeners for this event also receive the old
                    # changeset returned by sync_changeset().
                    args.append(repos.sync_changeset(rev))
                try:
                    changeset = repos.get_changeset(rev)
                except NoSuchChangeset:
                    try:
                        # Maybe the changeset is simply not synced yet.
                        repos.sync_changeset(rev)
                        changeset = repos.get_changeset(rev)
                    except NoSuchChangeset:
                        self.log.debug(
                            "No changeset '%s' found in repository '%s'. "
                            "Skipping subscribers for event %s",
                            rev, repos.reponame or '(default)', event)
                        continue
                self.log.debug("Event %s on repository '%s' for revision '%s'",
                               event, repos.reponame or '(default)', rev)
                for listener in self.change_listeners:
                    getattr(listener, event)(repos, changeset, *args)

    def shutdown(self, tid=None):
        """Free `Repository` instances bound to a given thread
        identifier"""
        if tid:
            assert tid == threading._get_ident()
            with self._lock:
                repositories = self._cache.pop(tid, {})
                for reponame, repos in repositories.iteritems():
                    repos.close()

    # private methods

    def _get_connector(self, rtype):
        """Retrieve the appropriate connector for the given repository
        type.

        Note that the self._lock must be held when calling this method.
        """
        if self._connectors is None:
            # build an environment-level cache for the preferred connectors
            self._connectors = {}
            for connector in self.connectors:
                for type_, prio in connector.get_supported_types() or []:
                    keep = (connector, prio)
                    if type_ in self._connectors and \
                            prio <= self._connectors[type_][1]:
                        keep = None
                    if keep:
                        self._connectors[type_] = keep
        if rtype in self._connectors:
            connector, prio = self._connectors[rtype]
            if prio >= 0:  # no error condition
                return connector
            else:
                # Negative priority signals a connector-level error.
                raise TracError(
                    _(
                        'Unsupported version control system "%(name)s"'
                        ': %(error)s',
                        name=rtype,
                        error=to_unicode(connector.error)))
        else:
            raise TracError(
                _(
                    'Unsupported version control system "%(name)s": '
                    'Can\'t find an appropriate component, maybe the '
                    'corresponding plugin was not enabled? ',
                    name=rtype))
class TicketCustomFieldSubscriber(Component):
    """Allows users to subscribe to tickets that have their sid listed in
    any field that has a name in the custom_cc_fields list.

    The custom_cc_fields list must be configured by the system
    administrator.
    """

    implements(IAnnouncementDefaultSubscriber, IAnnouncementSubscriber)

    custom_cc_fields = ListOption(
        'announcer', 'custom_cc_fields',
        doc="Field names that contain users that should be notified on "
            "ticket changes")

    default_on = BoolOption(
        "announcer", "always_notify_custom_cc", 'true',
        """The always_notify_custom_cc will notify the users in the
        custom cc field by default when a ticket is modified.
        """)

    default_distributor = ListOption(
        "announcer", "always_notify_custom_cc_distributor", "email",
        doc="""Comma-separated list of distributors to send the message
        to by default.  ex.  email, xmpp
        """)

    # IAnnouncementSubscriber methods

    def matches(self, event):
        """Yield subscription tuples for every entry found in the
        configured custom CC fields of the event's ticket."""
        if event.realm != 'ticket':
            return
        if event.category not in ('changed', 'created', 'attachment added'):
            return

        klass = self.__class__.__name__
        ticket = event.target
        sids = set()
        for field in self.custom_cc_fields:
            subs = ticket[field] or ''
            for chunk in re.split(r'\s|,', subs):
                chunk = chunk.strip()
                # '@group' entries are handled by the group subscriber.
                if not chunk or chunk.startswith('@'):
                    continue
                if re.match(r'^[^@]+@.+', chunk):
                    # Looks like a raw email address, not a session id.
                    sid, auth, addr = None, None, chunk
                else:
                    sid, auth, addr = chunk, True, None
                # Default subscription: s is the 4-tuple
                # (class, distributor, priority, adverb) yielded by
                # default_subscriptions(), so priority/adverb live at
                # s[2]/s[3].  The old s[3]/s[4] raised IndexError.
                for s in self.default_subscriptions():
                    yield (s[0], s[1], sid, auth, addr, None, s[2], s[3])
                if sid:
                    sids.add((sid, auth))

        for sub in Subscription.find_by_sids_and_class(self.env, sids, klass):
            yield sub.subscription_tuple()

    def description(self):
        if self.custom_cc_fields:
            return _("notify me when I'm listed in any of the (%s) "
                     "fields" % (','.join(self.custom_cc_fields), ))

    def requires_authentication(self):
        return True

    # IAnnouncementDefaultSubscriber method

    def default_subscriptions(self):
        """Yield (class, distributor, priority, adverb) defaults when the
        always_notify_custom_cc option is enabled."""
        if self.custom_cc_fields and self.default_on:
            for d in self.default_distributor:
                yield (self.__class__.__name__, d, 101, 'always')
class CodeReviewerModule(Component):
    """Base component for reviewing changesets."""

    implements(ITemplateProvider, IRequestFilter)

    # config options
    statuses = ListOption('codereviewer', 'status_choices',
                          default=CodeReview.STATUSES,
                          doc="Review status choices.")
    passed = ListOption('codereviewer', 'passed', default=[],
                        doc="Ticket field changes on a PASSED submit.")
    failed = ListOption('codereviewer', 'failed', default=[],
                        doc="Ticket field changes on a FAILED submit.")
    completeness = ListOption(
        'codereviewer', 'completeness', default=[],
        doc="Ticket field values enabling ticket completeness.")
    command = Option('codereviewer', 'command', default='',
                     doc="Command to execute upon ticket completeness.")

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        # Map the 'coderev' URL prefix to this plugin's htdocs directory.
        from pkg_resources import resource_filename
        return [('coderev', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        # No templates provided by this plugin.
        return []

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        """Inject code-review data into changeset pages and handle review
        submissions (POST); add styling to ticket pages."""
        if data is None:
            return template, data, content_type
        if req.path_info.startswith('/changeset') and \
                data.get('changeset') is not False and \
                'CODEREVIEWER_MODIFY' in req.perm:
            changeset = data['changeset']
            repos = changeset.repos
            reponame, rev = repos.reponame, repos.db_rev(changeset.rev)
            review = CodeReview(self.env, reponame, rev)
            tickets = req.args.getlist('tickets')
            if req.method == 'POST':
                # Compare the (encoded) submitted status with the stored
                # one before saving, so ticket updates only fire on an
                # actual status transition.
                status_changed = \
                    review.encode(req.args['status']) != review.status
                if review.save(req.authname, req.args['status'],
                               req.args['summary']):
                    self._update_tickets(changeset, review, status_changed)
                tickets = review.tickets
                # Redirect (PRG pattern) back to the changeset page.
                req.redirect(req.href(req.path_info, tickets=tickets))
            ctx = web_context(req)
            format_summary = functools.partial(format_to_html, self.env, ctx,
                                               escape_newlines=True)
            format_time = functools.partial(user_time, req, format_datetime)
            add_stylesheet(req, 'coderev/coderev.css')
            add_script(req, 'coderev/coderev.js')
            # Expose the review history and form metadata to coderev.js.
            add_script_data(
                req, {
                    'review': {
                        'status':
                        review.status,
                        'encoded_status':
                        review.encode(review.status),
                        'summaries': [
                            dict([
                                ('html_summary', format_summary(r['summary'])),
                                ('pretty_when', format_time(r['when'])),
                                ('pretty_timedelta', pretty_timedelta(
                                    r['when'])),
                                ('reviewer', r['reviewer']),
                                ('status', r['status'])
                            ]) for r in CodeReview.select(self.env, reponame,
                                                          rev)
                        ],
                    },
                    'tickets': tickets,
                    'statuses': self.statuses,
                    'form_token': req.form_token,
                })
            req.send_header('Cache-Control', 'no-cache')
        elif req.path_info.startswith('/ticket/'):
            add_stylesheet(req, 'coderev/coderev.css')
        return template, data, content_type

    # Private methods

    def _update_tickets(self, changeset, review, status_changed):
        """Updates the tickets referenced by the given review's changeset
        with a comment of field changes.  Field changes and command
        execution may occur if specified in trac.ini and the review's
        changeset is the last one of the ticket."""
        status = review.encode(review.status).lower()

        # build comment
        comment = None
        if status_changed or review['summary']:
            if status_changed:
                comment = "Code review set to %s" % review['status']
            else:
                comment = "Code review comment"
            repos = changeset.repos
            ref = review.changeset
            disp_ref = str(repos.short_rev(review.changeset))
            if review.repo:
                ref += '/' + review.repo
                disp_ref += '/' + review.repo
            comment += ' for [changeset:"%s" %s]' % (ref, disp_ref)
            if review['summary']:
                comment += ":\n\n%s" % review['summary']

        invoked = False
        for ticket in review.tickets:
            tkt = Ticket(self.env, ticket)

            # determine ticket changes
            changes = {}
            if self._is_complete(ticket, review, failed_ok=True):
                changes = self._get_ticket_changes(tkt, status)

            # update ticket if there's a review summary or ticket changes
            if comment or changes:
                for field, value in changes.items():
                    tkt[field] = value
                tkt.save_changes(review['reviewer'], comment)

            # check to invoke command (at most once per review)
            if not invoked and self._is_complete(ticket, review):
                self._execute_command()
                invoked = True

    def _is_complete(self, ticket, review, failed_ok=False):
        """Returns True if the ticket is complete (or only the last
        review failed if ok_failed is True) and therefore actions (e.g.,
        ticket changes and executing commands) should be taken.

        A ticket is complete when its completeness criteria is met and
        the review has PASSED and is the ticket's last review with no
        other PENDING reviews.  Completeness criteria is defined in
        trac.ini like this:

          completeness = phase=(codereview|verifying|releasing)

        The above means that the ticket's phase field must have a value
        of either codereview, verifying, or releasing for the ticket to
        be considered complete.  This helps prevent actions from being
        taken if there's a code review of partial work before the ticket
        is really ready to be fully tested and released.
        """
        # check review's completeness
        reason = is_incomplete(self.env, review, ticket)
        if failed_ok and reason and CodeReview.NOT_PASSED in reason:
            # Only the NOT_PASSED condition blocks; treat as complete.
            return True
        return not reason

    def _get_ticket_changes(self, tkt, status):
        """Return a dict of field-value pairs of ticket fields to change
        for the given ticket as defined in trac.ini.  As one workflow
        opinion, the changes are processed in order:

          passed = phase=verifying,owner={captain}

        In the above example, if the review passed and the ticket's phase
        already = verifying, then the owner change will not be included.
        """
        changes = {}
        # getattr maps status ('passed'/'failed') to the matching
        # ListOption of "field=value" directives.
        for group in getattr(self, status, []):
            if '=' not in group:
                continue
            field, value = group.split('=', 1)
            if value.startswith('{'):
                # '{other_field}' means copy the value from that field.
                value = tkt[value.strip('{}')]
            if tkt[field] == value:
                break  # no more changes once ticket already has target value
            changes[field] = value
        return changes

    def _execute_command(self):
        """Run the configured shell command, logging output on failure."""
        if not self.command:
            return
        p = Popen(self.command, shell=True, stderr=STDOUT, stdout=PIPE)
        out = p.communicate()[0]
        if p.returncode == 0:
            self.log.info('command: %s', self.command)
        else:
            self.log.error('command error: %s\n%s', self.command, out)
class NotificationSystem(Component):
    """Central notification component: configuration options, event
    distribution to transports, and subscription resolution."""

    email_sender = ExtensionOption(
        'notification', 'email_sender', IEmailSender, 'SmtpEmailSender',
        """Name of the component implementing `IEmailSender`.

        This component is used by the notification system to send emails.
        Trac currently provides `SmtpEmailSender` for connecting to an
        SMTP server, and `SendmailEmailSender` for running a
        `sendmail`-compatible executable. (''since 0.12'')""")

    smtp_enabled = BoolOption('notification', 'smtp_enabled', 'false',
                              """Enable email notification.""")

    smtp_from = Option(
        'notification', 'smtp_from', 'trac@localhost',
        """Sender address to use in notification emails.

        At least one of `smtp_from` and `smtp_replyto` must be set,
        otherwise Trac refuses to send notification mails.""")

    smtp_from_name = Option(
        'notification', 'smtp_from_name', '',
        """Sender name to use in notification emails.""")

    smtp_from_author = BoolOption(
        'notification', 'smtp_from_author', 'false',
        """Use the author of the change as the sender in notification
        emails (e.g. reporter of a new ticket, author of a comment).

        If the author hasn't set an email address, `smtp_from` and
        `smtp_from_name` are used instead. (''since 1.0'')""")

    smtp_replyto = Option(
        'notification', 'smtp_replyto', 'trac@localhost',
        """Reply-To address to use in notification emails.

        At least one of `smtp_from` and `smtp_replyto` must be set,
        otherwise Trac refuses to send notification mails.""")

    smtp_always_cc_list = ListOption(
        'notification', 'smtp_always_cc', '', sep=(',', ' '),
        doc="""Comma-separated list of email addresses to always send
        notifications to.  Addresses can be seen by all recipients
        (Cc:).""")

    smtp_always_bcc_list = ListOption(
        'notification', 'smtp_always_bcc', '', sep=(',', ' '),
        doc="""Comma-separated list of email addresses to always send
        notifications to.  Addresses are not public (Bcc:).
        """)

    smtp_default_domain = Option(
        'notification', 'smtp_default_domain', '',
        """Default host/domain to append to addresses that do not specify
        one.  Fully qualified addresses are not modified.  The default
        domain is appended to all username/login for which an email
        address cannot be found in the user settings.""")

    ignore_domains_list = ListOption(
        'notification', 'ignore_domains', '',
        doc="""Comma-separated list of domains that should not be
        considered part of email addresses (for usernames with Kerberos
        domains).""")

    admit_domains_list = ListOption(
        'notification', 'admit_domains', '',
        doc="""Comma-separated list of domains that should be considered
        as valid for email addresses (such as localdomain).""")

    mime_encoding = Option(
        'notification', 'mime_encoding', 'none',
        """Specifies the MIME encoding scheme for emails.

        Supported values are: `none`, the default value which uses 7-bit
        encoding if the text is plain ASCII or 8-bit otherwise.  `base64`,
        which works with any kind of content but may cause some issues
        with touchy anti-spam/anti-virus engine.  `qp` or
        `quoted-printable`, which works best for european languages (more
        compact than base64) if 8-bit encoding cannot be used.
        """)

    use_public_cc = BoolOption(
        'notification', 'use_public_cc', 'false',
        """Addresses in the To and Cc fields are visible to all
        recipients.

        If this option is disabled, recipients are put in the Bcc list.
        """)

    use_short_addr = BoolOption(
        'notification', 'use_short_addr', 'false',
        """Permit email address without a host/domain (i.e. username
        only).

        The SMTP server should accept those addresses, and either append
        a FQDN or use local delivery.  See also `smtp_default_domain`.
        Do not use this option with a public SMTP server.
        """)

    smtp_subject_prefix = Option(
        'notification', 'smtp_subject_prefix', '__default__',
        """Text to prepend to subject line of notification emails.

        If the setting is not defined, then `[$project_name]` is used as
        the prefix.  If no prefix is desired, then specifying an empty
        option will disable it.
        """)

    notification_subscriber_section = ConfigSection(
        'notification-subscriber',
        """The notifications subscriptions are controlled by plugins.  All
        `INotificationSubscriber` components are in charge.  These
        components may allow to be configured via this section in the
        `trac.ini` file.

        See TracNotification for more details.

        Available subscribers:
        [[SubscriberList]]
        """)

    distributors = ExtensionPoint(INotificationDistributor)
    subscribers = ExtensionPoint(INotificationSubscriber)

    # The four properties below re-read the raw option value instead of
    # using the *_list ListOption attributes, to keep the historical
    # (string-valued) API available.

    @property
    def smtp_always_cc(self):  # For backward compatibility
        return self.config.get('notification', 'smtp_always_cc')

    @property
    def smtp_always_bcc(self):  # For backward compatibility
        return self.config.get('notification', 'smtp_always_bcc')

    @property
    def ignore_domains(self):  # For backward compatibility
        return self.config.get('notification', 'ignore_domains')

    @property
    def admit_domains(self):  # For backward compatibility
        return self.config.get('notification', 'admit_domains')

    @lazy
    def subscriber_defaults(self):
        # Parsed once per instance from the [notification-subscriber]
        # section.
        rawsubscriptions = self.notification_subscriber_section.options()
        return parse_subscriber_config(rawsubscriptions)

    def default_subscriptions(self, klass):
        """Yield the configured default subscription tuples for the given
        subscriber class name."""
        for d in self.subscriber_defaults[klass]:
            yield (klass, d['distributor'], d['format'], d['priority'],
                   d['adverb'])

    def send_email(self, from_addr, recipients, message):
        """Send message to recipients via e-mail."""
        self.email_sender.send(from_addr, recipients, message)

    def notify(self, event):
        """Distribute an event to all subscriptions.

        :param event: a `NotificationEvent`
        """
        self.distribute_event(event, self.subscriptions(event))

    def distribute_event(self, event, subscriptions):
        """Distribute a event to all subscriptions.

        :param event: a `NotificationEvent`
        :param subscriptions: a list of tuples (sid, authenticated,
                              address, transport, format) where either
                              sid or address can be `None`
        """
        # Group recipients by transport, deduplicating via a set.
        packages = {}
        for sid, authenticated, address, transport, format in subscriptions:
            package = packages.setdefault(transport, set())
            package.add((sid, authenticated, address, format))
        for distributor in self.distributors:
            for transport in distributor.transports():
                if transport in packages:
                    recipients = list(packages[transport])
                    distributor.distribute(transport, recipients, event)

    def subscriptions(self, event):
        """Return all subscriptions for a given event.

        :return: a list of (sid, authenticated, address, transport,
                 format)
        """
        subscriptions = []
        for subscriber in self.subscribers:
            subscriptions.extend(x for x in subscriber.matches(event) if x)

        # For each (transport, sid, authenticated) combination check the
        # subscription with the highest priority:
        # If it is "always" keep it. If it is "never" drop it.

        # sort by (transport, sid, authenticated, priority)
        ordered = sorted(subscriptions, key=itemgetter(1, 2, 3, 6))
        previous_combination = None
        for rule, transport, sid, auth, addr, fmt, prio, adverb in ordered:
            # Skip lower-priority duplicates of an already-decided
            # (transport, sid, auth) combination.
            if (transport, sid, auth) == previous_combination:
                continue
            if adverb == 'always':
                self.log.debug(
                    "Adding (%s [%s]) for 'always' on rule (%s) "
                    "for (%s)", sid, auth, rule, transport)
                yield (sid, auth, addr, transport, fmt)
            else:
                self.log.debug(
                    "Ignoring (%s [%s]) for 'never' on rule (%s) "
                    "for (%s)", sid, auth, rule, transport)
            # Also keep subscriptions without sid (raw email subscription)
            if sid:
                previous_combination = (transport, sid, auth)
class Symbols(Component):
    """Render configurable character sequences as typographic symbols.

    The keyword-to-symbol associations live in the `[wikiextras-symbols]`
    section of `trac.ini`; the `ShowSymbols` macro lists the currently
    active set.
    """

    implements(IWikiMacroProvider, IWikiSyntaxProvider)

    symbols_section = ConfigSection(
        'wikiextras-symbols',
        """The set of symbols is configurable by providing associations
        between symbols and wiki keywords.

        A default set of symbols and keywords is defined, which can be
        revoked one-by-one (_remove) or all at once (_remove_defaults).
        Example:
        {{{
        [wikiextras-symbols]
        _remove_defaults = true
        _remove = <- ->
        « = <<
        » = >>
        ∑ = (SUM)
        ♥ = <3
        }}}
        Keywords are space-separated!

        A symbol can also be removed by associating it with no keyword:
        {{{
        ← =
        }}}

        Use the `ShowSymbols` macro to find out the current set of symbols
        and keywords.
        """)

    remove_defaults = BoolOption('wikiextras-symbols', '_remove_defaults',
                                 False,
                                 doc="Set to true to remove all "
                                     "default symbols.")

    remove = ListOption('wikiextras-symbols', '_remove', sep=' ', doc="""\
        Space-separated(!) list of keywords that shall not be interpreted as
        symbols (even if defined in this section).""")

    def __init__(self):
        # keyword -> symbol mapping; built lazily on first syntax request.
        self.symbols = None

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        """Yield one (regexp, formatter) pair covering every keyword, or
        (None, None) when no symbols are configured."""
        if self.symbols is None:
            self._build_symbol_table()
        if self.symbols:
            yield ('!?%s' % prepare_regexp(self.symbols),
                   self._format_symbol)
        else:
            yield (None, None)

    def _build_symbol_table(self):
        """Compute the keyword -> symbol mapping from the defaults plus the
        `[wikiextras-symbols]` overrides."""
        table = {} if self.remove_defaults else SYMBOLS.copy()
        for symbol, keywords in self.symbols_section.options():
            if symbol.startswith('_remove'):
                continue
            if keywords:
                for keyword in keywords.split():
                    table[keyword.strip()] = symbol
            else:
                # No keyword given: drop every keyword mapped to this
                # symbol.
                for stale in [k for k, s in table.items() if s == symbol]:
                    del table[stale]
        for keyword in self.remove:
            table.pop(keyword, None)
        self.symbols = table

    def get_link_resolvers(self):
        return []

    #noinspection PyUnusedLocal
    def _format_symbol(self, formatter, match, fullmatch):
        # `match` is the matched keyword; map it to its configured symbol.
        return Markup(self.symbols[match])

    # IWikiMacroProvider methods

    def get_macros(self):
        yield 'ShowSymbols'

    #noinspection PyUnusedLocal
    def get_macro_description(self, name):
        return ("Renders in a table the list of known symbols. "
                "Optional argument is the number of columns in the table "
                "(defaults 3).")

    #noinspection PyUnusedLocal
    def expand_macro(self, formatter, name, content, args=None):
        keywords = self.symbols.keys()
        render = lambda s: self._format_symbol(formatter, s, None)
        return render_table(keywords, content, render, colspace=4)
class TagWikiMacros(TagTemplateProvider):
    """Provides macros, that utilize the tagging system in wiki."""

    implements(IWikiMacroProvider)

    caseless_sort = BoolOption('tags', 'cloud_caseless_sort', default=False,
        doc="""Whether the tag cloud should be sorted case-sensitive.""")

    default_cols = Option('tags', 'listtagged_default_table_cols',
        'id|description|tags',
        doc="""Select columns and order for table format using a "|"-separated
            list of column names.

            Supported columns: realm, id, description, tags
            """)

    default_format = Option('tags', 'listtagged_default_format', 'oldlist',
        doc="""Set the default format for the handler of the `/tags` domain.

            || `oldlist` (default value) || The original format with a
            bulleted-list of "linked-id description (tags)" ||
            || `compact` || bulleted-list of "linked-description" ||
            || `table` || table... (see corresponding column option) ||
            """)

    exclude_realms = ListOption('tags', 'listtagged_exclude_realms', [],
        doc="""Comma-separated list of realms to exclude from tags queries
            by default, unless specifically included using "realm:realm-name"
            in a query.""")

    items_per_page = Option('tags', 'listtagged_items_per_page', 100,
        doc="""Number of tagged resources displayed per page in tag queries,
            by default""")
    # NOTE(review): this rebinds the class attribute to the plain int 100 at
    # class-definition time (int() of an Option instance fails, so as_int
    # returns the default), so the trac.ini value above is never consulted.
    # Kept as-is to preserve behavior -- confirm intent.
    items_per_page = as_int(items_per_page, 100)

    # Columns accepted by the `table` format.
    supported_cols = frozenset(['realm', 'id', 'description', 'tags'])

    def __init__(self):
        # TRANSLATOR: Keep macro doc style formatting here, please.
        self.doc_cloud = _("""Display a tag cloud.

    Show a tag cloud for all tags on resources matching query.

    Usage:

    {{{
    [[TagCloud(query,caseless_sort=<bool>,mincount=<n>)]]
    }}}

    caseless_sort::
      Whether the tag cloud should be sorted case-sensitive.
    mincount::
      Optional integer threshold to hide tags with smaller count.

    See tags documentation for the query syntax.
    """)
        self.doc_listtagged = _("""List tagged resources.

    Usage:

    {{{
    [[ListTagged(query)]]
    }}}

    See tags documentation for the query syntax.
    """)

    # IWikiMacroProvider

    def get_macros(self):
        yield 'ListTagged'
        yield 'TagCloud'

    def get_macro_description(self, name):
        if name == 'ListTagged':
            return self.doc_listtagged
        elif name == 'TagCloud':
            return self.doc_cloud

    def expand_macro(self, formatter, name, content, realms=None):
        """Render the `TagCloud` or `ListTagged` macro.

        :param realms: optional list of realms to restrict the query to.
            When `None` (plain wiki macro call) the realms are derived from
            the macro's `realm` keyword argument or from `realm:` terms in
            the query.  The keyword is accepted so web-UI callers (e.g. a
            `/tags` request handler) can pass pre-checked realms directly;
            omitting it preserves the original behavior.
        """
        env = self.env
        req = formatter.req
        args, kw = parse_args(content)
        # Use macro arguments (most likely wiki macro calls).
        if realms is None:
            realms = 'realm' in kw and kw['realm'].split('|') or []
        tag_system = TagSystem(env)
        all_realms = [p.get_taggable_realm()
                      for p in tag_system.tag_providers]
        # Remember state for get_href()/_paginate().
        self.all_realms = all_realms
        self.realms = realms

        if name == 'TagCloud':
            args.append(' or '.join(['realm:%s' % r for r in realms]))
            all_tags = tag_system.get_all_tags(req, ' '.join(args))
            mincount = 'mincount' in kw and kw['mincount'] or None
            return self.render_cloud(req, all_tags,
                                     caseless_sort=self.caseless_sort,
                                     mincount=mincount)

        elif name == 'ListTagged':
            if _OBSOLETE_ARGS_RE.search(content):
                data = {'warning': 'obsolete_args'}
            else:
                data = {'warning': None}
            context = formatter.context
            # Use TagsQuery arguments (most likely wiki macro calls).
            cols = 'cols' in kw and kw['cols'] or self.default_cols
            format = 'format' in kw and kw['format'] or self.default_format
            query = args and args[0].strip() or None
            if query and not realms:
                # First read query arguments (most likely a web-UI call).
                for realm in all_realms:
                    if re.search('(^|\W)realm:%s(\W|$)' % (realm), query):
                        # BUG FIX: the old idiom
                        # `realms = realms and realms.append(realm) or [realm]`
                        # discarded every previously collected realm,
                        # because list.append() returns None; collect all
                        # matching realms instead.
                        realms.append(realm)
            if not realms:
                # Apply ListTagged defaults to macro call w/o realm.
                realms = list(set(all_realms) - set(self.exclude_realms))
            if not realms:
                return ''
            else:
                self.query = query
                self.realms = realms
                query = '(%s) (%s)' % (query or '',
                                       ' or '.join(['realm:%s' % (r)
                                                    for r in realms]))
                env.log.debug('LISTTAGGED_QUERY: ' + query)
                query_result = tag_system.query(req, query)
                if not query_result:
                    return ''

                def _link(resource):
                    # Render a compact link appropriate for the resource
                    # kind.
                    if resource.realm == 'tag':
                        # Keep realm selection in tag links.
                        return builder.a(resource.id,
                                         href=self.get_href(req,
                                                            tag=resource))
                    elif resource.realm == 'ticket':
                        # Return resource link including ticket status
                        # dependend class to allow for common Trac ticket
                        # link style.
                        ticket = Ticket(env, resource.id)
                        return builder.a(
                            '#%s' % ticket.id,
                            class_=ticket['status'],
                            href=formatter.href.ticket(ticket.id),
                            title=shorten_line(ticket['summary']))
                    return render_resource_link(env, context, resource,
                                                'compact')

                if format == 'table':
                    cols = [col for col in cols.split('|')
                            if col in self.supported_cols]
                    # Use available translations from Trac core.
                    try:
                        labels = TicketSystem(env).get_ticket_field_labels()
                        labels['id'] = _('Id')
                    except AttributeError:
                        # Trac 0.11 neither has the attribute nor uses i18n.
                        labels = {'id': 'Id', 'description': 'Description'}
                    labels['realm'] = _('Realm')
                    labels['tags'] = _('Tags')
                    headers = [{'label': labels.get(col)} for col in cols]
                    data.update({'cols': cols, 'headers': headers})

                # Natural sort by id, then paginate.
                results = sorted(query_result, key=lambda r:
                                 embedded_numbers(to_unicode(r[0].id)))
                results = self._paginate(req, results)
                rows = []
                for resource, tags in results:
                    desc = tag_system.describe_tagged_resource(req, resource)
                    tags = sorted(tags)
                    if tags:
                        rendered_tags = [_link(Resource('tag', tag))
                                         for tag in tags]
                    if 'oldlist' == format:
                        resource_link = _link(resource)
                    else:
                        desc = desc or \
                               get_resource_description(env, resource,
                                                        context=context)
                        resource_link = builder.a(desc,
                                                  href=get_resource_url(
                                                      env, resource,
                                                      context.href))
                        if 'table' == format:
                            cells = []
                            for col in cols:
                                if col == 'id':
                                    cells.append(_link(resource))
                                # Don't duplicate links to resource in both.
                                elif col == 'description' and 'id' in cols:
                                    cells.append(desc)
                                elif col == 'description':
                                    cells.append(resource_link)
                                elif col == 'realm':
                                    cells.append(resource.realm)
                                elif col == 'tags':
                                    cells.append(
                                        builder([(tag, ' ')
                                                 for tag in rendered_tags]))
                            rows.append({'cells': cells})
                            continue
                    rows.append({'desc': desc,
                                 'rendered_tags': None,
                                 'resource_link': _link(resource)})
                data.update({'format': format,
                             'paginator': results,
                             'results': rows,
                             'tags_url': req.href('tags')})
                # Work around a bug in trac/templates/layout.html, that
                # causes a TypeError for the wiki macro call, if we use
                # add_link() alone.
                add_stylesheet(req, 'common/css/search.css')
                return Chrome(env).render_template(
                    req, 'listtagged_results.html', data, 'text/html', True)

    def get_href(self, req, per_page=None, page=None, tag=None, **kwargs):
        """Prepare href objects for tag links and pager navigation.

        Generate form-related arguments, strip arguments with default
        values.
        """
        form_realms = {}
        # Prepare realm arguments to keep form data consistent.
        for realm in self.realms:
            form_realms[realm] = 'on'
        realms = self.realms
        if not page and not per_page:
            # We're not serving pager navigation here.
            return get_resource_url(self.env, tag, req.href,
                                    form_realms=form_realms, **kwargs)
        # Strip values that equal the defaults to keep URLs short.
        if page == 1:
            page = None
        if per_page == self.items_per_page:
            per_page = None
        return req.href(req.path_info, form_realms, q=self.query,
                        realms=realms, listtagged_per_page=per_page,
                        listtagged_page=page, **kwargs)

    def render_cloud(self, req, cloud, renderer=None, caseless_sort=False,
                     mincount=None):
        """Render a tag cloud.

        :cloud: Dictionary of {object: count} representing the cloud.
        :param renderer: A callable with signature (tag, count, percent)
            used to render the cloud objects.
        :param caseless_sort: Boolean, whether tag cloud should be sorted
            case-sensitive.
        :param mincount: Integer threshold to hide tags with smaller count.
        """
        min_px = 10.0
        max_px = 30.0
        scale = 1.0

        if renderer is None:
            def default_renderer(tag, count, percent):
                href = self.get_href(req, tag=Resource('tag', tag))
                return builder.a(tag, rel='tag', title='%i' % count,
                                 href=href,
                                 style='font-size: %ipx'
                                       % int(min_px + percent *
                                             (max_px - min_px)))
            renderer = default_renderer

        # A LUT from count to n/len(cloud)
        size_lut = dict([(c, float(i)) for i, c in enumerate(
                         sorted(set([r for r in cloud.values()])))])
        if size_lut:
            scale = 1.0 / len(size_lut)

        if caseless_sort:
            # Preserve upper-case precedence within similar tags.
            items = reversed(sorted(cloud.iteritems(),
                                    key=lambda t: t[0].lower(),
                                    reverse=True))
        else:
            items = sorted(cloud.iteritems())
        ul = li = None
        for i, (tag, count) in enumerate(items):
            percent = size_lut[count] * scale
            if mincount and count < as_int(mincount, 1):
                # Tag count is too low.
                continue
            if ul:
                # Found new tag for cloud; now add previously prepared one.
                ul('\n', li)
            else:
                # Found first tag for cloud; now create the list.
                ul = builder.ul(class_='tagcloud')
            # Prepare current tag entry.
            li = builder.li(renderer(tag, count, percent))
        if li:
            # All tags checked; mark latest tag as last one (no tailing
            # colon).
            li(class_='last')
            ul('\n', li, '\n')
        return ul and ul or _("No tags found")

    def _paginate(self, req, results):
        """Wrap `results` in a `Paginator` driven by the request's
        `listtagged_page`/`listtagged_per_page` arguments and register
        prev/next navigation links."""
        self.query = req.args.get('q', None)
        current_page = as_int(req.args.get('listtagged_page'), 1)
        items_per_page = as_int(req.args.get('listtagged_per_page'), None)
        if items_per_page is None:
            items_per_page = self.items_per_page
        result = Paginator(results, current_page - 1, items_per_page)
        pagedata = []
        shown_pages = result.get_shown_pages(21)
        for page in shown_pages:
            page_href = self.get_href(req, items_per_page, page)
            pagedata.append([page_href, None, str(page),
                             _("Page %(num)d", num=page)])
        attributes = ['href', 'class', 'string', 'title']
        result.shown_pages = [dict(zip(attributes, p)) for p in pagedata]
        result.current_page = {'href': None, 'class': 'current',
                               'string': str(result.page + 1),
                               'title': None}
        if result.has_next_page:
            next_href = self.get_href(req, items_per_page, current_page + 1)
            add_link(req, 'next', next_href, _('Next Page'))
        if result.has_previous_page:
            prev_href = self.get_href(req, items_per_page, current_page - 1)
            add_link(req, 'prev', prev_href, _('Previous Page'))
        return result
class AuthCaptcha(Component):
    """Guard selected realms with a CAPTCHA for anonymous visitors.

    Anonymous POSTs to the configured realms must solve a CAPTCHA; a
    correct answer authenticates the request under the supplied name.
    """

    ### class data

    implements(IRequestFilter, ITemplateStreamFilter, ITemplateProvider,
               IAuthenticator, IEnvironmentSetupParticipant,
               IRequireComponents, INavigationContributor)

    # Word list used to generate CAPTCHA challenges.
    dict_file = Option(
        'captchaauth', 'dictionary_file',
        default="http://java.sun.com/docs/books/tutorial/collections/interfaces/examples/dictionary.txt")
    # 'png' renders an image CAPTCHA; anything else uses ASCII-art.
    captcha_type = Option('captchaauth', 'type', default="png")
    # Realms whose anonymous POSTs require CAPTCHA identification.
    realms = ListOption('captchaauth', 'realms', default="wiki, newticket")

    permissions = {'wiki': ['WIKI_CREATE', 'WIKI_MODIFY'],
                   'newticket': ['TICKET_CREATE']}
    # Where to insert the CAPTCHA form, per template.
    xpath = {'ticket.html': "//div[@class='buttons']"}
    # Template elements to strip, per template.
    delete = {'ticket.html': "//div[@class='field']"}

    ### IRequestFilter methods

    def pre_process_request(self, req, handler):
        """Called after initial handler selection, and can be used to change
        the selected handler or redirect request.

        Always returns the request handler, even if unchanged.
        """
        if req.method == 'GET':
            # Log out authenticated users revisiting /register or /login.
            if req.path_info.strip('/') in ['register', 'login'] \
                    and req.authname != 'anonymous':
                login_module = LoginModule(self.env)
                login_module._do_logout(req)
                req.redirect(req.href(req.path_info))

        if req.method == 'POST':
            realm = self.realm(req)

            # set the session data for name and email if
            # CAPTCHA-authenticated
            if 'captchaauth' in req.args:
                name, email = self.identify(req)
                # Iterate explicit pairs instead of the original fragile
                # locals()[field] lookup.
                for field, value in (('name', name), ('email', email)):
                    if value:
                        req.session[field] = value
                req.session.save()
                if req.authname != 'anonymous' and realm == 'newticket':
                    req.args['author'] = name
                    if email:
                        req.args['author'] += ' <%s>' % email

            # redirect anonymous user posts that are not CAPTCHA-identified
            if req.authname == 'anonymous' and realm in self.realms:
                if 'captchaauth' in req.args and 'captchaid' in req.args:
                    # add warnings from CAPTCHA authentication
                    captcha = self.captcha(req)
                    if req.args['captchaauth'] != captcha:
                        add_warning(req, "You typed the wrong word. "
                                         "Please try again.")
                    try:
                        # delete used CAPTCHA
                        execute_non_query(
                            self.env, "DELETE FROM captcha WHERE id=%s",
                            req.args['captchaid'])
                    except Exception:
                        # Best-effort cleanup; a stale row is harmless.
                        pass
                    name, email = self.identify(req)
                    if not name:
                        add_warning(req, 'Please provide your name')
                    if AccountManager and name in AccountManager(
                            self.env).get_users():
                        add_warning(
                            req,
                            '%s is already taken as by a registered user. '
                            'Please login or use a different name' % name)
                # redirect to previous location
                location = req.get_header('referer')
                if location:
                    location, query = urllib.splitquery(location)
                    if realm == 'newticket':
                        # Preserve submitted ticket fields across the
                        # redirect.
                        args = [(key.split('field_', 1)[-1], value)
                                for key, value in req.args.items()
                                if key.startswith('field_')]
                        location += '?%s' % urllib.urlencode(args)
                else:
                    location = req.href()
                req.redirect(location)

        return handler

    # for ClearSilver templates
    # NOTE: this definition is immediately shadowed by the Genshi variant
    # below (Python keeps only the last `def` of a name); it is retained
    # solely to document the 0.10/ClearSilver signature.
    def post_process_request(self, req, template, content_type):
        """Do any post-processing the request might need; typically adding
        values to req.hdf, or changing template or mime type.

        Always returns a tuple of (template, content_type), even if
        unchanged.

        Note that `template`, `content_type` will be `None` if:
         - called when processing an error page
         - the default request handler did not return any result

        (for 0.10 compatibility; only used together with ClearSilver
        templates)
        """
        return (template, content_type)

    # for Genshi templates
    def post_process_request(self, req, template, data, content_type):
        """Do any post-processing the request might need; typically adding
        values to the template `data` dictionary, or changing template or
        mime type.

        `data` may be update in place.

        Always returns a tuple of (template, data, content_type), even if
        unchanged.

        Note that `template`, `data`, `content_type` will be `None` if:
         - called when processing an error page
         - the default request handler did not return any result

        (Since 0.11)
        """
        return (template, data, content_type)

    ### ITemplateStreamFilter method

    def filter_stream(self, req, method, filename, stream, data):
        """Return a filtered Genshi event stream, or the original unfiltered
        stream if no match.

        `req` is the current request object, `method` is the Genshi render
        method (xml, xhtml or text), `filename` is the filename of the
        template to be rendered, `stream` is the event stream and `data` is
        the data for the current template.

        See the Genshi documentation for more information.
        """
        # only show CAPTCHAs for anonymous users
        if req.authname != 'anonymous':
            return stream

        # only put CAPTCHAs in the realms specified
        realm = self.realm(req)
        if realm not in self.realms:
            return stream

        # add the CAPTCHA to the stream
        if filename in self.xpath:
            # store CAPTCHA in DB and session
            word = random_word(self.dict_file)
            insert_update(self.env, 'captcha', 'id', req.session.sid,
                          dict(word=word))
            req.session['captcha'] = word
            req.session.save()

            # render the template
            chrome = Chrome(self.env)
            template = chrome.load_template('captcha.html')
            _data = {}

            # CAPTCHA type
            if self.captcha_type == 'png':
                captcha = tag.img(None, src=req.href('captcha.png'))
            else:
                captcha = Markup(skimpyAPI.Pre(word).data())
            _data['captcha'] = captcha
            _data['email'] = req.session.get('email', '')
            _data['name'] = req.session.get('name', '')
            _data['captchaid'] = req.session.sid
            xpath = self.xpath[filename]
            stream |= Transformer(xpath).before(template.generate(**_data))
        if filename in self.delete:
            stream |= Transformer(self.delete[filename]).remove()
        return stream

    ### methods for ITemplateProvider

    def get_htdocs_dirs(self):
        """Return a list of directories with static resources (such as style
        sheets, images, etc.)

        Each item in the list must be a `(prefix, abspath)` tuple. The
        `prefix` part defines the path in the URL that requests to these
        resources are prefixed with.

        The `abspath` is the absolute path to the directory containing the
        resources on the local file system.
        """
        return []

    def get_templates_dirs(self):
        """Return a list of directories containing the provided template
        files.
        """
        return [resource_filename(__name__, 'templates')]

    ### method for IAuthenticator

    def authenticate(self, req):
        """Return the name of the remote user, or `None` if the identity of
        the user is unknown."""

        # check for an authenticated user
        login_module = LoginModule(self.env)
        remote_user = login_module.authenticate(req)
        if remote_user:
            return remote_user

        # authenticate via a CAPTCHA
        if 'captchaauth' in req.args and 'captchaid' in req.args:

            # ensure CAPTCHA identification
            captcha = self.captcha(req)
            if captcha != req.args['captchaauth']:
                return

            # ensure sane identity
            name, email = self.identify(req)
            if name is None:
                return
            if AccountManager and name in AccountManager(
                    self.env).get_users():
                return

            # delete used CAPTCHA on success
            try:
                execute_non_query(self.env,
                                  "DELETE FROM captcha WHERE id=%s",
                                  req.args['captchaid'])
            except Exception:
                # Best-effort cleanup; a stale row is harmless.
                pass

            # log the user in
            req.environ['REMOTE_USER'] = name
            login_module._do_login(req)

    ### methods for INavigationContributor

    def get_active_navigation_item(self, req):
        """This method is only called for the `IRequestHandler` processing
        the request.

        It should return the name of the navigation item that should be
        highlighted as active/current.
        """
        return None

    def get_navigation_items(self, req):
        """Should return an iterable object over the list of navigation
        items to add, each being a tuple in the form (category, name, text).
        """
        if req.authname != 'anonymous' and 'captcha' in req.session:
            return [('metanav', '_login',
                     tag.a("Login", href=req.href.login())),
                    ('metanav', '_register',
                     tag.a("Register", href=req.href.register()))]
        return []

    ### methods for IEnvironmentSetupParticipant

    def environment_created(self):
        """Called when a new Trac environment is created."""
        if self.environment_needs_upgrade(None):
            self.upgrade_environment(None)

    def environment_needs_upgrade(self, db):
        """Called when Trac checks whether the environment needs to be
        upgraded.

        Should return `True` if this participant needs an upgrade to be
        performed, `False` otherwise.
        """
        # Upgrade is needed exactly when the captcha table is missing.
        try:
            get_table(self.env, 'captcha')
        except Exception:
            return True
        return False

    def upgrade_environment(self, db):
        """Actually perform an environment upgrade.

        Implementations of this method should not commit any database
        transactions. This is done implicitly after all participants have
        performed the upgrades they need without an error being raised.
        """
        # table of CAPTCHAs
        # BUG FIX: the key must name an existing column ('id'); the
        # original used the literal string 'key', which is not a column.
        captcha_table = Table('captcha', key='id')[Column('id'),
                                                   Column('word')]
        create_table(self.env, captcha_table)

    ### method for IRequireComponents

    def requires(self):
        """list of component classes that this component depends on"""
        return [ImageCaptcha]

    ### internal methods

    def identify(self, req):
        """Identify the user, ensuring uniqueness (TODO); returns a tuple of
        (name, email) where each element is `None` when missing or blank.
        """
        # BUG FIX: `req.args.get('name', None).strip()` raised
        # AttributeError whenever the field was absent; strip only when a
        # value is present and normalize blank values to None so callers'
        # `is None` checks also reject empty identities.
        name = req.args.get('name')
        name = name.strip() if name else None
        email = req.args.get('email')
        email = email.strip() if email else None
        return name, email

    def realm(self, req):
        """Return the realm (first path segment) for the request."""
        path = req.path_info.strip('/').split('/')
        if not path:
            return  # TODO: default handler ('/')
        return path[0]

    def captcha(self, req):
        # Look up the expected CAPTCHA word for this submission id.
        return get_scalar(self.env,
                          "SELECT word FROM captcha WHERE id=%s", 0,
                          req.args['captchaid'])
class SubTicketsModule(Component):
    # Adds parent/child ticket relations: renders a "Subtickets" section on
    # the ticket page and validates close/reopen actions across the tree.

    implements(IRequestFilter, ITicketManipulator, ITemplateProvider,
               ITemplateStreamFilter)

    # Simple Options

    opt_skip_validation = ListOption('subtickets',
                                     'skip_closure_validation',
                                     default=[], doc=_("""
        Normally, reopening a child with a `closed` parent will be refused
        and closing a parent with non-`closed` children will also be
        refused.

        Adding either of `reopen` or `resolve` to this option will make
        Subtickets skip this validation for the respective action.

        Separate by comma if both actions are listed.

        Caveat: This functionality will be made workflow-independent in a
        future release of !SubTicketsPlugin.
        """))

    opt_recursion_depth = IntOption('subtickets', 'recursion_depth',
                                    default=-1, doc=_("""
        Limit the number of recursive levels when listing subtickets.
        Default is infinity, represented by`-1`. The value zero (0) limits
        the listing to immediate children.
        """))

    opt_add_style = ChoiceOption('subtickets', 'add_style',
                                 ['button', 'link'], doc=_("""
        Choose whether to make `Add` look like a button (default) or a link
        """))

    opt_owner_url = Option('subtickets', 'owner_url', doc=_("""
        Currently undocumented.
        """))

    # Per-ticket type options -- all initialised in __init__()
    opt_inherit_fields = dict()
    opt_columns = dict()

    def _add_per_ticket_type_option(self, ticket_type):
        # Register the two per-type options; called for every existing
        # ticket type at startup and again when a type is added via the
        # admin UI (see post_process_request).
        self.opt_inherit_fields[ticket_type] = ListOption(
            'subtickets', 'type.%s.child_inherits' % ticket_type,
            default='',
            doc=_("""Comma-separated list of ticket fields whose values are
            to be copied from a parent ticket into a newly created child
            ticket.
            """))

        self.opt_columns[ticket_type] = ListOption(
            'subtickets', 'type.%s.table_columns' % ticket_type,
            default='status,owner', doc=_("""
            Comma-separated list of ticket fields whose values are to be
            shown for each child ticket in the subtickets list
            """))

    def __init__(self):
        # The following initialisations must happen inside init()
        # in order to be able to access self.env
        for tt in TicketType.select(self.env):
            self._add_per_ticket_type_option(tt.name)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('subtickets', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        # No templates are provided by this plugin.
        return []

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        # Request handling is not altered, only post-processed.
        return handler

    def post_process_request(self, req, template, data, content_type):
        # Inject parent links and the subticket tree into ticket pages, and
        # register per-type options when a new ticket type is added through
        # the admin UI.
        path = req.path_info
        if path.startswith('/ticket/') or path.startswith('/newticket'):
            # get parent ticket's data
            if data and 'ticket' in data:
                ticket = data['ticket']
                parents = ticket['parents'] or ''
                ids = set(NUMBERS_RE.findall(parents))

                if len(parents) > 0:
                    self._append_parent_links(req, data, ids)

                children = self.get_children(ticket.id)
                if children:
                    data['subtickets'] = children

        elif path.startswith('/admin/ticket/type') \
                and data \
                and set(['add', 'name']).issubset(data.keys()) \
                and data['add'] == 'Add':
            self._add_per_ticket_type_option(data['name'])

        return template, data, content_type

    def _append_parent_links(self, req, data, ids):
        # Render comma-separated, status-styled links for all parent ids
        # into the rendered 'parents' field.
        links = []
        for id in sorted(ids, key=lambda x: int(x)):
            try:
                ticket = Ticket(self.env, id)
                elem = tag.a('#%s' % id,
                             href=req.href.ticket(id),
                             class_='%s ticket' % ticket['status'],
                             title=ticket['summary'])
                if len(links) > 0:
                    links.append(', ')
                links.append(elem)
            except ResourceNotFound:
                # Skip references to tickets that no longer exist.
                pass
        for field in data.get('fields', ''):
            if field.get('name') == 'parents':
                field['rendered'] = tag.span(*links)

    # ITicketManipulator methods

    def prepare_ticket(self, req, ticket, fields, actions):
        # Nothing to prepare; validation happens in validate_ticket().
        pass

    def get_children(self, parent_id, depth=0):
        """Return a nested dict {child_id: {grandchild_id: ...} or None}
        of the subticket tree below `parent_id`, limited by
        `opt_recursion_depth` (-1 means unlimited)."""
        children = {}
        for parent, child in self.env.db_query("""
                SELECT parent, child FROM subtickets WHERE parent=%s
                """, (parent_id, )):
            children[child] = None

        if self.opt_recursion_depth > depth \
                or self.opt_recursion_depth == -1:
            for id in children:
                children[id] = self.get_children(id, depth + 1)

        return children

    def validate_ticket(self, req, ticket):
        # Refuse closing a parent with open children and reopening a child
        # of a closed parent, unless the action is listed in
        # `skip_closure_validation`.
        action = req.args.get('action')
        if action in self.opt_skip_validation:
            return
        if action == 'resolve':
            for parent, child in self.env.db_query("""
                    SELECT parent, child FROM subtickets WHERE parent=%s
                    """, (ticket.id, )):
                if Ticket(self.env, child)['status'] != 'closed':
                    yield None, _("""Cannot close/resolve because child
                        ticket #%(child)s is still open""", child=child)
        elif action == 'reopen':
            ids = set(NUMBERS_RE.findall(ticket['parents'] or ''))
            for id in ids:
                if Ticket(self.env, id)['status'] == 'closed':
                    msg = _("Cannot reopen because parent ticket #%(id)s "
                            "is closed", id=id)
                    yield None, msg

    # ITemplateStreamFilter method

    def _create_subtickets_table(self, req, children, tbody, depth=0):
        """Recursively create list table of subtickets
        """
        if not children:
            return
        # Depth-first, ordered numerically by ticket id; indentation
        # reflects the nesting depth.
        for id in sorted(children, key=lambda x: int(x)):
            ticket = Ticket(self.env, id)

            # the row
            r = []
            # Always show ID and summary
            attrs = {'href': req.href.ticket(id)}
            if ticket['status'] == 'closed':
                attrs['class_'] = 'closed'
            link = tag.a('#%s' % id, **attrs)
            summary = tag.td(link, ': %s' % ticket['summary'],
                             style='padding-left: %dpx;' % (depth * 15))
            r.append(summary)

            # Add other columns as configured.
            for column in \
                    self.env.config.getlist('subtickets',
                                            'type.%(type)s.table_columns'
                                            % ticket):
                if column == 'owner':
                    if self.opt_owner_url:
                        href = req.href(self.opt_owner_url
                                        % ticket['owner'])
                    else:
                        href = req.href.query(status='!closed',
                                              owner=ticket['owner'])
                    e = tag.td(tag.a(ticket['owner'], href=href))
                elif column == 'milestone':
                    href = req.href.query(status='!closed',
                                          milestone=ticket['milestone'])
                    e = tag.td(tag.a(ticket['milestone'], href=href))
                else:
                    e = tag.td(ticket[column])
                r.append(e)

            tbody.append(tag.tr(*r))
            self._create_subtickets_table(req, children[id], tbody,
                                          depth + 1)

    def filter_stream(self, req, method, filename, stream, data):
        # Append the subtickets section (heading, add link/button and the
        # child-ticket table) to the ticket description div.
        if not req.path_info.startswith('/ticket/'):
            return stream

        div = None
        link = None
        button = None

        if 'ticket' in data:
            # get parents data
            ticket = data['ticket']
            # title
            div = tag.div(class_='description')
            if 'TICKET_CREATE' in req.perm(ticket.resource) \
                    and ticket['status'] != 'closed':
                # Fields the child ticket inherits from this parent.
                opt_inherit = self.env.config.getlist(
                    'subtickets', 'type.%(type)s.child_inherits' % ticket)
                if self.opt_add_style == 'link':
                    inh = {f: ticket[f] for f in opt_inherit}
                    link = tag.a(_('add'),
                                 href=req.href.newticket(parents=ticket.id,
                                                         **inh))
                    link = tag.span('(', link, ')', class_='addsubticket')
                else:
                    inh = [tag.input(type='hidden', name=f,
                                     value=ticket[f])
                           for f in opt_inherit]
                    button = tag.form(tag.div(tag.input(
                        type="submit", value=_("Create"),
                        title=_("Create a child ticket")), inh,
                        tag.input(type="hidden", name="parents",
                                  value=str(ticket.id)),
                        class_="inlinebuttons"),
                        method="get", action=req.href.newticket())
            div.append(button)
            div.append(tag.h3(_('Subtickets '), link))

        if 'subtickets' in data:
            # table
            tbody = tag.tbody()
            div.append(tag.table(tbody, class_='subtickets'))
            # tickets
            self._create_subtickets_table(req, data['subtickets'], tbody)

        if div:
            add_stylesheet(req, 'subtickets/css/subtickets.css')
            '''
            If rendered in preview mode, DIV we're interested in isn't a
            child but the root and transformation won't succeed.
            According to HTML specification, id's must be unique within a
            document, so it's safe to omit the leading '.' in XPath
            expression to select all matching regardless of hierarchy
            their in.
            '''
            stream |= Transformer('//div[@id="ticket"]').append(div)

        return stream
class TagRequestHandler(TagTemplateProvider):
    """[main] Implements the /tags handler."""

    implements(INavigationContributor, IRequestHandler)

    cloud_mincount = Option('tags', 'cloud_mincount', 1,
        doc="""Integer threshold to hide tags with smaller count.""")

    default_cols = Option('tags', 'default_table_cols',
        'id|description|tags',
        doc="""Select columns and order for table format using a "|"-separated
            list of column names.

            Supported columns: realm, id, description, tags
            """)

    default_format = Option('tags', 'default_format', 'oldlist',
        doc="""Set the default format for the handler of the `/tags` domain.

            || `oldlist` (default value) || The original format with a
            bulleted-list of "linked-id description (tags)" ||
            || `compact` || bulleted-list of "linked-description" ||
            || `table` || table... (see corresponding column option) ||
            """)

    exclude_realms = ListOption('tags', 'exclude_realms', [],
        doc="""Comma-separated list of realms to exclude from tags queries
            by default, unless specifically included using "realm:realm-name"
            in a query.""")

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        # Highlight the 'tags' entry for users allowed to view tags.
        if 'TAGS_VIEW' in req.perm:
            return 'tags'

    def get_navigation_items(self, req):
        if 'TAGS_VIEW' in req.perm:
            label = tag_("Tags")
            yield ('mainnav', 'tags',
                   builder.a(label, href=req.href.tags(), accesskey='T'))

    # IRequestHandler methods

    def match_request(self, req):
        # Handle every request below /tags.
        return req.path_info.startswith('/tags')

    def process_request(self, req):
        """Render a single tag page, a tag query result or the tag cloud,
        delegating the body rendering to the `ListTagged`/`TagCloud`
        macros."""
        req.perm.require('TAGS_VIEW')

        match = re.match(r'/tags/?(.*)', req.path_info)
        tag_id = match.group(1) and match.group(1) or None
        query = req.args.get('q', '')

        # Consider only providers, that are permitted for display.
        tag_system = TagSystem(self.env)
        all_realms = tag_system.get_taggable_realms(req.perm)
        if not (tag_id or query) or [r for r in all_realms
                                     if r in req.args] == []:
            # No explicit realm selection: enable all non-excluded realms.
            for realm in all_realms:
                if realm not in self.exclude_realms:
                    req.args[realm] = 'on'
        checked_realms = [r for r in all_realms if r in req.args]
        if query:
            # Add permitted realms from query expression.
            checked_realms.extend(query_realms(query, all_realms))
        realm_args = dict(zip([r for r in checked_realms],
                              ['on' for r in checked_realms]))
        # Switch between single tag and tag query expression mode.
        if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id,
                                   re.UNICODE):
            # Convert complex, invalid tag ID's --> query expression.
            req.redirect(req.href.tags(realm_args, q=tag_id))
        elif query:
            single_page = re.match(r"""(['"]?)(\S+)\1$""", query,
                                   re.UNICODE)
            if single_page:
                # Convert simple query --> single tag.
                req.redirect(req.href.tags(single_page.group(2),
                                           realm_args))

        data = dict(page_title=_("Tags"), checked_realms=checked_realms)
        # Populate the TagsQuery form field.
        data['tag_query'] = tag_id and tag_id or query
        data['tag_realms'] = list(dict(name=realm,
                                       checked=realm in checked_realms)
                                  for realm in all_realms)
        if tag_id:
            data['tag_page'] = WikiPage(self.env,
                                        tag_system.wiki_page_prefix
                                        + tag_id)
        if query or tag_id:
            macro = 'ListTagged'
            # TRANSLATOR: The meta-nav link label.
            add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
            args = "%s,format=%s,cols=%s" % (tag_id and tag_id or query,
                                             self.default_format,
                                             self.default_cols)
            data['mincount'] = None
        else:
            macro = 'TagCloud'
            mincount = as_int(req.args.get('mincount', None),
                              self.cloud_mincount)
            args = mincount and "mincount=%s" % mincount or None
            data['mincount'] = mincount
        formatter = Formatter(self.env, web_context(req, Resource('tag')))
        self.env.log.debug("%s macro arguments: %s", macro,
                           args and args or '(none)')
        macros = TagWikiMacros(self.env)
        try:
            # Query string without realm throws 'NotImplementedError'.
            # NOTE(review): this passes a `realms=` keyword, so it requires
            # a TagWikiMacros.expand_macro that accepts one -- verify
            # against the macro implementation in use.
            data['tag_body'] = checked_realms and \
                               macros.expand_macro(formatter, macro, args,
                                                   realms=checked_realms) \
                               or ''
        except InvalidQuery, e:
            # Fall back to the cloud and surface the query error.
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = macros.expand_macro(formatter, 'TagCloud',
                                                   '')
        add_stylesheet(req, 'tags/css/tractags.css')
        return 'tag_view.html', data, None
def __init__(self, section, name, default=None, itemsep=',', subsep=':',
             keep_empty=False, doc='', doc_domain='tracini'):
    """Create a list option whose items are themselves sub-separated.

    Delegates the regular option setup to `ListOption.__init__`
    (`itemsep` is passed on as the item separator) and additionally
    stores `subsep`, the separator used to split each item into
    sub-items -- presumably by an accessor in the subclass, which is
    not visible here.
    """
    ListOption.__init__(self, section, name, default, itemsep, keep_empty,
                        doc, doc_domain)
    # Plain ListOption does not know about `subsep`; keep it on the
    # instance for later use.
    self.subsep = subsep
class BrowserModule(Component):
    """Repository browser web front-end.

    Handles `/browser`, `/export` and `/file` requests (see
    `match_request`), renders the repository index, directory listings
    and file views, and provides the color scale used for the ''age''
    column and ''blame'' annotations.
    """

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IWikiSyntaxProvider, IHTMLPreviewAnnotator, IWikiMacroProvider)

    # Plugins contributing renderers for version control properties.
    property_renderers = ExtensionPoint(IPropertyRenderer)

    downloadable_paths = ListOption('browser', 'downloadable_paths',
                                    '/trunk, /branches/*, /tags/*',
        doc="""List of repository paths that can be downloaded.

        Leave the option empty if you want to disable all downloads, otherwise
        set it to a comma-separated list of authorized paths (those paths are
        glob patterns, i.e. "*" can be used as a wild card)
        (''since 0.10'')""")

    color_scale = BoolOption('browser', 'color_scale', True,
        doc="""Enable colorization of the ''age'' column.

        This uses the same color scale as the source code annotation:
        blue is older, red is newer.
        (''since 0.11'')""")

    # Fallback color triples used when the corresponding option does not
    # parse into three integers (see `get_custom_colorizer`).
    NEWEST_COLOR = (255, 136, 136)

    newest_color = Option('browser', 'newest_color', repr(NEWEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding
        to the newest color, for the color scale used in ''blame'' or
        the browser ''age'' column if `color_scale` is enabled.
        (''since 0.11'')""")

    OLDEST_COLOR = (136, 136, 255)

    oldest_color = Option('browser', 'oldest_color', repr(OLDEST_COLOR),
        doc="""(r,g,b) color triple to use for the color corresponding
        to the oldest color, for the color scale used in ''blame'' or
        the browser ''age'' column if `color_scale` is enabled.
        (''since 0.11'')""")

    intermediate_point = Option('browser', 'intermediate_point', '',
        doc="""If set to a value between 0 and 1 (exclusive), this will be the
        point chosen to set the `intermediate_color` for interpolating
        the color value.
        (''since 0.11'')""")

    intermediate_color = Option('browser', 'intermediate_color', '',
        doc="""(r,g,b) color triple to use for the color corresponding
        to the intermediate color, if two linear interpolations are used
        for the color scale (see `intermediate_point`).
        If not set, the intermediate color between `oldest_color` and
        `newest_color` will be used.
        (''since 0.11')""")

    render_unsafe_content = BoolOption('browser', 'render_unsafe_content',
                                       'false',
        """Whether raw files should be rendered in the browser, or only made
        downloadable.

        Pretty much any file may be interpreted as HTML by the browser,
        which allows a malicious user to create a file containing cross-site
        scripting attacks.

        For open repositories where anyone can check-in a file, it is
        recommended to leave this option disabled (which is the default).""")

    hidden_properties = ListOption('browser', 'hide_properties', 'svk:merge',
        doc="""Comma-separated list of version control properties to hide from
        the repository browser.
        (''since 0.9'')""")

    # public methods

    def get_custom_colorizer(self):
        """Returns a converter for values from [0.0, 1.0] to a RGB triple."""

        def interpolate(old, new, value):
            # Provides a linearly interpolated color triple for `value`
            # which must be a floating point value between 0.0 and 1.0
            return tuple([int(b + (a - b) * value) for a, b in
                          zip(new, old)])

        def parse_color(rgb, default):
            # Get three ints out of a `rgb` string or return `default`
            try:
                t = tuple([int(v) for v in re.split(r'(\d+)', rgb)[1::2]])
                return t if len(t) == 3 else default
            except ValueError:
                return default

        newest_color = parse_color(self.newest_color, self.NEWEST_COLOR)
        oldest_color = parse_color(self.oldest_color, self.OLDEST_COLOR)
        try:
            intermediate = float(self.intermediate_point)
        except ValueError:
            # Empty or malformed option: no intermediate point; a single
            # linear interpolation is used below.
            intermediate = None
        if intermediate:
            # Two-segment interpolation through `intermediate_color`.
            # Note that an `intermediate_point` of 0 is treated as unset.
            intermediate_color = parse_color(self.intermediate_color, None)
            if not intermediate_color:
                # Default intermediate: midpoint of the two end colors.
                intermediate_color = tuple([(a + b) / 2 for a, b in
                                            zip(newest_color, oldest_color)])

            def colorizer(value):
                if value <= intermediate:
                    value = value / intermediate
                    return interpolate(oldest_color, intermediate_color,
                                       value)
                else:
                    value = (value - intermediate) / (1.0 - intermediate)
                    return interpolate(intermediate_color, newest_color,
                                       value)
        else:
            def colorizer(value):
                return interpolate(oldest_color, newest_color, value)
        return colorizer

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        """Highlight the 'browser' entry in the main navigation."""
        return 'browser'

    def get_navigation_items(self, req):
        """Yield the ''Browse Source'' main navigation entry.

        Only contributed when the user has BROWSER_VIEW and at least one
        real (non-alias) repository is configured.
        """
        rm = RepositoryManager(self.env)
        if 'BROWSER_VIEW' in req.perm and rm.get_real_repositories():
            yield ('mainnav', 'browser',
                   tag.a(_('Browse Source'), href=req.href.browser()))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Permission actions defined by this component."""
        return ['BROWSER_VIEW', 'FILE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        """Match /export, /browser and /file URLs.

        `/export/<rev>/<path>` is rewritten into a raw-format browser
        request; `/file` is a legacy alias permanently redirected to
        `/browser`.  Returns a falsy value when the URL does not match.
        """
        match = re.match(r'/(export|browser|file)(/.*)?$', req.path_info)
        if match:
            mode, path = match.groups()
            if mode == 'export':
                if path and '/' in path:
                    # Expect exactly '/<rev>/<path>' after the mode.
                    path_elts = path.split('/', 2)
                    if len(path_elts) != 3:
                        return False
                    path = path_elts[2]
                    req.args['rev'] = path_elts[1]
                    req.args['format'] = 'raw'
            elif mode == 'file':
                req.redirect(req.href.browser(path, rev=req.args.get('rev'),
                                              format=req.args.get('format')),
                             permanent=True)
            req.args['path'] = path or '/'
            return True

    def process_request(self, req):
        """Render the repository index, a directory listing or a file view.

        Requires BROWSER_VIEW.  Returns the `browser.html` template and
        its data dict, or `dir_entries.html` for XHR requests.  Raises
        `ResourceNotFound` for unknown repositories, invalid changesets
        or missing nodes.
        """
        req.perm.require('BROWSER_VIEW')
        # A repository preselected in the index form redirects straight
        # to that repository's browser URL.
        presel = req.args.get('preselected')
        if presel and (presel + '/').startswith(req.href.browser() + '/'):
            req.redirect(presel)

        path = req.args.get('path', '/')
        rev = req.args.get('rev', '')
        # '' and 'HEAD' both mean the latest revision.
        if rev.lower() in ('', 'head'):
            rev = None
        order = req.args.get('order', 'name').lower()
        desc = req.args.has_key('desc')
        xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'

        rm = RepositoryManager(self.env)
        all_repositories = rm.get_all_repositories()
        reponame, repos, path = rm.get_repository_by_path(path)

        # Repository index
        show_index = not reponame and path == '/'
        if show_index:
            # Hide the default repository on the index page when it is
            # marked hidden or not viewable by this user.
            if repos and (as_bool(all_repositories[''].get('hidden'))
                          or not repos.is_viewable(req.perm)):
                repos = None

        if not repos and reponame:
            raise ResourceNotFound(_("Repository '%(repo)s' not found",
                                     repo=reponame))

        if reponame and reponame != repos.reponame:  # Redirect alias
            qs = req.query_string
            req.redirect(req.href.browser(repos.reponame or None, path)
                         + ('?' + qs if qs else ''))
        reponame = repos.reponame if repos else None

        # Find node for the requested path/rev
        context = web_context(req)
        node = None
        display_rev = lambda rev: rev
        if repos:
            try:
                if rev:
                    rev = repos.normalize_rev(rev)
                # If `rev` is `None`, we'll try to reuse `None` consistently,
                # as a special shortcut to the latest revision.
                rev_or_latest = rev or repos.youngest_rev
                node = get_existing_node(req, repos, path, rev_or_latest)
            except NoSuchChangeset, e:
                raise ResourceNotFound(e.message,
                                       _('Invalid changeset number'))
            context = context.child(repos.resource.child('source', path,
                                                   version=rev_or_latest))
            display_rev = repos.display_rev

        # Prepare template data
        path_links = get_path_links(req.href, reponame, path, rev,
                                    order, desc)

        repo_data = dir_data = file_data = None
        if show_index:
            repo_data = self._render_repository_index(context,
                                                      all_repositories,
                                                      order, desc)
        if node:
            if node.isdir:
                dir_data = self._render_dir(req, repos, node, rev,
                                            order, desc)
            elif node.isfile:
                file_data = self._render_file(req, context, repos, node, rev)

        if not repos and not (repo_data and repo_data['repositories']):
            raise ResourceNotFound(_("No node %(path)s", path=path))

        # Properties and quickjump entries are not needed for the
        # XHR-only directory-entries rendering.
        quickjump_data = properties_data = None
        if node and not xhr:
            properties_data = self.render_properties('browser', context,
                                                     node.get_properties())
            quickjump_data = list(repos.get_quickjump_entries(rev))

        data = {
            'context': context, 'reponame': reponame, 'repos': repos,
            'repoinfo': all_repositories.get(reponame or ''),
            'path': path, 'rev': node and node.rev, 'stickyrev': rev,
            'display_rev': display_rev,
            'created_path': node and node.created_path,
            'created_rev': node and node.created_rev,
            'properties': properties_data,
            'path_links': path_links,
            'order': order, 'desc': 1 if desc else None,
            'repo': repo_data, 'dir': dir_data, 'file': file_data,
            'quickjump_entries': quickjump_data,
            'wiki_format_messages': \
                self.config['changeset'].getbool('wiki_format_messages'),
            'xhr': xhr,
        }
        if xhr:  # render and return the content only
            return 'dir_entries.html', data, None

        if dir_data or repo_data:
            add_script(req, 'common/js/expand_dir.js')
            add_script(req, 'common/js/keyboard_nav.js')

        # Links for contextual navigation
        if node:
            if node.isfile:
                # prev/up/next navigation between revisions of this file.
                prev_rev = repos.previous_rev(rev=node.rev,
                                              path=node.created_path)
                if prev_rev:
                    href = req.href.browser(reponame,
                                            node.created_path, rev=prev_rev)
                    add_link(req, 'prev', href,
                             _('Revision %(num)s',
                               num=display_rev(prev_rev)))
                if rev is not None:
                    add_link(req, 'up', req.href.browser(reponame,
                                                       node.created_path))
                next_rev = repos.next_rev(rev=node.rev,
                                          path=node.created_path)
                if next_rev:
                    href = req.href.browser(reponame,
                                            node.created_path, rev=next_rev)
                    add_link(req, 'next', href,
                             _('Revision %(num)s',
                               num=display_rev(next_rev)))
                prevnext_nav(req, _('Previous Revision'),
                             _('Next Revision'), _('Latest Revision'))
            else:
                if path != '/':
                    add_link(req, 'up', path_links[-2]['href'],
                             _('Parent directory'))
            add_ctxtnav(req, tag.a(_('Last Change'),
                        href=req.href.changeset(node.rev, reponame,
                                                node.created_path)))
            if node.isfile:
                # Toggle between plain and annotated ('blame') views.
                if data['file']['annotate']:
                    add_ctxtnav(req, _('Normal'),
                                title=_('View file without annotations'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev))
                else:
                    add_ctxtnav(req, _('Annotate'),
                                title=_('Annotate each line with the last '
                                        'changed revision '
                                        '(this can be time consuming...)'),
                                href=req.href.browser(reponame,
                                                      node.created_path,
                                                      rev=rev,
                                                      annotate='blame'))
            add_ctxtnav(req, _('Revision Log'),
                        href=req.href.log(reponame, path, rev=rev))
            # Link to the repository's own URL for the path, if any.
            path_url = repos.get_path_url(path, rev)
            if path_url:
                if path_url.startswith('//'):
                    # Protocol-relative URL: reuse the request's scheme.
                    path_url = req.scheme + ':' + path_url
                add_ctxtnav(req, _('Repository URL'), href=path_url)

        add_stylesheet(req, 'common/css/browser.css')
        return 'browser.html', data, None