def has_resource(dist, module, resource_name):
    """Return whether distribution *dist* ships *resource_name*.

    Several metadata sources are probed in turn: the egg resource API,
    pip's installed-files.txt, setuptools SOURCES.txt, PEP 376 RECORD,
    PKG-INFO ``Provides`` headers, and finally top_level.txt / the
    distribution key as a last resort.

    :param dist: a `pkg_resources.Distribution`
    :param module: imported module whose dotted name is matched against
                   PKG-INFO ``Provides`` entries
    :param resource_name: '/'-separated resource path
    """
    if dist.location.endswith('.egg'):  # installed by easy_install
        return dist.has_resource(resource_name)
    if dist.has_metadata('installed-files.txt'):  # installed by pip
        # installed-files.txt entries are relative to the *.egg-info
        # directory, hence the '../' prefix before normalizing.
        resource_name = os.path.normpath('../' + resource_name)
        return any(resource_name == os.path.normpath(name)
                   for name in dist.get_metadata_lines('installed-files.txt'))
    if dist.has_metadata('SOURCES.txt'):
        resource_name = os.path.normpath(resource_name)
        return any(resource_name == os.path.normpath(name)
                   for name in dist.get_metadata_lines('SOURCES.txt'))
    if dist.has_metadata('RECORD'):  # *.dist-info/RECORD
        reader = csv.reader(StringIO(dist.get_metadata('RECORD')))
        return any(resource_name == row[0] for row in reader)
    if dist.has_metadata('PKG-INFO'):
        try:
            pkginfo = parse_pkginfo(dist, 'PKG-INFO')
            provides = pkginfo.get_all('Provides', ())
            names = module.__name__.split('.')
            # Match every dotted prefix of the module name against the
            # Provides headers.
            if any('.'.join(names[:n + 1]) in provides
                   for n in xrange(len(names))):
                return True
        except (IOError, email.Errors.MessageError):
            pass
    # Last resort: compare the top-level package name.
    toplevel = resource_name.split('/')[0]
    if dist.has_metadata('top_level.txt'):
        return toplevel in dist.get_metadata_lines('top_level.txt')
    return dist.key == toplevel.lower()
def test_passwords(self):
    """The 'database' and 'smtp_password' options render as password inputs."""
    template, data = self.iniadmin.render_admin_panel(
        self.req, 'tracini', 'trac', '')
    types = [opt['type'] for opt in data['iniadmin']['options']
             if opt['name'] == 'database']
    self.assertTrue('password' in types)
    template, data = self.iniadmin.render_admin_panel(
        self.req, 'tracini', 'notification', '')
    types = [opt['type'] for opt in data['iniadmin']['options']
             if opt['name'] == 'smtp_password']
    self.assertTrue('password' in types)
def test_passwords(self):
    """Password-matched options are exposed with type 'password'."""
    def rendered_as_password(section, option_name):
        template, data = self.iniadmin.render_admin_panel(
            self.req, 'tracini', section, '')
        return any(opt['type'] == 'password'
                   for opt in data['iniadmin']['options']
                   if opt['name'] == option_name)

    self.assertTrue(rendered_as_password('trac', 'database'))
    self.assertTrue(rendered_as_password('notification', 'smtp_password'))
def get_macro_descr():
    """Yield ``(description, [macro names])`` pairs for the macro listing.

    Closure over ``formatter``, ``name_filter`` and ``content`` from the
    enclosing macro expansion.  Macros sharing one description are
    grouped and yielded together.
    """
    for macro_provider in formatter.wiki.macro_providers:
        names = list(macro_provider.get_macros() or [])
        # Skip providers that have no macro matching the requested prefix.
        if name_filter and not any(name.startswith(name_filter)
                                   for name in names):
            continue
        try:
            name_descriptions = [
                (name, macro_provider.get_macro_description(name))
                for name in names]
        except Exception, e:
            # Surface provider failures inline as a system message.
            yield system_message(
                _("Error: Can't get description for macro %(name)s",
                  name=names[0]), e), names
        else:
            # Group macros by description (the groupby key is p[1]).
            # NOTE(review): groupby only merges *adjacent* equal
            # descriptions — assumes the provider returns them grouped.
            for descr, pairs in groupby(name_descriptions,
                                        key=lambda p: p[1]):
                if descr:
                    descr = to_unicode(descr) or ''
                    if content == '*':
                        # Compact one-liner for the '*' index listing.
                        descr = format_to_oneliner(
                            self.env, formatter.context, descr, shorten=True)
                    else:
                        descr = format_to_html(
                            self.env, formatter.context, descr)
                yield descr, [name for name, descr in pairs]
def _makedoc(self, target, visibility):
    """Warning: this helper method returns a `str` object.

    Render pydoc documentation for *target*.  When *visibility* is
    'private' (or empty but the module matches ``show_private``),
    pydoc's module-level ``visiblename`` hook is temporarily replaced
    so that everything except imported symbols is shown; the lock
    serializes access to that global hook.
    """
    module, object = self.load_object(target)
    try:
        # pydoc.visiblename is process-global; serialize the swap.
        self.makedoc_lock.acquire()
        if visibility == 'private' or \
                visibility == '' and any([module.__name__.startswith(p)
                                          for p in self.show_private]):
            try:
                # save pydoc's original visibility function
                visiblename = pydoc.visiblename
                # define our own: show everything but imported symbols
                is_imported_from_other_module = {}
                for k, v in object.__dict__.iteritems():
                    if hasattr(v, '__module__') and \
                            v.__module__ != module.__name__:
                        is_imported_from_other_module[k] = True

                def show_private(name, all=None):
                    return not is_imported_from_other_module.has_key(name)
                # install our visibility function
                pydoc.visiblename = show_private
                return self.doc.document(object)
            finally:
                # restore saved visibility function
                pydoc.visiblename = visiblename
        else:
            return self.doc.document(object)
    finally:
        self.makedoc_lock.release()
def _format_diff_link(self, formatter, ns, target, label):
    """Render a ``diff:`` wiki link as a changeset anchor.

    Supported target forms: ``path@old:new`` and
    ``old_path@rev//new_path@rev``.
    """
    params, query, fragment = formatter.split_link(target)

    def pathrev(path):
        # Split an optional '@rev' suffix off a path.
        if '@' in path:
            return path.split('@', 1)
        else:
            return (path, None)

    if '//' in params:
        # Two explicit path@rev specs separated by '//'.
        p1, p2 = params.split('//', 1)
        old, new = pathrev(p1), pathrev(p2)
        data = {'old_path': old[0], 'old_rev': old[1],
                'new_path': new[0], 'new_rev': new[1]}
    else:
        # Single path, optionally carrying an 'old:new' revision range.
        old_path, old_rev = pathrev(params)
        new_rev = None
        if old_rev and ':' in old_rev:
            old_rev, new_rev = old_rev.split(':', 1)
        data = {'old_path': old_path, 'old_rev': old_rev,
                'new_path': old_path, 'new_rev': new_rev}
    title = self.title_for_diff(data)
    href = None
    if any(data.values()):
        if query:
            # presumably split_link keeps the leading '?'; rewrite it so
            # the query can be appended to the changeset href — verify.
            query = '&' + query[1:]
        href = formatter.href.changeset(new_path=data['new_path'] or None,
                                        new=data['new_rev'],
                                        old_path=data['old_path'] or None,
                                        old=data['old_rev']) + query
    return tag.a(label, class_="changeset", title=title, href=href)
def expand_macro(self, formatter, name, content, args={}):
    """Display a changeset message, verifying ticket cross-references.

    :param args: WikiProcessor arguments; recognizes ``repository`` and
                 ``revision``.  The shared default dict is only read,
                 never mutated.
    """
    reponame = args.get('repository') or ''
    rev = args.get('revision')
    repos = RepositoryManager(self.env).get_repository(reponame)
    try:
        changeset = repos.get_changeset(rev)
        message = changeset.message
        rev = changeset.rev
        # BUG FIX: this read `repose.resource` (a typo raising
        # NameError) which the broad `except` below silently swallowed,
        # so the changeset data fetched above was always discarded.
        resource = repos.resource
    except Exception:
        # Repository or changeset unavailable: fall back to raw content.
        message = content
        resource = Resource('repository', reponame)
    if formatter.context.resource.realm == 'ticket':
        # Only show messages that actually reference the current ticket.
        ticket_re = MultiProjectCommitTicketUpdater.ticket_re
        if not any(int(tkt_id) == int(formatter.context.resource.id)
                   for tkt_id in ticket_re.findall(message)):
            return tag.p("(The changeset message doesn't reference this "
                         "ticket)", class_='hint')
    if ChangesetModule(self.env).wiki_format_messages:
        return tag.div(format_to_html(self.env,
                                      formatter.context('changeset', rev,
                                                        parent=resource),
                                      message, escape_newlines=True),
                       class_='message')
    else:
        return tag.pre(message, class_='message')
def create_zipinfo(filename, mtime=None, dir=False, executable=False,
                   symlink=False, comment=None):
    """Create an instance of `ZipInfo`.

    :param filename: file name of the entry
    :param mtime: modified time of the entry
    :param dir: if `True`, the entry is a directory
    :param executable: if `True`, the entry is an executable file
    :param symlink: if `True`, the entry is a symbolic link
    :param comment: comment of the entry
    """
    from zipfile import ZipInfo, ZIP_DEFLATED, ZIP_STORED
    zipinfo = ZipInfo()

    # The general purpose bit flag 11 is used to denote
    # UTF-8 encoding for path and comment. Only set it for
    # non-ascii files for increased portability.
    # See http://www.pkware.com/documents/casestudies/APPNOTE.TXT
    if any(ord(c) >= 128 for c in filename):
        zipinfo.flag_bits |= 0x0800
    zipinfo.filename = filename.encode('utf-8')

    if mtime is not None:
        mtime = to_datetime(mtime, utc)
        zipinfo.date_time = mtime.utctimetuple()[:6]
        # The "extended-timestamp" extra field is used for the
        # modified time of the entry in unix time. It avoids
        # extracting wrong modified time if non-GMT timezone.
        # See http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip
        # /proginfo/extra.fld
        zipinfo.extra += struct.pack(
            '<hhBl',
            0x5455,                 # extended-timestamp extra block type
            1 + 4,                  # size of this block
            1,                      # modification time is present
            to_timestamp(mtime))    # time of last modification

    # external_attr is 4 bytes in size. The high order two
    # bytes represent UNIX permission and file type bits,
    # while the low order two contain MS-DOS FAT file
    # attributes, most notably bit 4 marking directories.
    if dir:
        if not zipinfo.filename.endswith('/'):
            zipinfo.filename += '/'
        zipinfo.compress_type = ZIP_STORED
        zipinfo.external_attr = 040755 << 16L        # permissions drwxr-xr-x
        zipinfo.external_attr |= 0x10                # MS-DOS directory flag
    else:
        zipinfo.compress_type = ZIP_DEFLATED
        zipinfo.external_attr = 0644 << 16L          # permissions -rw-r--r--
        if executable:
            zipinfo.external_attr |= 0755 << 16L     # -rwxr-xr-x
        if symlink:
            zipinfo.compress_type = ZIP_STORED
            zipinfo.external_attr |= 0120000 << 16L  # symlink file type
    if comment:
        zipinfo.comment = comment.encode('utf-8')
    return zipinfo
def has_resource(dist, resource_name):
    """Return whether *dist* ships *resource_name*, probing the egg
    resource API, pip's installed-files.txt, SOURCES.txt, and finally
    top_level.txt / the distribution key.
    """
    # easy_install eggs support the resource API directly.
    if dist.location.endswith('.egg'):
        return dist.has_resource(resource_name)
    # pip records files relative to the *.egg-info directory.
    if dist.has_metadata('installed-files.txt'):
        wanted = os.path.normpath('../' + resource_name)
        recorded = dist.get_metadata_lines('installed-files.txt')
        return any(os.path.normpath(entry) == wanted for entry in recorded)
    if dist.has_metadata('SOURCES.txt'):
        wanted = os.path.normpath(resource_name)
        listed = dist.get_metadata_lines('SOURCES.txt')
        return any(os.path.normpath(entry) == wanted for entry in listed)
    # Fall back to matching the top-level package name.
    toplevel = resource_name.split('/')[0]
    if dist.has_metadata('top_level.txt'):
        return toplevel in dist.get_metadata_lines('top_level.txt')
    return dist.key == toplevel.lower()
def get_environments(environ, warn=False):
    """Retrieve canonical environment name to path mapping.

    The environments may not be all valid environments, but they are
    good candidates.

    :param environ: WSGI environ; reads ``trac.env_paths`` and
                    ``trac.env_parent_dir``
    :param warn: if `True`, print a warning to stderr when two
                 projects share the same name
    """
    env_paths = environ.get('trac.env_paths', [])
    env_parent_dir = environ.get('trac.env_parent_dir')
    if env_parent_dir:
        env_parent_dir = os.path.normpath(env_parent_dir)
        # Copy before annotate() mutates the cached listing in place.
        paths = dircache.listdir(env_parent_dir)[:]
        dircache.annotate(env_parent_dir, paths)
        # Filter paths that match the .tracignore patterns
        ignore_patterns = get_tracignore_patterns(env_parent_dir)
        # annotate() suffixes directories with '/'; keep only those and
        # strip the marker again.
        paths = [path[:-1] for path in paths if path[-1] == '/'
                 and not any(fnmatch.fnmatch(path[:-1], pattern)
                             for pattern in ignore_patterns)]
        # NOTE(review): extends the list stored in environ in place.
        env_paths.extend(os.path.join(env_parent_dir, project) \
                         for project in paths)
    envs = {}
    for env_path in env_paths:
        env_path = os.path.normpath(env_path)
        if not os.path.isdir(env_path):
            continue
        # The last path component is the canonical environment name.
        env_name = os.path.split(env_path)[1]
        if env_name in envs:
            if warn:
                print >> sys.stderr, ('Warning: Ignoring project "%s" since '
                                      'it conflicts with project "%s"'
                                      % (env_path, envs[env_name]))
        else:
            envs[env_name] = env_path
    return envs
def ticket_changed(self, ticket, comment, author, old_values):
    """Called when a ticket is modified.

    Re-syncs resource tags when any changed field is exposed as a tag.
    """
    tag_field_changed = any(f in self.fields for f in old_values)
    if tag_field_changed:
        self.set_resource_tags(Mock(perm=MockPerm()), ticket, None)
        if self.use_cache:
            # Invalidate resource cache.
            del self._tagged_resources
def test_excludes(self):
    """Excluded sections raise TracError; other sections still render."""
    self.assertRaises(TracError, self.iniadmin.render_admin_panel,
                      self.req, 'tracini', 'iniadmin', '')
    template, data = self.iniadmin.render_admin_panel(
        self.req, 'tracini', 'trac', '')
    names = [opt['name'] for opt in data['iniadmin']['options']]
    self.assertTrue('database' in names)
def test_excludes(self):
    """The excluded 'iniadmin' section errors out; 'trac' renders fine."""
    self.assertRaises(TracError, self.iniadmin.render_admin_panel,
                      self.req, 'tracini', 'iniadmin', '')
    template, data = self.iniadmin.render_admin_panel(
        self.req, 'tracini', 'trac', '')
    found = any(opt['name'] == 'database'
                for opt in data['iniadmin']['options'])
    self.assertTrue(found)
def render_admin_panel(self, req, cat, page, path_info):
    """Render (and on POST, update) the trac.ini section admin panel.

    :raises TracError: when *page* is not an editable section.
    """
    assert req.perm.has_permission('TRAC_ADMIN')
    excludes_match = self._patterns_match(self.excludes)
    if page not in self._get_sections_set(excludes_match):
        raise TracError("Invalid section %s" % page)
    # Registered options of this section, minus excluded ones, by name.
    options = sorted(
        [option for (section, name), option in Option.registry.iteritems()
         if section == page and \
            not excludes_match('%s:%s' % (section, name))],
        key=lambda opt: opt.name)

    # Apply changes
    if req.method == 'POST':
        modified = False
        for name, value in req.args.iteritems():
            # Ignore POST parameters that are not known options.
            if any(name == opt.name for opt in options):
                if self.config.get(page, name) != value:
                    self.config.set(page, name, value)
                    modified = True
        if modified:
            self.log.debug("Updating trac.ini")
            self.config.save()
        req.redirect(req.href.admin(cat, page))

    add_stylesheet(req, 'iniadmin/css/iniadmin.css')
    password_match = self._patterns_match(self.passwords)
    options_data = []
    for option in options:
        doc = self._get_doc(option)
        value = self.config.get(page, option.name)
        # We assume the classes all end in "Option"
        type = option.__class__.__name__.lower()[:-6] or 'text'
        if type == 'list' and not isinstance(value, basestring):
            value = unicode(option.sep).join(list(value))
        option_data = {
            'name': option.name,
            'default': option.default,
            'doc': doc,
            'value': value,
            'type': type
        }
        if type == 'extension':
            # Offer the available implementations as a choice list.
            option_data['options'] = sorted(
                impl.__class__.__name__
                for impl in option.xtnpt.extensions(self))
        elif type == 'text' and \
                password_match('%s:%s' % (option.section, option.name)):
            option_data['type'] = 'password'
        options_data.append(option_data)
    data = {'iniadmin': {'section': page, 'options': options_data}}
    return 'iniadmin.html', data
def check_permission(self, action, username, resource, perm):
    """Check version-control permissions against the authz file.

    Returns `True`/`False` for an explicit decision, or `None` to
    defer to other permission policies.
    """
    realm = resource and resource.realm or None
    if (realm, action) in self._handled_perms:
        authz, users = self._get_authz_info()
        if authz is None:
            # Authz file missing or unreadable: deny outright.
            return False
        if username == 'anonymous':
            usernames = ('$anonymous', '*')
        else:
            usernames = (username, '$authenticated', '*')
        if resource is None:
            # Global check: grant only if the user appears in the file,
            # otherwise defer.
            return users & set(usernames) and True or None
        rm = RepositoryManager(self.env)
        try:
            repos = rm.get_repository(resource.parent.id)
        except TracError:
            return True  # Allow error to be displayed in the repo index
        if repos is None:
            return True
        modules = [resource.parent.id or self.authz_module_name]
        if modules[0]:
            # Also consult the unnamed (global) module section.
            modules.append('')

        def check_path(path):
            path = '/' + join(repos.scope, path)
            if path != '/':
                path += '/'
            # Walk from resource up parent directories
            for spath in parent_iter(path):
                for module in modules:
                    section = authz.get(module, {}).get(spath)
                    if section:
                        # First matching username wins.
                        for user in usernames:
                            result = section.get(user)
                            if result is not None:
                                return result
            # Allow access to parent directories of allowed resources
            if any(section.get(user) is True
                   for module in modules
                   for spath, section in authz.get(module,
                                                   {}).iteritems()
                   if spath.startswith(path)
                   for user in usernames):
                return True

        if realm == 'source':
            return check_path(resource.id)
        elif realm == 'changeset':
            changes = list(repos.get_changeset(resource.id).get_changes())
            if not changes or any(check_path(change[0])
                                  for change in changes):
                return True
def check_permission(self, action, username, resource, perm):
    """Authz-file based version-control permission check.

    Returns an explicit `True`/`False` decision, or `None` to defer
    to other permission policies.
    """
    realm = resource and resource.realm or None
    if (realm, action) in self._handled_perms:
        authz, users = self._get_authz_info()
        if authz is None:
            # No usable authz data: deny.
            return False
        if username == 'anonymous':
            usernames = ('$anonymous', '*')
        else:
            usernames = (username, '$authenticated', '*')
        if resource is None:
            # Global check: grant only if the user appears anywhere in
            # the authz file, otherwise defer.
            return users & set(usernames) and True or None
        rm = RepositoryManager(self.env)
        try:
            repos = rm.get_repository(resource.parent.id)
        except TracError:
            return True  # Allow error to be displayed in the repo index
        if repos is None:
            return True
        modules = [resource.parent.id or self.authz_module_name]
        if modules[0]:
            # Also consult the unnamed (global) module section.
            modules.append('')

        def check_path(path):
            path = '/' + join(repos.scope, path)
            if path != '/':
                path += '/'
            # Walk from resource up parent directories
            for spath in parent_iter(path):
                for module in modules:
                    section = authz.get(module, {}).get(spath)
                    if section:
                        # First matching username wins.
                        for user in usernames:
                            result = section.get(user)
                            if result is not None:
                                return result
            # Allow access to parent directories of allowed resources
            if any(
                    section.get(user) is True
                    for module in modules
                    for spath, section in authz.get(
                        module, {}).iteritems()
                    if spath.startswith(path)
                    for user in usernames):
                return True

        if realm == 'source':
            return check_path(resource.id)
        elif realm == 'changeset':
            changes = list(repos.get_changeset(resource.id).get_changes())
            if not changes or any(
                    check_path(change[0]) for change in changes):
                return True
def accepts_mimetype(req, mimetype):
    """Return whether the request's Accept header matches *mimetype*
    (a MIME type string or a sequence of them).
    """
    if isinstance(mimetype, basestring):
        mimetype = (mimetype,)
    accept = req.get_header('Accept')
    if accept is None:
        # Don't make judgements if no MIME type expected and method is GET
        return req.method == 'GET'
    offered = [chunk.strip() for chunk in accept.split(',')]
    return any(part.startswith(expected)
               for part in offered for expected in mimetype)
def accepts_mimetype(req, mimetype):
    """True when the request's Accept header offers one of *mimetype*."""
    if isinstance(mimetype, basestring):
        mimetype = (mimetype, )
    accept = req.get_header('Accept')
    if accept is None:
        # Don't make judgements if no MIME type expected and method is GET
        return req.method == 'GET'
    candidates = accept.split(',')
    for candidate in candidates:
        candidate = candidate.strip()
        for expected in mimetype:
            if candidate.startswith(expected):
                return True
    return False
def has_resource(dist, resource_name):
    """Return whether *dist* ships *resource_name*, probing the egg
    resource API, pip's installed-files.txt, SOURCES.txt, the PEP 376
    RECORD, and finally top_level.txt / the distribution key.
    """
    # easy_install eggs support the resource API directly.
    if dist.location.endswith('.egg'):
        return dist.has_resource(resource_name)
    # pip records files relative to the *.egg-info directory.
    if dist.has_metadata('installed-files.txt'):
        wanted = os.path.normpath('../' + resource_name)
        recorded = dist.get_metadata_lines('installed-files.txt')
        return any(os.path.normpath(entry) == wanted for entry in recorded)
    if dist.has_metadata('SOURCES.txt'):
        wanted = os.path.normpath(resource_name)
        listed = dist.get_metadata_lines('SOURCES.txt')
        return any(os.path.normpath(entry) == wanted for entry in listed)
    # PEP 376 RECORD (wheel/dist-info installs).
    if dist.has_metadata('RECORD'):
        rows = csv.reader(StringIO(dist.get_metadata('RECORD')))
        return any(row[0] == resource_name for row in rows)
    # Fall back to matching the top-level package name.
    toplevel = resource_name.split('/')[0]
    if dist.has_metadata('top_level.txt'):
        return toplevel in dist.get_metadata_lines('top_level.txt')
    return dist.key == toplevel.lower()
def has_resource(dist, resource_name):
    """Probe *dist*'s metadata, in decreasing order of reliability, to
    decide whether it ships *resource_name*.
    """
    if dist.location.endswith('.egg'):
        # Eggs installed by easy_install answer directly.
        return dist.has_resource(resource_name)
    if dist.has_metadata('installed-files.txt'):
        # pip: entries are relative to the *.egg-info directory.
        target = os.path.normpath('../' + resource_name)
        lines = dist.get_metadata_lines('installed-files.txt')
        return any(target == os.path.normpath(line) for line in lines)
    if dist.has_metadata('SOURCES.txt'):
        target = os.path.normpath(resource_name)
        lines = dist.get_metadata_lines('SOURCES.txt')
        return any(target == os.path.normpath(line) for line in lines)
    if dist.has_metadata('RECORD'):
        # *.dist-info/RECORD is CSV: first column is the file path.
        for row in csv.reader(StringIO(dist.get_metadata('RECORD'))):
            if row[0] == resource_name:
                return True
        return False
    # Last resort: compare the top-level package name.
    toplevel = resource_name.split('/')[0]
    if dist.has_metadata('top_level.txt'):
        return toplevel in dist.get_metadata_lines('top_level.txt')
    return dist.key == toplevel.lower()
def post_process_request(self, req, template, data, content_type):
    """Inject jQuery UI and the enum-list script on matching admin panels."""
    path_info = req.path_info
    on_panel = any(path_info.startswith(panel, len('/admin/ticket/'))
                   for panel in self._panels)
    if on_panel:
        if self._has_add_jquery_ui:
            Chrome(self.env).add_jquery_ui(req)
            add_script(req, 'adminenumlistplugin/adminenumlist.js')
        elif self._jquery_ui_filename:
            add_script(req, self._jquery_ui_filename)
            add_script(req, 'adminenumlistplugin/adminenumlist.js')
    return template, data, content_type
def get_sources(path):
    """Return a dictionary mapping Python module source paths to the
    distributions that contain them.
    """
    sources = {}
    for dist in find_distributions(path, only=True):
        # top_level.txt tells us which path prefixes belong to the dist.
        if not dist.has_metadata('top_level.txt'):
            continue
        toplevels = dist.get_metadata_lines('top_level.txt')
        toplevels = [top + '/' for top in toplevels]
        if dist.has_metadata('SOURCES.txt'):  # *.egg-info/SOURCES.txt
            sources.update((src, dist)
                           for src in dist.get_metadata_lines('SOURCES.txt')
                           if any(src.startswith(top)
                                  for top in toplevels))
            continue
        if dist.has_metadata('RECORD'):  # *.dist-info/RECORD
            reader = csv.reader(StringIO(dist.get_metadata('RECORD')))
            sources.update((row[0], dist) for row in reader
                           if any(row[0].startswith(top)
                                  for top in toplevels))
            continue
    return sources
def render_admin_panel(self, req, cat, page, path_info):
    """Render (and on POST, update) the trac.ini section admin panel.

    :raises TracError: when *page* is not an editable section.
    """
    assert req.perm.has_permission('TRAC_ADMIN')
    excludes_match = self._patterns_match(self.excludes)
    if page not in self._get_sections_set(excludes_match):
        raise TracError("Invalid section %s" % page)
    # Registered options of this section, minus excluded ones, by name.
    options = sorted(
        [option for (section, name), option in Option.registry.iteritems()
         if section == page and \
            not excludes_match('%s:%s' % (section, name))],
        key=lambda opt: opt.name)

    # Apply changes
    if req.method == 'POST':
        modified = False
        for name, value in req.args.iteritems():
            # Ignore POST parameters that are not known options.
            if any(name == opt.name for opt in options):
                if self.config.get(page, name) != value:
                    self.config.set(page, name, value)
                    modified = True
        if modified:
            self.log.debug("Updating trac.ini")
            self.config.save()
        req.redirect(req.href.admin(cat, page))

    add_stylesheet(req, 'iniadmin/css/iniadmin.css')
    password_match = self._patterns_match(self.passwords)
    options_data = []
    for option in options:
        doc = self._get_doc(option)
        value = self.config.get(page, option.name)
        # We assume the classes all end in "Option"
        type = option.__class__.__name__.lower()[:-6] or 'text'
        if type == 'list' and not isinstance(value, basestring):
            value = unicode(option.sep).join(list(value))
        option_data = {'name': option.name, 'default': option.default,
                       'doc': doc, 'value': value, 'type': type}
        if type == 'extension':
            # Offer the available implementations as a choice list.
            option_data['options'] = sorted(
                impl.__class__.__name__
                for impl in option.xtnpt.extensions(self))
        elif type == 'text' and \
                password_match('%s:%s' % (option.section, option.name)):
            option_data['type'] = 'password'
        options_data.append(option_data)
    data = {'iniadmin': {'section': page, 'options': options_data}}
    return 'iniadmin.html', data
def set_defaults(self, compmgr=None):
    """Retrieve all default values and store them explicitly in the
    configuration, so that they can be saved to file.

    Values already set in the configuration are not overridden.

    :param compmgr: restrict the defaults to the options of this
                    component manager's components, if given
    """
    for section, default_options in self.defaults(compmgr).items():
        for name, value in default_options.items():
            if not self.parser.has_option(_to_utf8(section),
                                          _to_utf8(name)):
                # When a parent (inherited) file already defines the
                # option, store an empty placeholder instead of the
                # default so the parent value keeps precedence.
                if any(parent[section].contains(name, defaults=False)
                       for parent in self.parents):
                    value = None
                self.set(section, name, value)
def get_sources(path):
    """Return a dictionary mapping Python module source paths to the
    distributions that contain them.
    """
    sources = {}
    for dist in find_distributions(path, only=True):
        try:
            toplevels = dist.get_metadata('top_level.txt').splitlines()
            toplevels = [each + '/' for each in toplevels]
            files = dist.get_metadata('SOURCES.txt').splitlines()
            # Keep only sources below one of the dist's top-level
            # packages.
            sources.update((src, dist) for src in files if any(
                src.startswith(toplevel) for toplevel in toplevels))
        except (KeyError, IOError):
            pass  # Metadata not found
    return sources
def render(self, context, mimetype, content, filename=None, rev=None):
    """Render unified-diff *content* as Trac's inline diff HTML.

    Returns `None` (renders nothing) when the content does not parse
    into any diff hunks.
    """
    req = context.req
    content = content_to_unicode(self.env, content, mimetype)
    changes = self._diff_to_hdf(content.splitlines(),
                                Mimeview(self.env).tab_width)
    if not changes or not any(c['diffs'] for c in changes):
        self.log.warning('Invalid unified diff content')
        return
    data = {'diff': {'style': 'inline'}, 'no_id': True,
            'changes': changes, 'longcol': 'File', 'shortcol': ''}
    add_script(req, 'common/js/diff.js')
    add_stylesheet(req, 'common/css/diff.css')
    return Chrome(self.env).render_template(req, 'diff_div.html', data,
                                            fragment=True)
def _check_dir(self, req, dir):
    """Validate *dir* as a repository directory, adding a warning to
    the request on failure.  Returns `True` when acceptable.
    """
    if not os.path.isabs(dir):
        add_warning(req, _('The repository directory must be an absolute path.'))
        return False
    # Resolve the configured prefixes relative to the environment path.
    prefixes = [os.path.join(self.env.path, prefix)
                for prefix in self.allowed_repository_dir_prefixes]
    below_allowed = not prefixes or any(util.is_path_below(dir, prefix)
                                        for prefix in prefixes)
    if not below_allowed:
        add_warning(req, _('The repository directory must be located below one of the following directories: '
                           '%(dirs)s', dirs=', '.join(prefixes)))
        return False
    return True
def get_sources(path):
    """Return a dictionary mapping Python module source paths to the
    distributions that contain them.
    """
    sources = {}
    for dist in find_distributions(path, only=True):
        try:
            prefixes = [top + '/' for top in
                        dist.get_metadata('top_level.txt').splitlines()]
            entries = dist.get_metadata('SOURCES.txt').splitlines()
            for src in entries:
                # Keep only sources below a top-level package.
                if any(src.startswith(prefix) for prefix in prefixes):
                    sources[src] = dist
        except (KeyError, IOError):
            pass  # Metadata not found
    return sources
def _check_dir(self, req, dir):
    """Check that a repository directory is valid, and add a warning
    message if not.

    :return: `True` if *dir* is acceptable, `False` otherwise.
    """
    if not os.path.isabs(dir):
        add_warning(req, _('The repository directory must be an absolute '
                           'path.'))
        return False
    # Restrict new repository dirs to the configured prefixes (if any),
    # resolved relative to the environment path.
    prefixes = [os.path.join(self.env.path, prefix)
                for prefix in self.allowed_repository_dir_prefixes]
    if prefixes and not any(is_path_below(dir, prefix)
                            for prefix in prefixes):
        add_warning(req, _('The repository directory must be located '
                           'below one of the following directories: '
                           '%(dirs)s', dirs=', '.join(prefixes)))
        return False
    return True
def resolve_relative_name(pagename, referrer):
    """Resolver for Trac wiki page paths.

    This method handles absolute as well as relative wiki paths.
    """
    # Code taken from trac.wiki.api.WikiSystem at r10905,
    # improved by Jun Omae for compatibility with Python2.4.
    relative = any(pagename.startswith(p) for p in ('./', '../'))
    if not relative:
        return pagename.lstrip('/')
    resolved = referrer.split('/')
    parts = pagename.split('/')
    for idx, part in enumerate(parts):
        if part == '..':
            # Climb one level, but never above the root.
            if resolved:
                resolved.pop()
        elif part and part != '.':
            # First real component: append the remainder and stop.
            resolved.extend(parts[idx:])
            break
    return '/'.join(resolved)
def resolve_relative_name(pagename, referrer):
    """Resolver for Trac wiki page paths.

    This method handles absolute as well as relative wiki paths.
    """
    # Code taken from trac.wiki.api.WikiSystem at r10905,
    # improved by Jun Omae for compatibility with Python2.4.
    if not any(pagename.startswith(prefix) for prefix in ("./", "../")):
        # Absolute path: just drop any leading slashes.
        return pagename.lstrip("/")
    stack = referrer.split("/")
    segments = pagename.split("/")
    for pos, segment in enumerate(segments):
        if segment == "..":
            # Climb one level, never above the root.
            if stack:
                stack.pop()
        elif segment and segment != ".":
            # First real component: append the rest and stop.
            stack.extend(segments[pos:])
            break
    return "/".join(stack)
def _do_dump(self, directory, *names):
    """Export wiki pages matching *names* to files under *directory*.

    A name ending in '*' acts as a prefix wildcard; with no names
    given, all pages are dumped.
    """
    if not names:
        names = ['*']
    pages = self.get_wiki_list()
    if not os.path.isdir(directory):
        if not os.path.exists(directory):
            os.mkdir(directory)
        else:
            raise AdminCommandError(_("'%(name)s' is not a directory",
                                      name=directory))
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    for p in pages:
        # Exact match, or prefix match for a trailing-'*' pattern.
        if any(p == name
               or (name.endswith('*') and p.startswith(name[:-1]))
               for name in names):
            dst = os.path.join(directory, unicode_quote(p, ''))
            printout(' %s => %s' % (p, dst))
            self.export_page(p, dst, cursor)
def check_path(path):
    """Check authz for *path*: returns True/False on an explicit
    decision, or None when the file says nothing.

    Closure over ``repos``, ``modules``, ``authz`` and ``usernames``
    from the enclosing permission check.
    """
    path = '/' + join(repos.scope, path)
    if path != '/':
        path += '/'
    # Walk from resource up parent directories
    for spath in parent_iter(path):
        for module in modules:
            section = authz.get(module, {}).get(spath)
            if section:
                # First matching username wins.
                for user in usernames:
                    result = section.get(user)
                    if result is not None:
                        return result
    # Allow access to parent directories of allowed resources
    if any(section.get(user) is True
           for module in modules
           for spath, section in authz.get(module, {}).iteritems()
           if spath.startswith(path)
           for user in usernames):
        return True
def check_path(path):
    """Authz lookup for *path*: True/False for an explicit rule,
    None when no rule applies.

    Closure over ``repos``, ``modules``, ``authz`` and ``usernames``
    from the enclosing scope.
    """
    path = '/' + join(repos.scope, path)
    if path != '/':
        path += '/'
    # Walk from resource up parent directories
    for spath in parent_iter(path):
        for module in modules:
            section = authz.get(module, {}).get(spath)
            if section:
                # First matching username wins.
                for user in usernames:
                    result = section.get(user)
                    if result is not None:
                        return result
    # Allow access to parent directories of allowed resources
    if any(
            section.get(user) is True
            for module in modules
            for spath, section in authz.get(
                module, {}).iteritems()
            if spath.startswith(path)
            for user in usernames):
        return True
def create(self, options=[]): """Create the basic directory structure of the environment, initialize the database and populate the configuration file with default values. If options contains ('inherit', 'file'), default values will not be loaded; they are expected to be provided by that file or other options. """ # Create the directory structure if not os.path.exists(self.path): os.mkdir(self.path) os.mkdir(self.get_log_dir()) os.mkdir(self.get_htdocs_dir()) os.mkdir(os.path.join(self.path, 'plugins')) # Create a few files create_file(os.path.join(self.path, 'VERSION'), 'Trac Environment Version 1\n') create_file( os.path.join(self.path, 'README'), 'This directory contains a Trac environment.\n' 'Visit http://trac.edgewall.org/ for more information.\n') # Setup the default configuration os.mkdir(os.path.join(self.path, 'conf')) create_file(os.path.join(self.path, 'conf', 'trac.ini.sample')) config = Configuration(os.path.join(self.path, 'conf', 'trac.ini')) for section, name, value in options: config.set(section, name, value) config.save() self.setup_config() if not any((section, option) == ('inherit', 'file') for section, option, value in options): self.config.set_defaults(self) self.config.save() # Create the database DatabaseManager(self).init_db()
def expand_macro(self, formatter, name, content, args={}):
    """Show a changeset's commit message, guarding ticket references.

    Accepts either WikiProcessor ``args`` (``repository``/``revision``
    keys) or inline ``content`` of the form ``rev`` or
    ``rev, reponame``.
    """
    if args:
        reponame = args.get('repository', '')
        rev = args.get('revision')
    else:
        if ',' in content:
            reponame = ''
            rev = 0
            for c in [x.strip() for x in content.split(',')]:
                # NOTE(review): str.isnumeric only exists on unicode
                # strings in Python 2 — assumes *content* is unicode.
                if c.isnumeric():
                    rev = c
                else:
                    reponame = c
        else:
            rev = content.strip()
            reponame = ''
    repos = RepositoryManager(self.env).get_repository(reponame)
    if repos:
        changeset = repos.get_changeset(rev)
        message = changeset.message
        rev = changeset.rev
    else:
        # Unknown repository: fall back to the raw macro content.
        message = content
    if formatter.context.resource.realm == 'ticket':
        # Only render messages that reference the ticket being viewed.
        ticket_re = CommitTicketUpdater.ticket_re
        # NOTE(review): compares int(tkt_id) against resource.id
        # without converting the latter — verify resource.id is an int.
        if not any(int(tkt_id) == formatter.context.resource.id
                   for tkt_id in ticket_re.findall(message)):
            return tag.div(tag.p(_("(The changeset message doesn't "
                                   "reference this ticket)"),
                                 class_='hint'),
                           class_='commitmessage')
    if ChangesetModule(self.env).wiki_format_messages:
        return tag.div(format_to_html(self.env,
                                      formatter.context('changeset', rev,
                                                        parent=repos.resource),
                                      message, escape_newlines=True),
                       class_='commitmessage')
    else:
        return tag.pre(message, class_='commitmessage')
def get_environments(environ, warn=False):
    """Retrieve canonical environment name to path mapping.

    The environments may not be all valid environments, but they are
    good candidates.

    :param environ: WSGI environ; reads ``trac.env_paths`` and
                    ``trac.env_parent_dir``
    :param warn: if `True`, print a warning to stderr when two
                 projects share the same name
    """
    env_paths = environ.get('trac.env_paths', [])
    env_parent_dir = environ.get('trac.env_parent_dir')
    if env_parent_dir:
        env_parent_dir = os.path.normpath(env_parent_dir)
        # Copy before annotate() mutates the cached listing in place.
        paths = dircache.listdir(env_parent_dir)[:]
        dircache.annotate(env_parent_dir, paths)
        # Filter paths that match the .tracignore patterns
        ignore_patterns = get_tracignore_patterns(env_parent_dir)
        # annotate() suffixes directories with '/'; keep only those
        # and strip the marker again.
        paths = [
            path[:-1] for path in paths
            if path[-1] == '/' and not any(
                fnmatch.fnmatch(path[:-1], pattern)
                for pattern in ignore_patterns)
        ]
        # NOTE(review): extends the list stored in environ in place.
        env_paths.extend(os.path.join(env_parent_dir, project) \
                         for project in paths)
    envs = {}
    for env_path in env_paths:
        env_path = os.path.normpath(env_path)
        if not os.path.isdir(env_path):
            continue
        # The last path component is the canonical environment name.
        env_name = os.path.split(env_path)[1]
        if env_name in envs:
            if warn:
                print >> sys.stderr, ('Warning: Ignoring project "%s" since '
                                      'it conflicts with project "%s"'
                                      % (env_path, envs[env_name]))
        else:
            envs[env_name] = env_path
    return envs
def create(self, options=[]): """Create the basic directory structure of the environment, initialize the database and populate the configuration file with default values. If options contains ('inherit', 'file'), default values will not be loaded; they are expected to be provided by that file or other options. """ # Create the directory structure if not os.path.exists(self.path): os.mkdir(self.path) os.mkdir(self.get_log_dir()) os.mkdir(self.get_htdocs_dir()) os.mkdir(os.path.join(self.path, 'plugins')) # Create a few files create_file(os.path.join(self.path, 'VERSION'), 'Trac Environment Version 1\n') create_file(os.path.join(self.path, 'README'), 'This directory contains a Trac environment.\n' 'Visit http://trac.edgewall.org/ for more information.\n') # Setup the default configuration os.mkdir(os.path.join(self.path, 'conf')) create_file(os.path.join(self.path, 'conf', 'trac.ini.sample')) config = Configuration(os.path.join(self.path, 'conf', 'trac.ini')) for section, name, value in options: config.set(section, name, value) config.save() self.setup_config() if not any((section, option) == ('inherit', 'file') for section, option, value in options): self.config.set_defaults(self) self.config.save() # Create the database DatabaseManager(self).init_db()
def render(self, context, mimetype, content, filename=None, rev=None):
    """Render unified-diff *content* as Trac's inline diff HTML;
    renders nothing when no diff hunks are found.
    """
    req = context.req
    text = content_to_unicode(self.env, content, mimetype)
    changes = self._diff_to_hdf(text.splitlines(),
                                Mimeview(self.env).tab_width)
    has_hunks = changes and any(change['diffs'] for change in changes)
    if not has_hunks:
        self.log.warning('Invalid unified diff content')
        return
    data = {'diff': {'style': 'inline'}, 'no_id': True,
            'changes': changes, 'longcol': 'File', 'shortcol': ''}
    add_script(req, 'common/js/diff.js')
    add_stylesheet(req, 'common/css/diff.css')
    return Chrome(self.env).render_template(req, 'diff_div.html', data,
                                            fragment=True)
def save(self, author, comment='', when=None, db=None):
    """Save new links.

    Synchronizes the in-memory blocking/blocked-by sets with the
    `mastertickets` table, mirrors the link lists into the
    'blocking'/'blockedby' ticket_custom fields of every referenced
    ticket, records a ticket_change entry, and refreshes each affected
    ticket's changetime.

    :param author: username recorded on the generated ticket changes
    :param comment: optional comment copied to referenced tickets
    :param when: change timestamp; defaults to now in UTC
    :param db: existing connection to reuse; when None a new connection
        is obtained and committed at the end
    """
    if when is None:
        when = datetime.now(utc)
    when_ts = to_utimestamp(when)
    handle_commit = False
    if db is None:
        db = self.env.get_db_cnx()
        # We opened the connection, so we are responsible for committing.
        handle_commit = True
    cursor = db.cursor()
    new_blocking = set(int(n) for n in self.blocking)
    new_blocked_by = set(int(n) for n in self.blocked_by)
    to_check = [
        # new, old, field
        (new_blocking, self._old_blocking, 'blockedby',
         ('source', 'dest')),
        (new_blocked_by, self._old_blocked_by, 'blocking',
         ('dest', 'source')),
    ]
    for new_ids, old_ids, field, sourcedest in to_check:
        # Union of new and old ids covers both additions and removals.
        for n in new_ids | old_ids:
            update_field = None
            if n in new_ids and n not in old_ids:
                # New ticket added
                cursor.execute(
                    'INSERT INTO mastertickets (%s, %s) VALUES (%%s, %%s)'
                    % sourcedest, (self.tkt.id, n))
                update_field = lambda lst: lst.append(str(self.tkt.id))
            elif n not in new_ids and n in old_ids:
                # Old ticket removed
                cursor.execute(
                    'DELETE FROM mastertickets WHERE %s=%%s AND %s=%%s'
                    % sourcedest, (self.tkt.id, n))
                update_field = lambda lst: lst.remove(str(self.tkt.id))
            if update_field is not None:
                # Rewrite the comma-separated id list stored in the
                # referenced ticket's custom field.
                cursor.execute(
                    'SELECT value FROM ticket_custom WHERE ticket=%s AND name=%s',
                    (n, str(field)))
                old_value = (cursor.fetchone() or ('', ))[0]
                new_value = [x.strip() for x in old_value.split(',')
                             if x.strip()]
                update_field(new_value)
                new_value = ', '.join(
                    sorted(new_value, key=lambda x: int(x)))
                # ticket, time and field must be unique for database integrity
                # The TicketImportPlugin assigns the same changetime to all ticket
                # if not specified, which was causing an IntegrityError (#10194).
                changelog = Ticket(self.env, n).get_changelog(when=when)
                if any(field in cl for cl in changelog):
                    cursor.execute(
                        """
                        UPDATE ticket_change SET author=%s, oldvalue=%s, newvalue=%s
                        WHERE ticket=%s AND time=%s AND field=%s
                        """, (author, old_value, new_value, n, when_ts,
                              field))
                else:
                    cursor.execute(
                        """
                        INSERT INTO ticket_change
                            (ticket, time, author, field, oldvalue, newvalue)
                        VALUES (%s, %s, %s, %s, %s, %s)""",
                        (n, when_ts, author, field, old_value, new_value))
                # Add comment to referenced ticket if a comment hasn't already been added
                # NOTE(review): this checks self.tkt's changelog, not
                # ticket n's — looks intentional per the code but verify.
                if comment and not any(
                        'comment' in entry
                        for entry in self.tkt.get_changelog(when)):
                    cursor.execute(
                        'INSERT INTO ticket_change (ticket, time, author, field, oldvalue, newvalue) VALUES (%s, %s, %s, %s, %s, %s)',
                        (n, when_ts, author, 'comment', '',
                         '(In #%s) %s' % (self.tkt.id, comment)))
                cursor.execute(
                    'UPDATE ticket_custom SET value=%s WHERE ticket=%s AND name=%s',
                    (new_value, n, field))
                # refresh the changetime to prevent concurrent edits
                cursor.execute(
                    'UPDATE ticket SET changetime=%s WHERE id=%s',
                    (when_ts, n))
                # NOTE(review): rowcount here refers to the ticket UPDATE
                # just above, not the ticket_custom UPDATE — confirm this
                # fallback INSERT fires in the intended case.
                if not cursor.rowcount:
                    cursor.execute(
                        'INSERT INTO ticket_custom (ticket, name, value) VALUES (%s, %s, %s)',
                        (n, field, new_value))
    # cursor.execute('DELETE FROM mastertickets WHERE source=%s OR dest=%s', (self.tkt.id, self.tkt.id))
    # data = []
    # for tkt in self.blocking:
    #     if isinstance(tkt, Ticket):
    #         tkt = tkt.id
    #     data.append((self.tkt.id, tkt))
    # for tkt in self.blocked_by:
    #     if isisntance(tkt, Ticket):
    #         tkt = tkt.id
    #     data.append((tkt, self.tkt.id))
    #
    # cursor.executemany('INSERT INTO mastertickets (source, dest) VALUES (%s, %s)', data)
    if handle_commit:
        db.commit()
def _render_dir(self, req, repos, node, rev, order, desc):
    """Build the template data for a repository directory listing.

    :param req: the current request (permissions are checked against it)
    :param repos: the repository the node belongs to
    :param node: the directory node being browsed
    :param rev: requested revision, or a false value for the youngest
    :param order: sort key: 'date', 'size', 'author', or name by default
    :param desc: True for descending sort order
    :return: dict with 'entries', 'changes', age-colorizer data and the
        min/max range bounds used by the age column
    """
    req.perm(node.resource).require('BROWSER_VIEW')

    # Entries metadata — lightweight snapshot of each child node's
    # attributes, so the nodes themselves need not be kept around.
    class entry(object):
        __slots__ = 'name rev kind isdir path content_length'.split()

        def __init__(self, node):
            for f in entry.__slots__:
                setattr(self, f, getattr(node, f))

    entries = [entry(n) for n in node.get_entries()
               if n.can_view(req.perm)]
    changes = get_changes(repos, [i.rev for i in entries], self.log)

    if rev:
        newest = repos.get_changeset(rev).date
    else:
        newest = datetime.now(req.tz)

    # Color scale for the age column
    timerange = custom_colorizer = None
    if self.color_scale:
        timerange = TimeRange(newest)
        # Seed the range with bounds passed down from a parent view, so
        # colors stay consistent while navigating.
        max_s = req.args.get('range_max_secs')
        min_s = req.args.get('range_min_secs')
        parent_range = [timerange.from_seconds(long(s))
                        for s in [max_s, min_s] if s]
        this_range = [c.date for c in changes.values() if c]
        for dt in this_range + parent_range:
            timerange.insert(dt)
        custom_colorizer = self.get_custom_colorizer()

    # Ordering of entries: secondary key is always the natural-number
    # aware lowercase name.
    if order == 'date':
        def file_order(a):
            return (changes[a.rev].date,
                    embedded_numbers(a.name.lower()))
    elif order == 'size':
        def file_order(a):
            return (a.content_length,
                    embedded_numbers(a.name.lower()))
    elif order == 'author':
        def file_order(a):
            return (changes[a.rev].author.lower(),
                    embedded_numbers(a.name.lower()))
    else:
        def file_order(a):
            return embedded_numbers(a.name.lower())

    # Directories always group before files, regardless of sort order.
    dir_order = desc and 1 or -1

    def browse_order(a):
        return a.isdir and dir_order or 0, file_order(a)
    entries = sorted(entries, key=browse_order, reverse=desc)

    # ''Zip Archive'' alternate link
    path = node.path.strip('/')
    if repos.reponame:
        path = repos.reponame + '/' + path
    if any(fnmatchcase(path, p.strip('/'))
           for p in self.downloadable_paths):
        zip_href = req.href.changeset(rev or repos.youngest_rev,
                                      repos.reponame or None, node.path,
                                      old=rev,
                                      old_path=repos.reponame or '/',
                                      format='zip')
        add_link(req, 'alternate', zip_href, _('Zip Archive'),
                 'application/zip', 'zip')

    return {'entries': entries, 'changes': changes,
            'timerange': timerange, 'colorize_age': custom_colorizer,
            'range_max_secs': (timerange and
                               timerange.to_seconds(timerange.newest)),
            'range_min_secs': (timerange and
                               timerange.to_seconds(timerange.oldest)),
            }
def _format_link(self, formatter, ns, match, label, fullmatch=None):
    """Render a TracLinks 'log:' link (and its 'log1'/'log2' variants).

    Resolves the path/revision spec against the repository manager and
    returns a link to the revision log when the user has LOG_VIEW.
    """
    if ns == 'log1':
        # Intertrac-style link: "log1" matches come pre-split into
        # named groups by the wiki formatter.
        groups = fullmatch.groupdict()
        it_log = groups.get('it_log')
        revs = groups.get('log_revs')
        path = groups.get('log_path') or '/'
        target = '%s%s@%s' % (it_log, path, revs)
        # prepending it_log is needed, as the helper expects it there
        intertrac = formatter.shorthand_intertrac_helper(
            'log', target, label, fullmatch)
        if intertrac:
            return intertrac
        path, query, fragment = formatter.split_link(path)
    else:
        assert ns in ('log', 'log2')
        if ns == 'log':
            match, query, fragment = formatter.split_link(match)
        else:
            # 'log2' form has rev/path swapped; reverse the two halves.
            query = fragment = ''
            match = ''.join(reversed(match.split('/', 1)))
        path = match
        revs = ''
        if self.LOG_LINK_RE.match(match):
            # Split on the first ':' or '@' separator; "sep in match"
            # yields False when absent, filtered out below (an index of
            # 0 is preserved because the filter tests "is not False").
            indexes = [sep in match and match.index(sep) for sep in ':@']
            idx = min([i for i in indexes if i is not False])
            path, revs = match[:idx], match[idx+1:]

    rm = RepositoryManager(self.env)
    try:
        reponame, repos, path = rm.get_repository_by_path(path)
        if not reponame:
            reponame = rm.get_default_repository(formatter.context)
            if reponame is not None:
                repos = rm.get_repository(reponame)

        if repos:
            revranges = None
            # Any of ':', '-' or ',' in the revs spec means a range/list.
            if any(c for c in ':-,' if c in revs):
                revranges = self._normalize_ranges(repos, path, revs)
                revs = None
            if 'LOG_VIEW' in formatter.perm:
                if revranges:
                    href = formatter.href.log(repos.reponame or None,
                                              path or '/',
                                              revs=str(revranges))
                else:
                    try:
                        rev = repos.normalize_rev(revs)
                    except NoSuchChangeset:
                        rev = None
                    href = formatter.href.log(repos.reponame or None,
                                              path or '/', rev=rev)
                if query and (revranges or revs):
                    query = '&' + query[1:]
                return tag.a(label, class_='source',
                             href=href + query + fragment)
            errmsg = _("No permission to view change log")
        elif reponame:
            errmsg = _("Repository '%(repo)s' not found", repo=reponame)
        else:
            errmsg = _("No default repository defined")
    except TracError, e:
        errmsg = to_unicode(e)
    # NOTE(review): errmsg is assigned but no return is visible here —
    # the function appears truncated in this chunk; presumably it ends
    # by returning an error-styled link using errmsg. Confirm upstream.
def save(self, author, comment="", when=None, db=None):
    """Save new links.

    Synchronizes the in-memory blocking/blocked-by sets with the
    `mastertickets` table, mirrors the link lists into the
    'blocking'/'blockedby' ticket_custom fields of every referenced
    ticket, records a ticket_change entry, and refreshes each affected
    ticket's changetime.

    :param author: username recorded on the generated ticket changes
    :param comment: optional comment copied to referenced tickets
    :param when: change timestamp; defaults to now in UTC
    :param db: existing connection to reuse; when None a new connection
        is obtained and committed at the end
    """
    if when is None:
        when = datetime.now(utc)
    when_ts = to_utimestamp(when)
    handle_commit = False
    if db is None:
        db = self.env.get_db_cnx()
        # We opened the connection, so we are responsible for committing.
        handle_commit = True
    cursor = db.cursor()
    new_blocking = set(int(n) for n in self.blocking)
    new_blocked_by = set(int(n) for n in self.blocked_by)
    to_check = [
        # new, old, field
        (new_blocking, self._old_blocking, "blockedby",
         ("source", "dest")),
        (new_blocked_by, self._old_blocked_by, "blocking",
         ("dest", "source")),
    ]
    for new_ids, old_ids, field, sourcedest in to_check:
        # Union of new and old ids covers both additions and removals.
        for n in new_ids | old_ids:
            update_field = None
            if n in new_ids and n not in old_ids:
                # New ticket added
                cursor.execute(
                    "INSERT INTO mastertickets (%s, %s) VALUES (%%s, %%s)"
                    % sourcedest, (self.tkt.id, n)
                )
                update_field = lambda lst: lst.append(str(self.tkt.id))
            elif n not in new_ids and n in old_ids:
                # Old ticket removed
                cursor.execute(
                    "DELETE FROM mastertickets WHERE %s=%%s AND %s=%%s"
                    % sourcedest, (self.tkt.id, n))
                update_field = lambda lst: lst.remove(str(self.tkt.id))
            if update_field is not None:
                # Rewrite the comma-separated id list stored in the
                # referenced ticket's custom field.
                cursor.execute(
                    "SELECT value FROM ticket_custom WHERE ticket=%s AND name=%s",
                    (n, str(field)))
                old_value = (cursor.fetchone() or ("",))[0]
                new_value = [x.strip() for x in old_value.split(",")
                             if x.strip()]
                update_field(new_value)
                new_value = ", ".join(
                    sorted(new_value, key=lambda x: int(x)))
                # ticket, time and field must be unique for database integrity
                # The TicketImportPlugin assigns the same changetime to all ticket
                # if not specified, which was causing an IntegrityError (#10194).
                changelog = Ticket(self.env, n).get_changelog(when=when)
                if any(field in cl for cl in changelog):
                    cursor.execute(
                        """
                        UPDATE ticket_change SET author=%s, oldvalue=%s, newvalue=%s
                        WHERE ticket=%s AND time=%s AND field=%s
                        """,
                        (author, old_value, new_value, n, when_ts, field),
                    )
                else:
                    cursor.execute(
                        """
                        INSERT INTO ticket_change
                            (ticket, time, author, field, oldvalue, newvalue)
                        VALUES (%s, %s, %s, %s, %s, %s)""",
                        (n, when_ts, author, field, old_value, new_value),
                    )
                # Add comment to referenced ticket if a comment hasn't already been added
                # NOTE(review): this checks self.tkt's changelog, not
                # ticket n's — looks intentional per the code but verify.
                if comment and not any("comment" in entry
                                       for entry in
                                       self.tkt.get_changelog(when)):
                    cursor.execute(
                        "INSERT INTO ticket_change (ticket, time, author, field, oldvalue, newvalue) VALUES (%s, %s, %s, %s, %s, %s)",
                        (n, when_ts, author, "comment", "",
                         "(In #%s) %s" % (self.tkt.id, comment)),
                    )
                cursor.execute(
                    "UPDATE ticket_custom SET value=%s WHERE ticket=%s AND name=%s",
                    (new_value, n, field)
                )
                # refresh the changetime to prevent concurrent edits
                cursor.execute(
                    "UPDATE ticket SET changetime=%s WHERE id=%s",
                    (when_ts, n))
                # NOTE(review): rowcount here refers to the ticket UPDATE
                # just above, not the ticket_custom UPDATE — confirm this
                # fallback INSERT fires in the intended case.
                if not cursor.rowcount:
                    cursor.execute(
                        "INSERT INTO ticket_custom (ticket, name, value) VALUES (%s, %s, %s)",
                        (n, field, new_value)
                    )
    # cursor.execute('DELETE FROM mastertickets WHERE source=%s OR dest=%s', (self.tkt.id, self.tkt.id))
    # data = []
    # for tkt in self.blocking:
    #     if isinstance(tkt, Ticket):
    #         tkt = tkt.id
    #     data.append((self.tkt.id, tkt))
    # for tkt in self.blocked_by:
    #     if isisntance(tkt, Ticket):
    #         tkt = tkt.id
    #     data.append((tkt, self.tkt.id))
    #
    # cursor.executemany('INSERT INTO mastertickets (source, dest) VALUES (%s, %s)', data)
    if handle_commit:
        db.commit()
def post_process_request(self, req, template, data, content_type):
    """Inject header contents on wiki, roadmap and milestone HTML pages.

    Always passes (template, data, content_type) through unchanged, as
    the request-filter contract requires.
    """
    if template and template.endswith('.html'):
        for prefix in ('wiki_', 'roadmap', 'milestone_'):
            if template.startswith(prefix):
                self._add_header_contents(req)
                break
    return template, data, content_type
def send_internal_error(env, req, exc_info):
    """Send a 500 'Internal Error' page for the given exception.

    For TRAC_ADMIN users (and non-MemoryError failures) the page also
    includes traceback frames, the enabled-plugin list, a guess at the
    faulty plugin and the tracker where the bug should be reported, plus
    a pre-formatted wiki report in English and in the current locale.

    :param env: the environment, may be None (e.g. very early failures)
    :param req: the current request
    :param exc_info: (type, value, traceback) as from sys.exc_info()
    """
    if env:
        env.log.error("Internal Server Error: %s",
                      exception_to_unicode(exc_info[1], traceback=True))
    message = exception_to_unicode(exc_info[1])
    traceback = get_last_traceback()

    frames, plugins, faulty_plugins = [], [], []
    tracker = 'http://trac.edgewall.org'
    th = 'http://trac-hacks.org'
    has_admin = False
    try:
        has_admin = 'TRAC_ADMIN' in req.perm
    except Exception:
        # Permission lookup itself may fail during error handling;
        # degrade to the non-admin page.
        pass
    if has_admin and not isinstance(exc_info[1], MemoryError):
        # Collect frame and plugin information
        frames = get_frame_info(exc_info[2])
        if env:
            # Only plugins with at least one enabled component matter.
            plugins = [p for p in get_plugin_info(env)
                       if any(c['enabled']
                              for m in p['modules'].itervalues()
                              for c in m['components'].itervalues())]
            match_plugins_to_frames(plugins, frames)
        # Identify the tracker where the bug should be reported
        faulty_plugins = [p for p in plugins if 'frame_idx' in p]
        faulty_plugins.sort(key=lambda p: p['frame_idx'])
        if faulty_plugins:
            info = faulty_plugins[0]['info']
            if 'trac' in info:
                tracker = info['trac']
            elif info.get('home_page', '').startswith(th):
                tracker = th

    def get_description(_):
        # Build the wiki-formatted bug report; `_` is the translation
        # function so the same body can be rendered in two locales.
        if env and has_admin:
            sys_info = "".join("|| '''`%s`''' || `%s` ||\n"
                               % (k, v.replace('\n', '` [[br]] `'))
                               for k, v in env.get_systeminfo())
            sys_info += "|| '''`jQuery`''' || `#JQUERY#` ||\n"
            enabled_plugins = "".join("|| '''`%s`''' || `%s` ||\n"
                                      % (p['name'],
                                         p['version'] or _('N/A'))
                                      for p in plugins)
        else:
            sys_info = _("''System information not available''\n")
            enabled_plugins = _("''Plugin information not available''\n")
        return _("""\
==== How to Reproduce ====

While doing a %(method)s operation on `%(path_info)s`, Trac issued an
internal error.

''(please provide additional details here)''

Request parameters:
{{{
%(req_args)s
}}}

User agent: `#USER_AGENT#`

==== System Information ====
%(sys_info)s
==== Enabled Plugins ====
%(enabled_plugins)s
==== Python Traceback ====
{{{
%(traceback)s}}}""",
                 method=req.method, path_info=req.path_info,
                 req_args=pformat(req.args), sys_info=sys_info,
                 enabled_plugins=enabled_plugins,
                 traceback=to_unicode(traceback))

    # Generate the description once in English, once in the current locale
    description_en = get_description(lambda s, **kw: safefmt(s, kw))
    try:
        description = get_description(_)
    except Exception:
        # Translation may fail; fall back to the English report.
        description = description_en

    data = {'title': 'Internal Error', 'type': 'internal',
            'message': message, 'traceback': traceback, 'frames': frames,
            'shorten_line': shorten_line, 'plugins': plugins,
            'faulty_plugins': faulty_plugins, 'tracker': tracker,
            'description': description, 'description_en': description_en}

    try:
        req.send_error(exc_info, status=500, env=env, data=data)
    except RequestDone:
        pass
def is_component_enabled(self, cls):
    """Restrict components to the trac/tracopt namespaces.

    Anything outside 'trac.' or 'tracopt.' is rejected outright;
    otherwise the decision is delegated to the base Environment rules.
    """
    name = self._component_name(cls)
    if name.startswith(('trac.', 'tracopt.')):
        return Environment.is_component_enabled(self, cls)
    return False
def expand_macro(self, formatter, name, content, realms=[]):
    """Evaluate macro call and render results.

    Calls from web-UI come with pre-processed realm selection.

    Handles both the TagCloud and ListTagged macros; `realms` narrows
    the taggable realms searched (default: derived from macro kwargs
    and the query expression).
    """
    env = self.env
    req = formatter.req
    tag_system = TagSystem(env)

    all_realms = tag_system.get_taggable_realms()
    if not all_realms:
        # Tag providers are required, no result without at least one.
        return ''
    args, kw = parse_args(content)
    query = args and args[0].strip() or None
    if not realms:
        # Check macro arguments for realms (typical wiki macro call).
        realms = 'realm' in kw and kw['realm'].split('|') or []
    if query:
        # Add realms from query expression.
        realms.extend(query_realms(query, all_realms))
        # Remove redundant realm selection for performance.
        if set(realms) == all_realms:
            query = re.sub('(^|\W)realm:\S+(\W|$)', ' ', query).strip()
    if name == 'TagCloud':
        # Set implicit 'all tagged realms' as default.
        if not realms:
            realms = all_realms
        if query:
            all_tags = Counter()
            # Require per resource query including view permission checks.
            for resource, tags in tag_system.query(req, query):
                all_tags.update(tags)
        else:
            # Allow faster per tag query, side steps permission checks.
            all_tags = tag_system.get_all_tags(req, realms=realms)
        mincount = 'mincount' in kw and kw['mincount'] or None
        return self.render_cloud(req, all_tags,
                                 caseless_sort=self.caseless_sort,
                                 mincount=mincount, realms=realms)
    elif name == 'ListTagged':
        if content and _OBSOLETE_ARGS_RE.search(content):
            data = {'warning': 'obsolete_args'}
        else:
            data = {'warning': None}
        context = formatter.context
        # Use TagsQuery arguments (most likely wiki macro calls).
        cols = 'cols' in kw and kw['cols'] or self.default_cols
        format = 'format' in kw and kw['format'] or self.default_format
        if not realms:
            # Apply ListTagged defaults to macro call w/o realm.
            realms = list(set(all_realms) - set(self.exclude_realms))
            if not realms:
                return ''
        # Fold the realm selection into the query expression itself.
        query = '(%s) (%s)' % (query or '',
                               ' or '.join(['realm:%s' % (r)
                                            for r in realms]))
        query_result = tag_system.query(req, query)
        # 'exclude' kwarg: colon-separated list of glob patterns
        # matched against resource ids.
        excludes = [exc.strip()
                    for exc in kw.get('exclude', '').split(':')
                    if exc.strip()]
        if excludes and query_result:
            filtered_result = [(resource, tags)
                               for resource, tags in query_result
                               if not any(fnmatchcase(resource.id, exc)
                                          for exc in excludes)]
            query_result = filtered_result
        if not query_result:
            return ''

        def _link(resource):
            if resource.realm == 'tag':
                # Keep realm selection in tag links.
                return builder.a(resource.id,
                                 href=self.get_href(req, realms,
                                                    tag=resource))
            elif resource.realm == 'ticket':
                # Return resource link including ticket status dependend
                # class to allow for common Trac ticket link style.
                ticket = Ticket(env, resource.id)
                return builder.a('#%s' % ticket.id,
                                 class_=ticket['status'],
                                 href=formatter.href.ticket(ticket.id),
                                 title=shorten_line(ticket['summary']))
            return render_resource_link(env, context, resource,
                                        'compact')

        if format == 'table':
            cols = [col for col in cols.split('|')
                    if col in self.supported_cols]
            # Use available translations from Trac core.
            try:
                labels = TicketSystem(env).get_ticket_field_labels()
                labels['id'] = _('Id')
            except AttributeError:
                # Trac 0.11 neither has the attribute nor uses i18n.
                labels = {'id': 'Id', 'description': 'Description'}
            labels['realm'] = _('Realm')
            labels['tags'] = _('Tags')
            headers = [{'label': labels.get(col)} for col in cols]
            data.update({'cols': cols, 'headers': headers})

        # Sort by natural-number aware resource id, then paginate.
        results = sorted(query_result,
                         key=lambda r:
                         embedded_numbers(to_unicode(r[0].id)))
        results = self._paginate(req, results, realms)
        rows = []
        for resource, tags in results:
            desc = tag_system.describe_tagged_resource(req, resource)
            tags = sorted(tags)
            wiki_desc = format_to_oneliner(env, context, desc)
            if tags:
                rendered_tags = [_link(Resource('tag', tag))
                                 for tag in tags]
                if 'oldlist' == format:
                    resource_link = _link(resource)
                else:
                    resource_link = builder.a(
                        wiki_desc,
                        href=get_resource_url(env, resource,
                                              context.href))
                if 'table' == format:
                    cells = []
                    for col in cols:
                        if col == 'id':
                            cells.append(_link(resource))
                        # Don't duplicate links to resource in both.
                        elif col == 'description' and 'id' in cols:
                            cells.append(wiki_desc)
                        elif col == 'description':
                            cells.append(resource_link)
                        elif col == 'realm':
                            cells.append(resource.realm)
                        elif col == 'tags':
                            cells.append(
                                builder([(tag, ' ')
                                         for tag in rendered_tags]))
                    rows.append({'cells': cells})
                    continue
            rows.append({'desc': wiki_desc,
                         'rendered_tags': None,
                         'resource_link': _link(resource)})
        data.update({'format': format,
                     'paginator': results,
                     'results': rows,
                     'tags_url': req.href('tags')})

        # Work around a bug in trac/templates/layout.html, that causes a
        # TypeError for the wiki macro call, if we use add_link() alone.
        add_stylesheet(req, 'common/css/search.css')
        return Chrome(env).render_template(req,
                                           'listtagged_results.html',
                                           data, 'text/html', True)
def _render_dir(self, req, repos, node, rev, order, desc):
    """Build the template data for a repository directory listing.

    :param req: the current request (permissions are checked against it)
    :param repos: the repository the node belongs to
    :param node: the directory node being browsed
    :param rev: requested revision, or a false value for the youngest
    :param order: sort key: 'date', 'size', 'author', or name by default
    :param desc: True for descending sort order
    :return: dict with 'entries', 'changes', age-colorizer data and the
        min/max range bounds used by the age column
    """
    req.perm(node.resource).require('BROWSER_VIEW')

    # Entries metadata — lightweight snapshot of each child node's
    # attributes, so the nodes themselves need not be kept around.
    class entry(object):
        __slots__ = 'name rev kind isdir path content_length'.split()

        def __init__(self, node):
            for f in entry.__slots__:
                setattr(self, f, getattr(node, f))

    entries = [entry(n) for n in node.get_entries()
               if n.can_view(req.perm)]
    changes = get_changes(repos, [i.rev for i in entries], self.log)

    if rev:
        newest = repos.get_changeset(rev).date
    else:
        newest = datetime.now(req.tz)

    # Color scale for the age column
    timerange = custom_colorizer = None
    if self.color_scale:
        timerange = TimeRange(newest)
        # Seed the range with bounds passed down from a parent view, so
        # colors stay consistent while navigating.
        max_s = req.args.get('range_max_secs')
        min_s = req.args.get('range_min_secs')
        parent_range = [timerange.from_seconds(long(s))
                        for s in [max_s, min_s] if s]
        this_range = [c.date for c in changes.values() if c]
        for dt in this_range + parent_range:
            timerange.insert(dt)
        custom_colorizer = self.get_custom_colorizer()

    # Ordering of entries: secondary key is always the natural-number
    # aware lowercase name.
    if order == 'date':
        def file_order(a):
            return (changes[a.rev].date,
                    embedded_numbers(a.name.lower()))
    elif order == 'size':
        def file_order(a):
            return (a.content_length,
                    embedded_numbers(a.name.lower()))
    elif order == 'author':
        def file_order(a):
            return (changes[a.rev].author.lower(),
                    embedded_numbers(a.name.lower()))
    else:
        def file_order(a):
            return embedded_numbers(a.name.lower())

    # Directories always group before files, regardless of sort order.
    dir_order = desc and 1 or -1

    def browse_order(a):
        return a.isdir and dir_order or 0, file_order(a)
    entries = sorted(entries, key=browse_order, reverse=desc)

    # ''Zip Archive'' alternate link
    path = node.path.strip('/')
    if repos.reponame:
        path = repos.reponame + '/' + path
    if any(fnmatchcase(path, p.strip('/'))
           for p in self.downloadable_paths):
        zip_href = req.href.changeset(rev or repos.youngest_rev,
                                      repos.reponame or None, node.path,
                                      old=rev,
                                      old_path=repos.reponame or '/',
                                      format='zip')
        add_link(req, 'alternate', zip_href, _('Zip Archive'),
                 'application/zip', 'zip')

    return {'entries': entries, 'changes': changes,
            'timerange': timerange, 'colorize_age': custom_colorizer,
            'range_max_secs': (timerange and
                               timerange.to_seconds(timerange.newest)),
            'range_min_secs': (timerange and
                               timerange.to_seconds(timerange.oldest)),
            }
def _validate_workflow(self, req, params):
    """Validate an edited workflow definition and build new options.

    :param req: current request, used for permission lookups
    :param params: parsed editor state: 'actions', 'status', 'lineInfo',
        'firstLineInfo', 'others', and optionally 'textError'
    :return: `(newOptions, errors)` — the workflow option dict to save
        and a list of localized error messages (empty on success)
    """
    if 'textError' in params:
        return {}, params['textError']
    errors = []
    # NOTE(review): when 'actions' is absent, the following len() lookup
    # would raise KeyError before the error is reported — confirm the
    # caller guarantees the key, or that this is the intended behavior.
    if not 'actions' in params:
        errors.append(_("Invalid request without actions. Please restart "
                        "your browser and retry."))
    if len(params['actions']) == 0:
        errors.append(_("Need at least one action."))
    if not 'status' in params:
        errors.append(_("Invalid request without statuses. Please restart "
                        "your browser and retry."))
    if len(params['status']) == 0:
        errors.append(_("Need at least one status."))
    newOptions = {}
    if len(errors) == 0:
        # A workflow needs an exit action applicable from any status.
        if not any(act['next'] == '*' and
                   'leave_status' in act.get('operations', ())
                   for act in params['actions']):
            errors.append(_("The action with operation 'leave_status' and "
                            "next status '*' is certainly required."))
    if len(errors) == 0:
        lineInfo = params['lineInfo']
        perms = self._get_permissions(req)
        perms.append('All Users')
        operations = self.operations
        actionNames = []
        for act in params['actions']:
            lineErrors = []
            tempName = act.get('tempName')
            action = act.get('action')
            # Per-action validation: each branch reports the offending
            # source line via lineInfo.
            if tempName not in lineInfo:
                lineErrors.append(_(
                    "Line %(num)d: The definition of '%(aname)s' is not found.",
                    aname=tempName,
                    num=params['firstLineInfo'][tempName]))
            elif action == '':
                lineErrors.append(_(
                    "Line %(num)d: Action cannot be emptied.",
                    num=lineInfo[tempName]))
            elif not self._action_name_re.match(action):
                lineErrors.append(_(
                    "Line %(num)d: Use alphanumeric, dash, and underscore "
                    "characters in the action name.",
                    num=lineInfo[tempName]))
            elif action in actionNames:
                lineErrors.append(_(
                    "Line %(num)d: Action name is duplicated. The name "
                    "must be unique.", num=lineInfo[tempName]))
            elif not 'next' in act:
                lineErrors.append(_("Line %(num)d: No next status.",
                                    num=lineInfo[tempName]))
            elif not act['next'] in params['status'] and act['next'] != '*':
                lineErrors.append(_(
                    "Line %(num)d: '%(status)s' is invalid next status.",
                    num=lineInfo[tempName], status=act['next']))
            elif not act.get('before'):
                lineErrors.append(_("Line %(num)d: Statuses is empty.",
                                    num=lineInfo[tempName]))
            else:
                for stat in act['before']:
                    if not stat in params['status'] and stat != '*':
                        lineErrors.append(_(
                            "Line %(num)d: Status '%(status)s' is invalid.",
                            num=lineInfo[tempName], status=stat))
                if 'operations' in act:
                    lineErrors.extend(
                        _("Line %(num)d: Unknown operator.",
                          num=lineInfo[tempName + '.operations'])
                        for operation in act['operations']
                        if operation not in operations)
                if 'permissions' in act:
                    lineErrors.extend(
                        _("Line %(num)d: Unknown permission.",
                          num=lineInfo[tempName + '.permissions'])
                        for perm in act['permissions']
                        if not perm in perms)
                if 'default' in act and act['default'] == -1:
                    lineErrors.append(_(
                        "Line %(num)d: specify a numerical value to 'default'.",
                        num=lineInfo[tempName + '.default']))
                if len(lineErrors) == 0:
                    # Action is valid: emit the workflow option lines
                    # (key, key.name, key.default, key.permissions, ...).
                    key = action
                    if 'before' in act:
                        tmp = []
                        for stat in params['status']:
                            if stat in act['before']:
                                tmp.append(stat)
                        before = ','.join(tmp)
                    else:
                        before = '*'
                    newOptions[key] = before + ' -> ' + act['next']
                    newOptions[key + '.name'] = act['name']
                    newOptions[key + '.default'] = act['default']
                    if not 'All Users' in act['permissions']:
                        newOptions[key + '.permissions'] = \
                            ','.join(act['permissions'])
                    if act.get('operations'):
                        newOptions[key + '.operations'] = \
                            ','.join(act['operations'])
                    # Carry over unrecognized per-action options.
                    if action in params['others']:
                        for otherKey, otherValue in \
                                params['others'][action].iteritems():
                            newOptions[key + '.' + otherKey] = otherValue
                else:
                    errors.extend(lineErrors)
            actionNames.append(action)
        # Validate the status columns themselves.
        count = 1
        for stat in params['status']:
            if len(stat) == 0:
                errors.append(_(
                    "Status column %(num)d: Status name is empty.",
                    num=count))
            elif ';' in stat or '#' in stat:
                errors.append(_(
                    "Status column %(num)d: The characters '#' and ';' "
                    "cannot be used for status name.", num=count))
            if stat in params['status'][:count - 1]:
                errors.append(_(
                    "Status column %(num)d: Status name is duplicated. "
                    "The name must be unique.", num=count))
            count += 1
    return newOptions, errors
def _diff_to_hdf(self, difflines, tabwidth):
    """
    Translate a diff file into something suitable for inclusion in HDF.
    The result is [(filename, revname_old, revname_new, changes)],
    where changes has the same format as the result of
    `trac.versioncontrol.diff.hdf_diff`.

    If the diff cannot be parsed, this method returns None.
    """
    def _markup_intraline_change(fromlines, tolines):
        # Mark the changed extent of each line pair with \0 ... \1
        # sentinels, later turned into <del>/<ins> markup.
        from trac.versioncontrol.diff import _get_change_extent
        for i in xrange(len(fromlines)):
            fr, to = fromlines[i], tolines[i]
            (start, end) = _get_change_extent(fr, to)
            if start != 0 or end != 0:
                last = end+len(fr)
                fromlines[i] = fr[:start] + '\0' + fr[start:last] + \
                               '\1' + fr[last:]
                last = end+len(to)
                tolines[i] = to[:start] + '\0' + to[start:last] + \
                             '\1' + to[last:]

    import re
    space_re = re.compile(' ( +)|^ ')

    def htmlify(match):
        # NOTE(review): the replacement literals below look like they
        # originally contained '&nbsp;' entities that were lost in
        # transit — confirm against upstream trac/mimeview/patch.py.
        div, mod = divmod(len(match.group(0)), 2)
        return div * ' ' + mod * ' '

    comments = []
    changes = []
    lines = iter(difflines)
    try:
        line = lines.next()
        while True:
            oldpath = oldrev = newpath = newrev = ''
            oldinfo = newinfo = []
            binary = False

            # consume preamble, storing free lines in comments
            # (also detect the special case of git binary patches)
            if not line.startswith('--- '):
                if not line.startswith('Index: ') and line != '=' * 67:
                    comments.append(line)
                if line == "GIT binary patch":
                    binary = True
                    # first comment line is "diff --git a/... b/..."
                    diffcmd_line = comments[0]
                    oldpath, newpath = diffcmd_line.split()[-2:]
                    if any(c.startswith('new file') for c in comments):
                        oldpath = '/dev/null'
                    if any(c.startswith('deleted file')
                           for c in comments):
                        newpath = '/dev/null'
                    oldinfo = ['', oldpath]
                    newinfo = ['', newpath]
                    # e.g. "index 8f****78..1e****5c"
                    index = [c for c in comments
                             if c.startswith('index ')]
                    if index:
                        oldrev, newrev = \
                            index[0].split()[-1].split('..')
                        oldinfo.append(oldrev)
                        newinfo.append(newrev)
                    # binary payload: swallow the rest of the hunk
                    line = lines.next()
                    while line:
                        comments.append(line)
                        line = lines.next()
                else:
                    line = lines.next()
                    continue

            if not oldinfo and not newinfo:
                # Base filename/version from '--- <file> [rev]'
                oldinfo = line.split(None, 2)
                if len(oldinfo) > 1:
                    oldpath = oldinfo[1]
                    if len(oldinfo) > 2:
                        oldrev = oldinfo[2]

                # Changed filename/version from '+++ <file> [rev]'
                line = lines.next()
                if not line.startswith('+++ '):
                    self.log.debug('expected +++ after ---, got '
                                   + line)
                    return None

                newinfo = line.split(None, 2)
                if len(newinfo) > 1:
                    newpath = newinfo[1]
                    if len(newinfo) > 2:
                        newrev = newinfo[2]

            # Derive a short common display path and two short labels
            # distinguishing the old/new sides.
            shortrev = ('old', 'new')
            if oldpath or newpath:
                sep = re.compile(r'([/.~\\])')
                commonprefix = ''.join(os.path.commonprefix(
                    [sep.split(newpath), sep.split(oldpath)]))
                commonsuffix = ''.join(os.path.commonprefix(
                    [sep.split(newpath)[::-1],
                     sep.split(oldpath)[::-1]])[::-1])
                if len(commonprefix) > len(commonsuffix):
                    common = commonprefix
                elif commonsuffix:
                    common = commonsuffix.lstrip('/')
                    a = oldpath[:-len(commonsuffix)]
                    b = newpath[:-len(commonsuffix)]
                    if len(a) < 4 and len(b) < 4:
                        shortrev = (a, b)
                elif oldpath == '/dev/null':
                    common = _("new file %(new)s",
                               new=newpath.lstrip('b/'))
                    shortrev = ('-', '+')
                elif newpath == '/dev/null':
                    common = _("deleted file %(deleted)s",
                               deleted=oldpath.lstrip('a/'))
                    shortrev = ('+', '-')
                else:
                    common = '(a) %s vs. (b) %s' % (oldpath, newpath)
                    shortrev = ('a', 'b')
            else:
                common = ''

            groups = []
            groups_title = []
            changes.append({'change': 'edit', 'props': [],
                            'comments': '\n'.join(comments),
                            'binary': binary,
                            'diffs': groups,
                            'diffs_title': groups_title,
                            'old': {'path': common,
                                    'rev': ' '.join(oldinfo[1:]),
                                    'shortrev': shortrev[0]},
                            'new': {'path': common,
                                    'rev': ' '.join(newinfo[1:]),
                                    'shortrev': shortrev[1]}})
            comments = []
            line = lines.next()
            while line:
                # "@@ -333,10 +329,8 @@" or "@@ -1 +1 @@ [... title ...]"
                r = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@'
                             '(.*)', line)
                if not r:
                    break
                blocks = []
                groups.append(blocks)
                # Missing counts default to 1 per unified-diff rules.
                fromline, fromend, toline, toend = \
                    [int(x or 1) for x in r.groups()[:4]]
                groups_title.append(r.group(5))
                last_type = extra = None

                fromend += fromline
                toend += toline
                line = lines.next()
                while fromline < fromend or toline < toend or extra:

                    # First character is the command
                    command = ' '
                    if line:
                        command, line = line[0], line[1:]
                    # Make a new block?
                    if (command == ' ') != last_type:
                        last_type = command == ' '
                        kind = last_type and 'unmod' or 'mod'
                        block = {'type': kind,
                                 'base': {'offset': fromline - 1,
                                          'lines': []},
                                 'changed': {'offset': toline - 1,
                                             'lines': []}}
                        blocks.append(block)
                    else:
                        block = blocks[-1]
                    if command == ' ':
                        sides = ['base', 'changed']
                    elif command == '+':
                        last_side = 'changed'
                        sides = [last_side]
                    elif command == '-':
                        last_side = 'base'
                        sides = [last_side]
                    elif command == '\\' and last_side:
                        # "\ No newline at end of file" marker
                        meta = block[last_side].setdefault('meta', {})
                        meta[len(block[last_side]['lines'])] = True
                        sides = [last_side]
                    elif command == '@':  # ill-formed patch
                        groups_title[-1] = "%s (%s)" % (
                            groups_title[-1],
                            _("this hunk was shorter than expected"))
                        line = '@'+line
                        break
                    else:
                        self.log.debug('expected +, - or \\, got '
                                       + command)
                        return None
                    for side in sides:
                        if side == 'base':
                            fromline += 1
                        else:
                            toline += 1
                        block[side]['lines'].append(line)
                    line = lines.next()
                    extra = line and line[0] == '\\'
    except StopIteration:
        pass

    # Go through all groups/blocks and mark up intraline changes, and
    # convert to html
    for o in changes:
        for group in o['diffs']:
            for b in group:
                base, changed = b['base'], b['changed']
                f, t = base['lines'], changed['lines']
                if b['type'] == 'mod':
                    if len(f) == 0:
                        b['type'] = 'add'
                    elif len(t) == 0:
                        b['type'] = 'rem'
                    elif len(f) == len(t):
                        _markup_intraline_change(f, t)
                for i in xrange(len(f)):
                    line = expandtabs(f[i], tabwidth, '\0\1')
                    line = escape(line, quotes=False)
                    line = '<del>'.join(
                        [space_re.sub(htmlify, seg)
                         for seg in line.split('\0')])
                    line = line.replace('\1', '</del>')
                    f[i] = Markup(line)
                    if 'meta' in base and i in base['meta']:
                        f[i] = Markup('<em>%s</em>') % f[i]
                for i in xrange(len(t)):
                    line = expandtabs(t[i], tabwidth, '\0\1')
                    line = escape(line, quotes=False)
                    line = '<ins>'.join(
                        [space_re.sub(htmlify, seg)
                         for seg in line.split('\0')])
                    line = line.replace('\1', '</ins>')
                    t[i] = Markup(line)
                    if 'meta' in changed and i in changed['meta']:
                        t[i] = Markup('<em>%s</em>') % t[i]
    return changes
def post_process_request(self, req, template, data, content_type):
    """Add header contents to wiki-, roadmap- and milestone-related pages.

    The (template, data, content_type) triple is always returned
    unchanged, as the request-filter contract requires.
    """
    wants_header = (bool(template)
                    and template.endswith('.html')
                    and template.startswith(('wiki_', 'roadmap',
                                             'milestone_')))
    if wants_header:
        self._add_header_contents(req)
    return template, data, content_type
def _format_link(self, formatter, ns, match, label, fullmatch=None):
    """Render a TracLinks 'log:' link (and its 'log1'/'log2' variants).

    Resolves the path/revision spec against the repository manager and
    returns a link to the revision log when the user has LOG_VIEW.
    """
    if ns == 'log1':
        # Intertrac-style link: "log1" matches come pre-split into
        # named groups by the wiki formatter.
        groups = fullmatch.groupdict()
        it_log = groups.get('it_log')
        revs = groups.get('log_revs')
        path = groups.get('log_path') or '/'
        target = '%s%s@%s' % (it_log, path, revs)
        # prepending it_log is needed, as the helper expects it there
        intertrac = formatter.shorthand_intertrac_helper(
            'log', target, label, fullmatch)
        if intertrac:
            return intertrac
        path, query, fragment = formatter.split_link(path)
    else:
        assert ns in ('log', 'log2')
        if ns == 'log':
            match, query, fragment = formatter.split_link(match)
        else:
            # 'log2' form has rev/path swapped; reverse the two halves.
            query = fragment = ''
            match = ''.join(reversed(match.split('/', 1)))
        path = match
        revs = ''
        if self.LOG_LINK_RE.match(match):
            # Split on the first ':' or '@' separator; "sep in match"
            # yields False when absent, filtered out below (an index of
            # 0 is preserved because the filter tests "is not False").
            indexes = [sep in match and match.index(sep) for sep in ':@']
            idx = min([i for i in indexes if i is not False])
            path, revs = match[:idx], match[idx + 1:]

    rm = RepositoryManager(self.env)
    try:
        reponame, repos, path = rm.get_repository_by_path(path)
        if not reponame:
            reponame = rm.get_default_repository(formatter.context)
            if reponame is not None:
                repos = rm.get_repository(reponame)

        if repos:
            revranges = None
            # Any of ':', '-' or ',' in the revs spec means a range/list.
            if any(c for c in ':-,' if c in revs):
                revranges = self._normalize_ranges(repos, path, revs)
                revs = None
            if 'LOG_VIEW' in formatter.perm:
                if revranges:
                    href = formatter.href.log(repos.reponame or None,
                                              path or '/',
                                              revs=str(revranges))
                else:
                    try:
                        rev = repos.normalize_rev(revs)
                    except NoSuchChangeset:
                        rev = None
                    href = formatter.href.log(repos.reponame or None,
                                              path or '/', rev=rev)
                if query and (revranges or revs):
                    query = '&' + query[1:]
                return tag.a(label, class_='source',
                             href=href + query + fragment)
            errmsg = _("No permission to view change log")
        elif reponame:
            errmsg = _("Repository '%(repo)s' not found", repo=reponame)
        else:
            errmsg = _("No default repository defined")
    except TracError, e:
        errmsg = to_unicode(e)
    # NOTE(review): errmsg is assigned but no return is visible here —
    # the function appears truncated in this chunk; presumably it ends
    # by returning an error-styled link using errmsg. Confirm upstream.