def _get_revision(self, revision):
    """
    Given any revision identifier, returns a 40 char string with revision hash.

    :param revision: str or int or None
    :raises EmptyRepositoryError: if the repository has no changesets
    :raises ChangesetDoesNotExistError: for unknown or ambiguous identifiers
    """
    if self._empty:
        raise EmptyRepositoryError("There are no changesets yet")

    # normalize to the bytes form Mercurial's revsymbol expects
    if revision in [-1, None]:
        revision = b'tip'
    elif isinstance(revision, str):
        revision = safe_bytes(revision)

    try:
        if isinstance(revision, int):
            # numeric revision: direct changelog index lookup
            return ascii_str(self._repo[revision].hex())
        return ascii_str(
            mercurial.scmutil.revsymbol(self._repo, revision).hex())
    # NOTE: IndexError subclasses the builtin LookupError, so the specific
    # "does not exist" handler must stay before the LookupError handler
    except (IndexError, ValueError, mercurial.error.RepoLookupError, TypeError):
        msg = "Revision %r does not exist for %s" % (safe_str(revision), self.name)
        raise ChangesetDoesNotExistError(msg)
    except (LookupError, ):
        msg = "Ambiguous identifier `%s` for %s" % (safe_str(revision), self.name)
        raise ChangesetDoesNotExistError(msg)
def get_file_history(self, path, limit=None):
    """
    Returns history of file as reversed list of ``Changeset`` objects for
    which file at given ``path`` has been modified.

    TODO: This function now uses os underlying 'git' and 'grep' commands
    which is generally not good. Should be replaced with algorithm
    iterating commits.
    """
    # raises if the path does not exist in this changeset
    self._get_filectx(path)

    args = ['log']
    if limit is not None:
        args += ['-n', str(safe_int(limit, 0))]
    args += ['--pretty=format:%H', '-s', safe_str(self.id), '--', safe_str(path)]

    so, se = self.repository.run_git_command(args)
    # every line of output is a full 40-char sha
    return [self.repository.get_changeset(sha)
            for sha in re.findall(r'[0-9a-fA-F]{40}', so)]
def _get_id_for_path(self, path): path = safe_str(path) # FIXME: Please, spare a couple of minutes and make those codes cleaner; if not path in self._paths: path = path.strip('/') # set root tree tree = self.repository._repo[self._tree_id] if path == '': self._paths[''] = tree.id return tree.id splitted = path.split('/') dirs, name = splitted[:-1], splitted[-1] curdir = '' # initially extract things from root dir for item, stat, id in tree.iteritems(): if curdir: name = '/'.join((curdir, item)) else: name = item self._paths[name] = id self._stat_modes[name] = stat for dir in dirs: if curdir: curdir = '/'.join((curdir, dir)) else: curdir = dir dir_id = None for item, stat, id in tree.iteritems(): if dir == item: dir_id = id if dir_id: # Update tree tree = self.repository._repo[dir_id] if not isinstance(tree, objects.Tree): raise ChangesetError('%s is not a directory' % curdir) else: raise ChangesetError('%s have not been found' % curdir) # cache all items from the given traversed tree for item, stat, id in tree.iteritems(): if curdir: name = '/'.join((curdir, item)) else: name = item self._paths[name] = id self._stat_modes[name] = stat if not path in self._paths: raise NodeDoesNotExistError( "There is no file nor directory " "at the given path '%s' at revision %s" % (path, safe_str(self.short_id))) return self._paths[path]
def _get_id_for_path(self, path): path = safe_str(path) # FIXME: Please, spare a couple of minutes and make those codes cleaner; if not path in self._paths: path = path.strip('/') # set root tree tree = self.repository._repo[self._tree_id] if path == '': self._paths[''] = tree.id return tree.id splitted = path.split('/') dirs, name = splitted[:-1], splitted[-1] curdir = '' # initially extract things from root dir for item, stat, id in tree.iteritems(): if curdir: name = '/'.join((curdir, item)) else: name = item self._paths[name] = id self._stat_modes[name] = stat for dir in dirs: if curdir: curdir = '/'.join((curdir, dir)) else: curdir = dir dir_id = None for item, stat, id in tree.iteritems(): if dir == item: dir_id = id if dir_id: # Update tree tree = self.repository._repo[dir_id] if not isinstance(tree, objects.Tree): raise ChangesetError('%s is not a directory' % curdir) else: raise ChangesetError('%s have not been found' % curdir) # cache all items from the given traversed tree for item, stat, id in tree.iteritems(): if curdir: name = '/'.join((curdir, item)) else: name = item self._paths[name] = id self._stat_modes[name] = stat if not path in self._paths: raise NodeDoesNotExistError("There is no file nor directory " "at the given path '%s' at revision %s" % (path, safe_str(self.short_id))) return self._paths[path]
def _get_revision(self, revision):
    """
    Gets an ID revision given as str. This will always return a fill
    40 char revision number

    :param revision: str or int or None
    :raises EmptyRepositoryError: if the repository has no changesets
    :raises ChangesetDoesNotExistError: for unknown or ambiguous identifiers
    """
    if isinstance(revision, unicode):
        revision = safe_str(revision)
    if self._empty:
        raise EmptyRepositoryError("There are no changesets yet")
    if revision in [-1, 'tip', None]:
        revision = 'tip'
    try:
        revision = hex(self._repo.lookup(revision))
    # Handler order fixed to match the sibling implementation of this
    # method: IndexError subclasses the builtin LookupError, so the
    # specific "does not exist" handler must come FIRST — otherwise an
    # out-of-range integer revision would be misreported as an
    # "ambiguous identifier". (Assumes LookupError here is the builtin —
    # TODO confirm it is not shadowed by a mercurial import.)
    except (IndexError, ValueError, RepoLookupError, TypeError):
        msg = ("Revision %s does not exist for %s" % (revision, self))
        raise ChangesetDoesNotExistError(msg)
    except (LookupError, ):
        msg = ("Ambiguous identifier `%s` for %s" % (revision, self))
        raise ChangesetDoesNotExistError(msg)
    return revision
def get_ref_revision(self, ref_type, ref_name):
    """
    Returns revision number for the given reference.

    :param ref_type: one of 'branch', 'book', 'tag' or 'rev'
    :param ref_name: name of the reference (or hash for 'rev')
    :raises ChangesetDoesNotExistError: if the reference cannot be resolved
    """
    ref_name = safe_str(ref_name)
    # all-zero hash is the conventional "empty" changeset marker
    if ref_type == 'rev' and not ref_name.strip('0'):
        return self.EMPTY_CHANGESET
    # lookup up the exact node id
    _revset_predicates = {
        'branch': 'branch',
        'book': 'bookmark',
        'tag': 'tag',
        'rev': 'id',
    }
    # avoid expensive branch(x) iteration over whole repo
    rev_spec = "%%s & %s(%%s)" % _revset_predicates[ref_type]
    try:
        revs = self._repo.revs(rev_spec, ref_name, ref_name)
    except LookupError:
        msg = ("Ambiguous identifier %s:%s for %s" % (ref_type, ref_name, self.name))
        raise ChangesetDoesNotExistError(msg)
    except RepoLookupError:
        msg = ("Revision %s:%s does not exist for %s" % (ref_type, ref_name, self.name))
        raise ChangesetDoesNotExistError(msg)
    if revs:
        try:
            revision = revs.last()
        except AttributeError:
            # removed in hg 3.2
            revision = revs[-1]
    else:
        # TODO: just report 'not found'?
        revision = ref_name
    return self._get_revision(revision)
def annotate_highlight(filenode, annotate_from_changeset_func, order=None,
                       headers=None, **options):
    """
    Returns html portion containing annotated table with 3 columns: line
    numbers, changeset information and pygmentized line of code.

    :param filenode: FileNode object
    :param annotate_from_changeset_func: function taking changeset and
      returning single annotate cell; needs break line at the end
    :param order: ordered sequence of ``ls`` (line numbers column),
      ``annotate`` (annotate column), ``code`` (code column);
      Default is ``['ls', 'annotate', 'code']``
    :param headers: dictionary with headers (keys are whats in ``order``
      parameter)
    """
    from kallithea.lib.pygmentsutils import get_custom_lexer

    # the formatter renders the three-column annotate table
    options['linenos'] = True
    html_formatter = AnnotateHtmlFormatter(
        filenode=filenode,
        annotate_from_changeset_func=annotate_from_changeset_func,
        order=order,
        headers=headers,
        **options)
    # prefer a project-configured lexer, fall back to the node's own guess
    code_lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return highlight(safe_str(filenode.content), code_lexer, html_formatter)
def _get_repo(self, create, src_url=None, update_after_clone=False):
    """
    Function will check for mercurial repository in given path and return
    a localrepo object. If there is no repository in that path it will
    raise an exception unless ``create`` parameter is set to True - in
    that case repository would be created and returned.
    If ``src_url`` is given, would try to clone repository from the
    location at given clone_point. Additionally it'll make update to
    working copy accordingly to ``update_after_clone`` flag

    :raises RepositoryError: wrapping any Abort/RepoError from Mercurial
    """
    try:
        if src_url:
            url = safe_str(self._get_url(src_url))
            opts = {}
            if not update_after_clone:
                # clone without checking out a working copy
                opts.update({'noupdate': True})
            MercurialRepository._check_url(url, self.baseui)
            clone(self.baseui, url, self.path, **opts)

            # Don't try to create if we've already cloned repo
            create = False
        return localrepository(self.baseui, self.path, create=create)
    except (Abort, RepoError) as err:
        if create:
            msg = "Cannot create repository at %s. Original error was %s" \
                % (self.path, err)
        else:
            msg = "Not valid repository at %s. Original error was %s" \
                % (self.path, err)
        raise RepositoryError(msg)
def _get_revision(self, revision):
    """
    Gets an ID revision given as str. This will always return a fill
    40 char revision number

    :param revision: str or int or None
    :raises EmptyRepositoryError: if the repository has no changesets
    :raises ChangesetDoesNotExistError: for unknown or ambiguous identifiers
    """
    if isinstance(revision, unicode):
        revision = safe_str(revision)
    if self._empty:
        raise EmptyRepositoryError("There are no changesets yet")
    if revision in [-1, 'tip', None]:
        revision = 'tip'
    try:
        revision = hex(self._repo.lookup(revision))
    # specific failures first: IndexError subclasses the builtin
    # LookupError, so this order keeps out-of-range integer revisions
    # reported as "does not exist" rather than "ambiguous"
    except (IndexError, ValueError, RepoLookupError, TypeError):
        msg = ("Revision %s does not exist for %s" % (revision, self))
        raise ChangesetDoesNotExistError(msg)
    except (LookupError, ):
        msg = ("Ambiguous identifier `%s` for %s" % (revision, self))
        raise ChangesetDoesNotExistError(msg)
    return revision
def get_config_value(self, section, name, config_file=None):
    """
    Returns configuration value for a given [``section``] and ``name``.

    :param section: Section we want to retrieve value from
    :param name: Name of configuration we want to retrieve
    :param config_file: A path to file which should be used to retrieve
      configuration from (might also be a list of file paths)
    """
    # normalize config_file to a list of paths
    if config_file is None:
        paths = []
    elif isinstance(config_file, str):
        paths = [config_file]
    else:
        paths = config_file

    # explicit files take precedence over the repository's own files
    for path in paths + self._config_files:
        try:
            config = ConfigFile.from_path(path)
        except (IOError, OSError, ValueError):
            # unreadable or malformed file - just skip it
            continue
        try:
            value = config.get(section, name)
        except KeyError:
            continue
        # first file that defines the key wins
        return None if value is None else safe_str(value)
    return None
def _check_url(cls, url): """ Function will check given url and try to verify if it's a valid link. Sometimes it may happened that git will issue basic auth request that can cause whole API to hang when used from python or other external calls. On failures it'll raise urllib2.HTTPError, exception is also thrown when the return code is non 200 """ # check first if it's not an local url if os.path.isdir(url) or url.startswith('file:'): return True if url.startswith('git://'): return True if '+' in url[:url.find('://')]: url = url[url.find('+') + 1:] handlers = [] url_obj = mercurial.util.url(safe_bytes(url)) test_uri, authinfo = url_obj.authinfo() if not test_uri.endswith(b'info/refs'): test_uri = test_uri.rstrip(b'/') + b'/info/refs' url_obj.passwd = b'*****' cleaned_uri = str(url_obj) if authinfo: # create a password manager passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm() passmgr.add_password(*authinfo) handlers.extend((mercurial.url.httpbasicauthhandler(passmgr), mercurial.url.httpdigestauthhandler(passmgr))) o = urllib.request.build_opener(*handlers) o.addheaders = [('User-Agent', 'git/1.7.8.0')] # fake some git req = urllib.request.Request( "%s?%s" % (safe_str(test_uri), urllib.parse.urlencode({"service": 'git-upload-pack'}))) try: resp = o.open(req) if resp.code != 200: raise Exception('Return Code is not 200') except Exception as e: # means it cannot be cloned raise urllib.error.URLError("[%s] org_exc: %s" % (cleaned_uri, e)) # now detect if it's proper git repo gitdata = resp.read() if b'service=git-upload-pack' not in gitdata: raise urllib.error.URLError("url [%s] does not look like an git" % cleaned_uri) return True
def _get_tags(self):
    """Return mapping of tag name -> sha, sorted by name descending."""
    if not self.revisions:
        return {}
    # 'T' entries in the parsed refs are tags
    tag_pairs = ((safe_str(ref), ascii_str(sha))
                 for ref, (sha, ref_type) in self._parsed_refs.items()
                 if ref_type == b'T')
    return OrderedDict(
        sorted(tag_pairs, key=lambda pair: pair[0], reverse=True))
def branches(self):
    """Return mapping of branch name -> sha, sorted by name ascending."""
    if not self.revisions:
        return {}
    # 'H' entries in the parsed refs are branch heads
    branch_pairs = ((safe_str(ref), ascii_str(sha))
                    for ref, (sha, ref_type) in self._parsed_refs.items()
                    if ref_type == b'H')
    return OrderedDict(
        sorted(branch_pairs, key=lambda pair: pair[0], reverse=False))
def branch(self):
    """Return one branch name whose head is this commit, or None."""
    # Note: This function will return one branch name for the changeset -
    # that might not make sense in Git where branches() is a better match
    # for the basic model
    ref = self.repository._heads(reverse=False).get(self._commit.id)
    return safe_str(ref) if ref else None
def get_file_mode(self, path): """ Returns stat mode of the file at the given ``path``. """ # ensure path is traversed path = safe_str(path) self._get_id_for_path(path) return self._stat_modes[path]
def get_file_mode(self, path): """ Returns stat mode of the file at the given ``path``. """ # ensure path is traversed path = safe_str(path) self._get_id_for_path(path) return self._stat_modes[path]
def _get_url(self, url):
    """
    Returns normalized url. If schema is not given, would fall
    to filesystem (``file:///``) schema.
    """
    url = safe_str(url)
    # leave 'default' (hg path alias) and fully-qualified urls untouched
    if url == 'default' or '://' in url:
        return url
    return 'file:///' + url
def _get_url(self, url):
    """
    Returns normalized url. If schema is not given, would fall
    to filesystem (``file:///``) schema.
    """
    url = safe_str(url)
    # 'default' is the hg path alias; urls with a scheme pass through
    if url != 'default' and not '://' in url:
        url = ':///'.join(('file', url))
    return url
def run_git_command(self, cmd):
    """
    Runs given ``cmd`` as git command with cwd set to current repo.
    Returns stdout as unicode str ... or raise RepositoryError.
    """
    # run inside the repo directory when it exists on disk
    cwd = self.path if os.path.isdir(self.path) else None
    stdout, _stderr = self._run_git_command(cmd, cwd=cwd)
    return safe_str(stdout)
def _fix_path(self, path):
    """
    Paths are stored without trailing slash so we need to get rid off it if
    needed. Also mercurial keeps filenodes as str so we need to decode
    from unicode to str
    """
    # rstrip is a no-op for paths without a trailing slash
    return safe_str(path.rstrip('/'))
def _fix_path(self, path):
    """
    Paths are stored without trailing slash so we need to get rid off it if
    needed. Also mercurial keeps filenodes as str so we need to decode
    from unicode to str
    """
    if path.endswith('/'):
        # drop all trailing slashes, not just one
        path = path.rstrip('/')
    return safe_str(path)
def _get_url(self, url):
    """
    Returns normalized url. If schema is not given, would fall
    to filesystem (``file:///``) schema.
    """
    url = safe_str(url)
    if url != 'default' and not '://' in url:
        # NOTE(review): ``urllib.pathname2url`` is the Python 2 location;
        # on Python 3 this lives at ``urllib.request.pathname2url`` -
        # confirm which interpreter this module targets.
        url = "file:" + urllib.pathname2url(url)
    return url
def successors(self):
    """Return short hashes of changesets that obsolete this one."""
    this_node = self._ctx.node()
    successor_sets = mercurial.obsutil.successorssets(
        self._ctx._repo, this_node, closest=True)
    # flattening the list here handles both divergent (len > 1)
    # and the usual case (len = 1)
    result = []
    for subset in successor_sets:
        for n in subset:
            if n != this_node:
                result.append(safe_str(mercurial.node.hex(n)[:12]))
    return result
def _get_bookmarks(self):
    """Return mapping of bookmark name -> sha, sorted by name descending."""
    if self._empty:
        return {}
    pairs = [(safe_str(n), ascii_str(h))
             for n, h in self._repo._bookmarks.items()]
    pairs.sort(key=lambda x: x[0], reverse=True)  # sort by name
    return OrderedDict(pairs)
def _get_tags(self):
    """Return mapping of tag name -> hex sha, sorted by name descending."""
    if self._empty:
        return {}
    pairs = [(safe_str(n), ascii_str(mercurial.node.hex(h)))
             for n, h in self._repo.tags().items()]
    pairs.sort(key=lambda x: x[0], reverse=True)  # sort by name
    return OrderedDict(pairs)
def __init__(self, path, kind):
    """
    Create a node at relative ``path`` of the given ``kind``.

    :raises NodeError: for absolute paths, for an empty path on a
        non-directory node, and for a root node that is not a directory
    """
    if path.startswith('/'):
        raise NodeError("Cannot initialize Node objects with slash at "
                        "the beginning as only relative paths are supported")
    self.path = safe_str(path.rstrip('/'))  # we store paths as str
    if path == '' and kind != NodeKind.DIR:
        raise NodeError("Only DirNode and its subclasses may be "
                        "initialized with empty path")
    self.kind = kind
    #self.dirs, self.files = [], []
    if self.is_root() and not self.is_dir():
        raise NodeError("Root node cannot be FILE kind")
def __init__(self, path, kind):
    """
    Create a node at relative ``path`` of the given ``kind``; validates
    that the path is relative and that only directories may be root/empty.
    """
    if path.startswith('/'):
        raise NodeError("Cannot initialize Node objects with slash at "
                        "the beginning as only relative paths are supported")
    # paths are stored as str, without a trailing slash
    self.path = safe_str(path.rstrip('/'))
    if path == '' and kind != NodeKind.DIR:
        raise NodeError("Only DirNode and its subclasses may be "
                        "initialized with empty path")
    self.kind = kind
    if self.is_root() and not self.is_dir():
        raise NodeError("Root node cannot be FILE kind")
def _get_branches(self, normal=True, closed=False):
    """
    Gets branches for this repository
    Returns only not closed branches by default

    :param closed: return also closed branches for mercurial
    :param normal: return also normal branches
    """
    if self._empty:
        return {}

    branches = OrderedDict()
    for name, _heads, tip_node, is_closed in sorted(
            self._repo.branchmap().iterbranches()):
        # pick the flag matching the branch's closed state
        if closed if is_closed else normal:
            branches[safe_str(name)] = ascii_str(mercurial.node.hex(tip_node))
    return branches
def _get_archives(self, archive_name='tip'):
    """Yield a dict per archive type enabled in the web configuration."""
    allowed = self.baseui.configlist(b"web", b"allow_archive",
                                     untrusted=True)
    archive_types = [(b'zip', '.zip'), (b'gz', '.tar.gz'),
                     (b'bz2', '.tar.bz2')]
    for kind, extension in archive_types:
        # either globally allowed, or enabled via e.g. web.allowzip
        if kind in allowed or self._repo.ui.configbool(
                b"web", b"allow" + kind, untrusted=True):
            yield {"type": safe_str(kind), "extension": extension,
                   "node": archive_name}
def lexer(self): """ Returns pygment's lexer class. Would try to guess lexer taking file's content, name and mimetype. """ from pygments import lexers try: lexer = lexers.guess_lexer_for_filename(self.name, safe_str(self.content), stripnl=False) except lexers.ClassNotFound: lexer = lexers.TextLexer(stripnl=False) # returns first alias return lexer
def get_file_history(self, path, limit=None):
    """
    Returns history of file as reversed list of ``Changeset`` objects for
    which file at given ``path`` has been modified.

    TODO: This function now uses os underlying 'git' and 'grep' commands
    which is generally not good. Should be replaced with algorithm
    iterating commits.
    """
    # raises if the path does not exist in this changeset
    self._get_filectx(path)
    cs_id = safe_str(self.id)
    f_path = safe_str(path)

    # Build the command as an argument list instead of an interpolated
    # shell string: paths containing quotes, spaces or shell
    # metacharacters would otherwise break (or alter) the command.
    if limit is not None:
        cmd = ['log', '-n', str(safe_int(limit, 0)), '--pretty=format:%H',
               '-s', cs_id, '--', f_path]
    else:
        cmd = ['log', '--pretty=format:%H', '-s', cs_id, '--', f_path]
    so, se = self.repository.run_git_command(cmd)
    # one full 40-char sha per output line
    ids = re.findall(r'[0-9a-fA-F]{40}', so)
    return [self.repository.get_changeset(sha) for sha in ids]
def get_nodes(self, path):
    """
    Returns combined ``DirNode`` and ``FileNode`` objects list representing
    state of changeset at the given ``path``. If node at the given ``path``
    is not instance of ``DirNode``, ChangesetError would be raised.
    """
    if self._get_kind(path) != NodeKind.DIR:
        raise ChangesetError("Directory does not exist for revision %s at "
            " '%s'" % (self.revision, path))
    path = path.rstrip('/')
    id = self._get_id_for_path(path)
    tree = self.repository._repo[id]
    dirnodes = []
    filenodes = []
    als = self.repository.alias
    for name, stat, id in tree.items():
        # build the path of this entry relative to the repository root
        obj_path = safe_str(name)
        if path != '':
            obj_path = '/'.join((path, obj_path))
        if objects.S_ISGITLINK(stat):
            # submodule entry: resolve its url from .gitmodules in the
            # root tree and represent it as a SubModuleNode
            root_tree = self.repository._repo[self._tree_id]
            cf = ConfigFile.from_file(BytesIO(self.repository._repo.get_object(
                root_tree[b'.gitmodules'][1]).data))
            url = ascii_str(cf.get(('submodule', obj_path), 'url'))
            dirnodes.append(SubModuleNode(obj_path, url=url,
                                          changeset=ascii_str(id),
                                          alias=als))
            continue
        obj = self.repository._repo.get_object(id)
        if obj_path not in self._stat_modes:
            # remember stat mode for later get_file_mode() calls
            self._stat_modes[obj_path] = stat
        if isinstance(obj, objects.Tree):
            dirnodes.append(DirNode(obj_path, changeset=self))
        elif isinstance(obj, objects.Blob):
            filenodes.append(FileNode(obj_path, changeset=self, mode=stat))
        else:
            raise ChangesetError("Requested object should be Tree "
                                 "or Blob, is %r" % type(obj))
    nodes = dirnodes + filenodes
    for node in nodes:
        # populate the per-changeset node cache
        if node.path not in self.nodes:
            self.nodes[node.path] = node
    nodes.sort()
    return nodes
def __init__(self, repository, revision):
    """
    Create a changeset object for ``revision`` (a sha) in ``repository``;
    annotated tags are dereferenced to the tagged commit.

    :raises RepositoryError: if the object id cannot be found
    """
    # path -> stat mode cache, filled lazily while traversing trees
    self._stat_modes = {}
    self.repository = repository
    revision = safe_str(revision)
    try:
        commit = self.repository._repo[revision]
        if isinstance(commit, objects.Tag):
            # annotated tag: follow it to the tagged object
            revision = safe_str(commit.object[1])
            commit = self.repository._repo.get_object(commit.object[1])
    except KeyError:
        raise RepositoryError("Cannot get object with id %s" % revision)
    self.raw_id = revision
    self.id = self.raw_id
    self.short_id = self.raw_id[:12]
    self._commit = commit
    self._tree_id = commit.tree
    # attribute names used by shared base-class author/date properties
    self._committer_property = 'committer'
    self._author_property = 'author'
    self._date_property = 'commit_time'
    self._date_tz_property = 'commit_timezone'
    self.revision = repository.revisions.index(revision)
    self.nodes = {}
    self._paths = {}
def _changes_cache(self):
    """Return (added, modified, deleted) path sets vs. all parents."""
    added, modified, deleted = set(), set(), set()
    repo = self.repository._repo
    my_tree = repo[self._commit.id].tree
    # a root commit is diffed against the empty changeset
    parents = self.parents or [EmptyChangeset()]
    for parent in parents:
        parent_tree = (None if isinstance(parent, EmptyChangeset)
                       else repo[parent._commit.id].tree)
        changes = repo.object_store.tree_changes(parent_tree, my_tree)
        for (oldpath, newpath), _modes, _shas in changes:
            # classify by which side of the change has a path
            if oldpath and newpath:
                modified.add(safe_str(newpath))
            elif newpath:
                added.add(safe_str(newpath))
            elif oldpath:
                deleted.add(safe_str(oldpath))
    return added, modified, deleted
def __init__(self, repository, revision):
    """
    Initialize a git changeset for ``revision`` (sha) in ``repository``.
    Annotated tag objects are peeled to the commit they point at.

    :raises RepositoryError: if the object id cannot be found
    """
    # lazy path -> stat mode cache used by get_file_mode()
    self._stat_modes = {}
    self.repository = repository
    revision = safe_str(revision)
    try:
        commit = self.repository._repo[revision]
        if isinstance(commit, objects.Tag):
            # dereference annotated tags to their target object
            revision = safe_str(commit.object[1])
            commit = self.repository._repo.get_object(commit.object[1])
    except KeyError:
        raise RepositoryError("Cannot get object with id %s" % revision)
    self.raw_id = revision
    self.id = self.raw_id
    self.short_id = self.raw_id[:12]
    self._commit = commit
    self._tree_id = commit.tree
    # names of the dulwich Commit attributes read by base-class properties
    self._committer_property = 'committer'
    self._author_property = 'author'
    self._date_property = 'commit_time'
    self._date_tz_property = 'commit_timezone'
    self.revision = repository.revisions.index(revision)
    self.nodes = {}
    self._paths = {}
def get_config_value(self, section, name=None, config_file=None):
    """
    Returns configuration value for a given [``section``] and ``name``.

    :param section: Section we want to retrieve value from
    :param name: Name of configuration we want to retrieve
    :param config_file: A path to file which should be used to retrieve
      configuration from (might also be a list of file paths)
    """
    if config_file is None:
        config_file = []
    elif isinstance(config_file, str):
        config_file = [config_file]

    config = self._repo.ui
    if config_file:
        # read only the explicitly-given files rather than the repo ui
        config = mercurial.ui.ui()
        for path in config_file:
            config.readconfig(safe_bytes(path))
    # NOTE(review): ``name`` defaults to None, but safe_bytes(None) on the
    # next line would fail - callers apparently always pass a name;
    # confirm before relying on the default.
    value = config.config(safe_bytes(section), safe_bytes(name))
    return value if value is None else safe_str(value)
def remove_tag(self, name, user, message=None, date=None):
    """
    Removes tag with the given ``name``.

    :param name: name of the tag to be removed
    :param user: full username, i.e.: "Joe Doe <*****@*****.**>"
    :param message: message of the tag's removal commit
    :param date: date of tag's removal commit

    :raises TagDoesNotExistError: if tag with given name does not exists
    """
    if name not in self.tags:
        raise TagDoesNotExistError("Tag %s does not exist" % name)
    # self._repo.refs is a DiskRefsContainer, and .path gives the full
    # absolute path of '.git'
    tag_path = os.path.join(safe_str(self._repo.refs.path),
                            'refs', 'tags', name)
    try:
        os.remove(tag_path)
        # refresh cached refs/tags after removing the loose ref file
        self._parsed_refs = self._get_parsed_refs()
        self.tags = self._get_tags()
    except OSError as e:
        raise RepositoryError(e.strerror)
def get_ref_revision(self, ref_type, ref_name):
    """
    Returns revision number for the given reference.

    :param ref_type: 'branch', 'book', 'tag' or 'rev'
    :param ref_name: reference name (or hash for 'rev')
    :raises ChangesetDoesNotExistError: if the reference cannot be resolved
    """
    ref_name = safe_str(ref_name)
    # the all-zero hash denotes the empty changeset
    if ref_type == 'rev' and not ref_name.strip('0'):
        return self.EMPTY_CHANGESET
    # lookup up the exact node id
    _revset_predicates = {
        'branch': 'branch',
        'book': 'bookmark',
        'tag': 'tag',
        'rev': 'id',
    }
    # avoid expensive branch(x) iteration over whole repo
    rev_spec = "%%s & %s(%%s)" % _revset_predicates[ref_type]
    try:
        revs = self._repo.revs(rev_spec, ref_name, ref_name)
    except LookupError:
        msg = ("Ambiguous identifier %s:%s for %s" % (ref_type, ref_name, self.name))
        raise ChangesetDoesNotExistError(msg)
    except RepoLookupError:
        msg = ("Revision %s:%s does not exist for %s" % (ref_type, ref_name, self.name))
        raise ChangesetDoesNotExistError(msg)
    if revs:
        try:
            revision = revs.last()
        except AttributeError:
            # removed in hg 3.2
            revision = revs[-1]
    else:
        # TODO: just report 'not found'?
        revision = ref_name
    return self._get_revision(revision)
def filectxfn(_repo, memctx, bytes_path):
    """
    Callback from Mercurial, returning ctx to commit for the given path.
    """
    path = safe_str(bytes_path)
    # removed paths are signalled back to Mercurial by returning None
    if any(node.path == path for node in self.removed):
        return None
    # added and changed paths produce identical memfilectx objects
    for node in self.added + self.changed:
        if node.path == path:
            return mercurial.context.memfilectx(
                _repo, memctx, path=bytes_path,
                data=node.content, islink=False,
                isexec=node.is_executable, copysource=False)
    raise RepositoryError("Given path haven't been marked as added, "
                          "changed or removed (%s)" % path)
def _get_repo_refs(self, repo, rev=None, branch=None, branch_rev=None):
    """return a structure with repo's interesting changesets, suitable for
    the selectors in pullrequest.html

    rev: a revision that must be in the list somehow and selected by default
    branch: a branch that must be in the list and selected by default - even if closed
    branch_rev: a revision of which peers should be preferred and available."""
    # list named branches that has been merged to this named branch - it should probably merge back
    peers = []

    if rev:
        rev = safe_str(rev)

    if branch:
        branch = safe_str(branch)

    if branch_rev:
        branch_rev = safe_str(branch_rev)
        # a revset not restricting to merge() would be better
        # (especially because it would get the branch point)
        # ... but is currently too expensive
        # including branches of children could be nice too
        peerbranches = set()
        for i in repo._repo.revs(
            "sort(parents(branch(id(%s)) and merge()) - branch(id(%s)), -rev)",
            branch_rev, branch_rev,
        ):
            abranch = repo.get_changeset(i).branch
            if abranch not in peerbranches:
                n = 'branch:%s:%s' % (abranch, repo.get_changeset(abranch).raw_id)
                peers.append((n, abranch))
                peerbranches.add(abranch)

    selected = None
    tiprev = repo.tags.get('tip')
    tipbranch = None

    branches = []
    for abranch, branchrev in repo.branches.iteritems():
        n = 'branch:%s:%s' % (abranch, branchrev)
        desc = abranch
        if branchrev == tiprev:
            tipbranch = abranch
            desc = '%s (current tip)' % desc
        branches.append((n, desc))
        if rev == branchrev:
            selected = n
        if branch == abranch:
            if not rev:
                selected = n
            branch = None
    if branch:  # branch not in list - it is probably closed
        branchrev = repo.closed_branches.get(branch)
        if branchrev:
            n = 'branch:%s:%s' % (branch, branchrev)
            branches.append((n, _('%s (closed)') % branch))
            selected = n
            branch = None
    if branch:
        log.debug('branch %r not found in %s', branch, repo)

    bookmarks = []
    for bookmark, bookmarkrev in repo.bookmarks.iteritems():
        n = 'book:%s:%s' % (bookmark, bookmarkrev)
        bookmarks.append((n, bookmark))
        if rev == bookmarkrev:
            selected = n

    tags = []
    for tag, tagrev in repo.tags.iteritems():
        if tag == 'tip':
            continue
        n = 'tag:%s:%s' % (tag, tagrev)
        tags.append((n, tag))
        if rev == tagrev:
            selected = n

    # prio 1: rev was selected as existing entry above

    # prio 2: create special entry for rev; rev _must_ be used
    specials = []
    if rev and selected is None:
        selected = 'rev:%s:%s' % (rev, rev)
        specials = [(selected, '%s: %s' % (_("Changeset"), rev[:12]))]

    # prio 3: most recent peer branch
    if peers and not selected:
        selected = peers[0][0]

    # prio 4: tip revision
    if not selected:
        if h.is_hg(repo):
            if tipbranch:
                selected = 'branch:%s:%s' % (tipbranch, tiprev)
            else:
                # empty hg repo: synthesize a 'null' tag entry
                selected = 'tag:null:' + repo.EMPTY_CHANGESET
                tags.append((selected, 'null'))
        else:
            if 'master' in repo.branches:
                selected = 'branch:master:%s' % repo.branches['master']
            else:
                # NOTE(review): .items()[0] is a Python 2 idiom - on
                # Python 3 dict views are not subscriptable; confirm the
                # interpreter this module targets.
                k, v = repo.branches.items()[0]
                selected = 'branch:%s:%s' % (k, v)

    groups = [(specials, _("Special")),
              (peers, _("Peer branches")),
              (bookmarks, _("Bookmarks")),
              (branches, _("Branches")),
              (tags, _("Tags")),
              ]
    # only non-empty groups are rendered
    return [g for g in groups if g[0]], selected
def commit(self, message, author, parents=None, branch=None, date=None,
           **kwargs):
    """
    Performs in-memory commit (doesn't check workdir in any way) and
    returns newly created ``Changeset``. Updates repository's
    ``revisions``.

    :param message: message of the commit
    :param author: full username, i.e. "Joe Doe <*****@*****.**>"
    :param parents: single parent or sequence of parents from which commit
      would be derived
    :param date: ``datetime.datetime`` instance. Defaults to
      ``datetime.datetime.now()``.
    :param branch: branch name, as string. If none given, default backend's
      branch would be used.

    :raises ``CommitError``: if any error occurs while committing
    """
    self.check_integrity(parents)

    from .repository import GitRepository
    if branch is None:
        branch = GitRepository.DEFAULT_BRANCH_NAME

    repo = self.repository._repo
    object_store = repo.object_store

    ENCODING = "UTF-8"

    # Create tree and populates it with blobs
    # (start from the first parent's tree, or an empty tree for a root commit)
    commit_tree = self.parents[0] and repo[self.parents[0]._commit.tree] or\
        objects.Tree()
    for node in self.added + self.changed:
        # Compute subdirs if needed
        dirpath, nodename = posixpath.split(node.path)
        # NOTE(review): map() returns an iterator on Python 3, which has
        # no .pop() as used below - this looks like Python 2 era code;
        # confirm the interpreter this module targets.
        dirnames = map(safe_str, dirpath and dirpath.split('/') or [])
        parent = commit_tree
        ancestors = [('', parent)]

        # Tries to dig for the deepest existing tree
        while dirnames:
            curdir = dirnames.pop(0)
            try:
                dir_id = parent[curdir][1]
            except KeyError:
                # put curdir back into dirnames and stops
                dirnames.insert(0, curdir)
                break
            else:
                # If found, updates parent
                parent = self.repository._repo[dir_id]
                ancestors.append((curdir, parent))
        # Now parent is deepest existing tree and we need to create subtrees
        # for dirnames (in reverse order) [this only applies for nodes from added]
        new_trees = []

        # binary content is stored as-is; text is encoded first
        if not node.is_binary:
            content = node.content.encode(ENCODING)
        else:
            content = node.content
        blob = objects.Blob.from_string(content)

        node_path = node.name.encode(ENCODING)
        if dirnames:
            # If there are trees which should be created we need to build
            # them now (in reverse order)
            reversed_dirnames = list(reversed(dirnames))
            curtree = objects.Tree()
            curtree[node_path] = node.mode, blob.id
            new_trees.append(curtree)
            for dirname in reversed_dirnames[:-1]:
                newtree = objects.Tree()
                #newtree.add(stat.S_IFDIR, dirname, curtree.id)
                newtree[dirname] = stat.S_IFDIR, curtree.id
                new_trees.append(newtree)
                curtree = newtree
            parent[reversed_dirnames[-1]] = stat.S_IFDIR, curtree.id
        else:
            parent.add(name=node_path, mode=node.mode, hexsha=blob.id)

        new_trees.append(parent)
        # Update ancestors
        # (walk the existing-tree chain bottom-up, rewiring child ids)
        for parent, tree, path in reversed([(a[1], b[1], b[0]) for a, b in
                                            zip(ancestors, ancestors[1:])]):
            parent[path] = stat.S_IFDIR, tree.id
            object_store.add_object(tree)

        object_store.add_object(blob)
        for tree in new_trees:
            object_store.add_object(tree)
    for node in self.removed:
        paths = node.path.split('/')
        tree = commit_tree
        trees = [tree]
        # Traverse deep into the forest...
        for path in paths:
            try:
                obj = self.repository._repo[tree[path][1]]
                if isinstance(obj, objects.Tree):
                    trees.append(obj)
                    tree = obj
            except KeyError:
                break
        # Cut down the blob and all rotten trees on the way back...
        # NOTE(review): reversed(zip(...)) requires a sequence - on
        # Python 3 zip() returns an iterator and this raises TypeError;
        # see the py2/py3 note above.
        for path, tree in reversed(zip(paths, trees)):
            del tree[path]
            if tree:
                # This tree still has elements - don't remove it or any
                # of it's parents
                break

    object_store.add_object(commit_tree)

    # Create commit
    commit = objects.Commit()
    commit.tree = commit_tree.id
    commit.parents = [p._commit.id for p in self.parents if p]
    commit.author = commit.committer = safe_str(author)
    commit.encoding = ENCODING
    commit.message = safe_str(message)

    # Compute date
    if date is None:
        date = time.time()
    elif isinstance(date, datetime.datetime):
        date = time.mktime(date.timetuple())

    author_time = kwargs.pop('author_time', date)
    commit.commit_time = int(date)
    commit.author_time = int(author_time)
    tz = time.timezone
    author_tz = kwargs.pop('author_timezone', tz)
    commit.commit_timezone = tz
    commit.author_timezone = author_tz

    object_store.add_object(commit)

    # move the branch ref to the new commit
    ref = 'refs/heads/%s' % branch
    repo.refs[ref] = commit.id

    # Update vcs repository object & recreate dulwich repo
    self.repository.revisions.append(commit.id)
    # invalidate parsed refs after commit
    self.repository._parsed_refs = self.repository._get_parsed_refs()
    tip = self.repository.get_changeset()
    self.reset()
    return tip
def show(self, repo_name, pull_request_id, extra=None):
    """Render the pull request detail page.

    Loads the pull request, its changeset range, availability of newer
    changesets for updating the PR (hg only), the diff between the two
    refs, inline/general comments, and reviewer voting status into the
    template context ``c``, then renders the show template.

    :param repo_name: name of the repo the request was routed to; must be
        the PR's ``other_repo`` (the target side) or 404 is raised.
    :param pull_request_id: database id of the pull request.
    :param extra: unused here — presumably consumed by routing/decorators;
        TODO confirm against callers.
    """
    repo_model = RepoModel()
    # user/user-group JSON blobs for the reviewer-picker widgets
    c.users_array = repo_model.get_users_js()
    c.user_groups_array = repo_model.get_user_groups_js()
    c.pull_request = PullRequest.get_or_404(pull_request_id)
    c.allowed_to_change_status = self._get_is_allowed_change_status(c.pull_request)
    cc_model = ChangesetCommentsModel()
    cs_model = ChangesetStatusModel()

    # pull_requests repo_name we opened it against
    # ie. other_repo must match
    if repo_name != c.pull_request.other_repo.repo_name:
        raise HTTPNotFound

    # load compare data into template context
    # refs are stored as 'type:name:rev' strings
    c.cs_repo = c.pull_request.org_repo
    (c.cs_ref_type,
     c.cs_ref_name,
     c.cs_rev) = c.pull_request.org_ref.split(':')

    c.a_repo = c.pull_request.other_repo
    (c.a_ref_type,
     c.a_ref_name,
     c.a_rev) = c.pull_request.other_ref.split(':')  # other_rev is ancestor

    org_scm_instance = c.cs_repo.scm_instance  # property with expensive cache invalidation check!!!
    # NOTE(review): self-assignment looks like dead code left from a refactoring
    c.cs_repo = c.cs_repo
    try:
        # changesets in the PR, oldest first; falls back to empty list if
        # the revisions have been stripped from the repo
        c.cs_ranges = [org_scm_instance.get_changeset(x) for x in c.pull_request.revisions]
    except ChangesetDoesNotExistError:
        c.cs_ranges = []
    c.cs_ranges_org = None  # not stored and not important and moving target - could be calculated ...
    revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
    c.jsdata = json.dumps(graph_data(org_scm_instance, revs))
    c.is_range = False
    if c.a_ref_type == 'rev':  # this looks like a free range where target is ancestor
        cs_a = org_scm_instance.get_changeset(c.a_rev)
        root_parents = c.cs_ranges[0].parents
        # "range" means the target rev is a direct parent of the first PR changeset
        c.is_range = cs_a in root_parents
        #c.merge_root = len(root_parents) > 1 # a range starting with a merge might deserve a warning

    avail_revs = set()
    avail_show = []
    c.cs_branch_name = c.cs_ref_name
    other_scm_instance = c.a_repo.scm_instance
    c.update_msg = ""
    c.update_msg_other = ""
    try:
        # "update availability" analysis is Mercurial-only; for git only an
        # existence check is done below
        if org_scm_instance.alias == 'hg' and c.a_ref_name != 'ancestor':
            if c.cs_ref_type != 'branch':
                # resolve the actual branch the source ref lives on
                c.cs_branch_name = org_scm_instance.get_changeset(c.cs_ref_name).branch  # use ref_type ?
            c.a_branch_name = c.a_ref_name
            if c.a_ref_type != 'branch':
                try:
                    c.a_branch_name = other_scm_instance.get_changeset(c.a_ref_name).branch  # use ref_type ?
                except EmptyRepositoryError:
                    c.a_branch_name = 'null'  # not a branch name ... but close enough
            # candidates: descendants of old head that are on the right branch
            # and not are the old head itself ...
            # and nothing at all if old head is a descendant of target ref name
            if not c.is_range and other_scm_instance._repo.revs('present(%s)::&%s',
                                                                c.cs_ranges[-1].raw_id,
                                                                c.a_branch_name):
                c.update_msg = _('This pull request has already been merged to %s.') % c.a_branch_name
            elif c.pull_request.is_closed():
                c.update_msg = _('This pull request has been closed and can not be updated.')
            else:  # look for descendants of PR head on source branch in org repo
                avail_revs = org_scm_instance._repo.revs('%s:: & branch(%s)',
                                                         revs[0], c.cs_branch_name)
                if len(avail_revs) > 1:  # more than just revs[0]
                    # also show changesets that not are descendants but would be merged in
                    targethead = other_scm_instance.get_changeset(c.a_branch_name).raw_id
                    if org_scm_instance.path != other_scm_instance.path:
                        # Note: org_scm_instance.path must come first so all
                        # valid revision numbers are 100% org_scm compatible
                        # - both for avail_revs and for revset results
                        hgrepo = unionrepo.unionrepository(org_scm_instance.baseui,
                                                           org_scm_instance.path,
                                                           other_scm_instance.path)
                    else:
                        hgrepo = org_scm_instance._repo
                    show = set(hgrepo.revs('::%ld & !::parents(%s) & !::%s',
                                           avail_revs, revs[0], targethead))
                    c.update_msg = _('The following changes are available on %s:') % c.cs_branch_name
                else:
                    show = set()
                    avail_revs = set()  # drop revs[0]
                    c.update_msg = _('No changesets found for updating this pull request.')

                # TODO: handle branch heads that not are tip-most
                brevs = org_scm_instance._repo.revs('%s - %ld - %s',
                                                    c.cs_branch_name, avail_revs, revs[0])
                if brevs:
                    # also show changesets that are on branch but neither ancestors nor descendants
                    show.update(org_scm_instance._repo.revs('::%ld - ::%ld - ::%s',
                                                            brevs, avail_revs, c.a_branch_name))
                    show.add(revs[0])  # make sure graph shows this so we can see how they relate
                    c.update_msg_other = _('Note: Branch %s has another head: %s.') % (
                        c.cs_branch_name,
                        h.short_id(org_scm_instance.get_changeset((max(brevs))).raw_id))

            avail_show = sorted(show, reverse=True)

        elif org_scm_instance.alias == 'git':
            c.cs_repo.scm_instance.get_changeset(c.cs_rev)  # check it exists - raise ChangesetDoesNotExistError if not
            c.update_msg = _("Git pull requests don't support updates yet.")
    except ChangesetDoesNotExistError:
        c.update_msg = _('Error: revision %s was not found. Please create a new pull request!') % c.cs_rev

    c.avail_revs = avail_revs
    c.avail_cs = [org_scm_instance.get_changeset(r) for r in avail_show]
    c.avail_jsdata = json.dumps(graph_data(org_scm_instance, avail_show))

    raw_ids = [x.raw_id for x in c.cs_ranges]
    c.cs_comments = c.cs_repo.get_comments(raw_ids)
    c.statuses = c.cs_repo.statuses(raw_ids)

    # diff rendering options from query string
    ignore_whitespace = request.GET.get('ignorews') == '1'
    # NOTE(review): default is int 3 but a supplied value stays a str —
    # presumably get_diff tolerates both; confirm
    line_context = request.GET.get('context', 3)
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.fulldiff = request.GET.get('fulldiff')
    # no size cut-off when the full diff was explicitly requested
    diff_limit = self.cut_off_limit if not c.fulldiff else None

    # we swap org/other ref since we run a simple diff on one repo
    log.debug('running diff between %s and %s in %s',
              c.a_rev, c.cs_rev, org_scm_instance.path)
    try:
        txtdiff = org_scm_instance.get_diff(rev1=safe_str(c.a_rev), rev2=safe_str(c.cs_rev),
                                            ignore_whitespace=ignore_whitespace,
                                            context=line_context)
    except ChangesetDoesNotExistError:
        # placeholder text is shown instead of a diff
        txtdiff = _("The diff can't be shown - the PR revisions could not be found.")
    diff_processor = diffs.DiffProcessor(txtdiff or '', format='gitdiff',
                                         diff_limit=diff_limit)
    _parsed = diff_processor.prepare()

    c.limited_diff = False
    if isinstance(_parsed, LimitedDiffContainer):
        # diff was truncated by diff_limit
        c.limited_diff = True

    c.files = []
    c.changes = {}
    c.lines_added = 0
    c.lines_deleted = 0

    # per-file stats, file list entries and rendered HTML diffs
    for f in _parsed:
        st = f['stats']
        c.lines_added += st['added']
        c.lines_deleted += st['deleted']
        fid = h.FID('', f['filename'])
        c.files.append([fid, f['operation'], f['filename'], f['stats']])
        htmldiff = diff_processor.as_html(enable_comments=True, parsed_lines=[f])
        c.changes[fid] = [f['operation'], f['filename'], htmldiff]

    # inline comments
    c.inline_cnt = 0
    c.inline_comments = cc_model.get_inline_comments(
        c.db_repo.repo_id, pull_request=pull_request_id)
    # count inline comments
    for __, lines in c.inline_comments:
        for comments in lines.values():
            c.inline_cnt += len(comments)
    # comments
    c.comments = cc_model.get_comments(c.db_repo.repo_id, pull_request=pull_request_id)

    # (badly named) pull-request status calculation based on reviewer votes
    (c.pull_request_reviewers,
     c.pull_request_pending_reviewers,
     c.current_voting_result,
     ) = cs_model.calculate_pull_request_result(c.pull_request)
    c.changeset_statuses = ChangesetStatus.STATUSES

    c.as_form = False
    c.ancestor = None  # there is one - but right here we don't know which
    return render('/pullrequests/pullrequest_show.html')