def addbranchrevs(lrepo, repo, hashbranch):
    '''wrap hg.addbranchrevs to work on 1.5 and 1.6 and return the first
    value (revs) only.

    1.5 added the call. 1.6 split the revs parameter into a two-tuple.
    '''
    if _HG_VERSION < (1, 6, 0):
        branches = hashbranch and [hashbranch] or []
        revs, checkout = hg.addbranchrevs(lrepo, repo, branches, None)
    else:
        branches = hashbranch, []
        revs, checkout = hg.addbranchrevs(lrepo, repo, branches, None)
    return revs
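# Hedged usage sketch (added, not from the original source): how calling code
# might consume the wrapper above.  `repo` is assumed to be a local repository
# object and `hashbranch` an optional "url#branch" fragment; only the
# normalized revs list is used, which is the point of the wrapper.
def lookupbranchrevs(repo, hashbranch):
    # Works the same on Mercurial 1.5 and 1.6+ because the wrapper hides the
    # two different hg.addbranchrevs() return shapes.
    revs = addbranchrevs(repo, repo, hashbranch)
    return [repo.lookup(rev) for rev in revs] if revs else []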
def goutgoing(ui, repo, dest=None, **opts):
    """show the outgoing changesets alongside an ASCII revision graph

    Print the outgoing changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working directory.
    """
    check_unsupported_flags(opts)
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(cmdutil.remoteui(ui, opts), dest)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return

    o = repo.changelog.nodesbetween(o, revs)[0]
    revdag = graphrevs(repo, o, opts)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, showparents, asciiedges)
def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code"""
    if opts is None:
        opts = {}
    dest = ui.expandpath(remote or 'default-push', remote or 'default')
    dest, revs = hg.parseurl(dest, None)[:2]
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.Abort(_('no outgoing ancestors'))
    roots = list(repo.revs("roots(%ln)", outgoing.missing))
    if 1 < len(roots):
        msg = _('there are ambiguous outgoing revisions')
        hint = _('see "hg help histedit" for more detail')
        raise error.Abort(msg, hint=hint)
    return repo.lookup(roots[0])
def outgoing(wdrepo, masterrepo):
    try:
        return wdrepo.findoutgoing(masterrepo)
    except AttributeError:
        from mercurial import hg, discovery
        revs, checkout = hg.addbranchrevs(wdrepo, wdrepo, ('', []), None)
        o = discovery.findoutgoing(wdrepo, masterrepo)
        return wdrepo.changelog.nodesbetween(o, revs)[0]
def gincoming(ui, repo, source="default", **opts):
    """show the incoming changesets alongside an ASCII revision graph

    Print the incoming changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working directory.
    """
    check_unsupported_flags(opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    incoming = repo.findincoming(other, heads=revs, force=opts["force"])
    if not incoming:
        try:
            os.unlink(opts["bundle"])
        except:
            pass
        ui.status(_("no changes found\n"))
        return

    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        chlist = other.changelog.nodesbetween(incoming, revs)[0]
        revdag = graphrevs(other, chlist, opts)
        displayer = show_changeset(ui, other, opts, buffered=True)
        showparents = [ctx.node() for ctx in repo[None].parents()]
        generate(ui, revdag, displayer, showparents, asciiedges)
    finally:
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
def bigpush(push_fn, ui, repo, dest=None, *files, **opts):
    '''Pushes this repository to a target repository.

    If this repository is small, behaves as the native push command.
    For large, remote repositories, the repository is pushed in chunks
    of 1000 changesets.'''
    if not opts.get('chunked'):
        return push_fn(ui, repo, dest, **opts)
    source, revs = parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'))
    other = hg.repository(remoteui(repo, opts), source)
    if hasattr(hg, 'addbranchrevs'):
        revs = hg.addbranchrevs(repo, other, revs, opts.get('rev'))[0]
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if other.local():
        return push_fn(ui, repo, dest, **opts)
    ui.status(_('pushing to %s\n') % other.path)
    outgoing = findoutgoing(repo, other)
    if outgoing:
        outgoing = repo.changelog.nodesbetween(outgoing, revs)[0]
    # if the push will create multiple heads and isn't forced, fail now
    # (prepush prints an error message, so we can just exit)
    if (not opts.get('force') and not opts.get('new_branch')
            and None == prepush(repo, other, False, revs)[0]):
        return
    try:
        push_size = 1
        while len(outgoing) > 0:
            ui.debug('start: %d to push\n' % len(outgoing))
            current_push_size = min(push_size, len(outgoing))
            ui.debug('pushing: %d\n' % current_push_size)
            # force the push, because we checked above that by the time the
            # whole push is done, we'll have merged back to one head
            remote_heads = repo.push(other, force=True,
                                     revs=outgoing[:current_push_size])
            if remote_heads:
                # push succeeded
                outgoing = outgoing[current_push_size:]
                ui.debug('pushed %d ok\n' % current_push_size)
                if push_size < max_push_size:
                    push_size *= 2
            else:
                # push failed; try again with a smaller size
                push_size /= 2
                ui.debug('failed, trying %d\n' % current_push_size)
                if push_size == 0:
                    raise UnpushableChangesetError
    except UnpushableChangesetError:
        ui.status(_('unable to push changeset %s\n') % outgoing[0])
    ui.debug('done\n')
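# Hedged sketch (added, not from the original source) of the chunk-size
# strategy bigpush() uses above, extracted into plain Python so the control
# flow is easy to test in isolation.  `push_chunk` is a hypothetical callback
# returning True on success, `max_chunk` stands in for max_push_size, and
# RuntimeError stands in for UnpushableChangesetError.
def push_in_chunks(items, push_chunk, max_chunk=1000):
    """Push `items` via push_chunk(batch) -> bool, growing the batch size
    after each success and shrinking it after each failure."""
    size = 1
    while items:
        batch = items[:min(size, len(items))]
        if push_chunk(batch):
            items = items[len(batch):]       # drop what was accepted
            size = min(size * 2, max_chunk)  # grow after a success
        else:
            size //= 2                       # back off after a failure
            if size == 0:
                raise RuntimeError('unable to push %r' % (items[0],))
    return True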
def test(ui, repo, source, **opts):
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(hg.remoteui(repo, opts), source)
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        try:
            revs = [other.lookup(rev) for rev in revs]
        except CapabilityError:
            err = _("Other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)
def getoutgoing(dest, revs):
    '''Return the revisions present locally but not in dest'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
    other = hg.peer(repo, opts, dest)
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    common, _anyinc, _heads = discovery.findcommonincoming(repo, other)
    nodes = revs and map(repo.lookup, revs) or revs
    o = repo.changelog.findmissing(common, heads=nodes)
    if not o:
        ui.status(_("no changes found\n"))
        return []
    return [str(repo.changelog.rev(r)) for r in o]
def test(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if hasattr(hg, 'peer'):
        other = hg.peer(ui, opts, dest)
        localother = other.local()
        if localother:
            other = localother
    else:
        other = hg.repository(hg.remoteui(repo, opts), dest)
    if revs:
        revs = [other.lookup(rev) for rev in revs]
def bigpush(push_fn, ui, repo, dest=None, *files, **opts):
    """Pushes this repository to a target repository.

    If this repository is small, behaves as the native push command.
    For large, remote repositories, the repository is pushed in chunks
    of size optimized for performance on the network."""
    if not opts.get("chunked"):
        return push_fn(ui, repo, dest, **opts)
    source, revs = parseurl(
        ui.expandpath(dest or "default-push", dest or "default"))
    other = hg.repository(remoteui(repo, opts), source)
    if hasattr(hg, "addbranchrevs"):
        revs = hg.addbranchrevs(repo, other, revs, opts.get("rev"))[0]
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if other.local():
        return push_fn(ui, repo, dest, **opts)
    ui.status(_("pushing to %s\n") % other.path)
    outgoing = findoutgoing(repo, other)
    if outgoing:
        outgoing = repo.changelog.nodesbetween(outgoing, revs)[0]
    # if the push will create multiple heads and isn't forced, fail now
    # (prepush prints an error message, so we can just exit)
    if (not opts.get("force") and not opts.get("new_branch")
            and None == prepush(repo, other, False, revs)[0]):
        return
    try:
        push_size = 1
        while len(outgoing) > 0:
            ui.debug("start: %d to push\n" % len(outgoing))
            current_push_size = min(push_size, len(outgoing))
            ui.debug("pushing: %d\n" % current_push_size)
            # force the push, because we checked above that by the time the
            # whole push is done, we'll have merged back to one head
            remote_heads = repo.push(other, force=True,
                                     revs=outgoing[:current_push_size])
            if remote_heads:
                # push succeeded
                outgoing = outgoing[current_push_size:]
                ui.debug("pushed %d ok\n" % current_push_size)
                if push_size < max_push_size:
                    push_size *= 2
            else:
                # push failed; try again with a smaller size
                push_size /= 2
                ui.debug("failed, trying %d\n" % current_push_size)
                if push_size == 0:
                    raise UnpushableChangesetError
    except UnpushableChangesetError:
        ui.status(_("unable to push changeset %s\n") % outgoing[0])
    ui.debug("done\n")
def outgoing(dest, revs):
    '''Return the revisions present locally but not in dest'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('comparing with %s\n') % dest)
    o = repo.findoutgoing(other)
    if not o:
        ui.status(_("no changes found\n"))
        return []
    o = repo.changelog.nodesbetween(o, revs)[0]
    return [str(repo.changelog.rev(r)) for r in o]
def getoutgoing(dest, revs):
    '''Return the revisions present locally but not in dest'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(hg.remoteui(repo, opts), dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    o = discovery.findoutgoing(repo, other)
    if not o:
        ui.status(_("no changes found\n"))
        return []
    o = repo.changelog.nodesbetween(o, revs)[0]
    return [str(repo.changelog.rev(r)) for r in o]
def push(self, dest=None, rev=None, force=False):
    """\
    Push changes into destination.

    If destination is none, the source of this repo will be used.
    If revision is not specified, the current working dir will be pushed.

    If this spawns a new head, this operation must be forced.  Forcing
    will have the side effect of creating a new branch, and it may not
    be desirable.

    By default, no remote branch will be created.
    """
    # find parents
    # if there are two parents, take the first one,
    # (ui should warn users about uncommitted merge/confirmation)
    # if not force, do it and see if head shows up
    if rev is None:
        rev = [self._repo.lookup('.')]
    dest = self._ui.expandpath(dest or 'default-push', dest or 'default')
    repo = self._repo
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, rev)
    if dest in ('default', 'default-push',):
        raise RepoNotFoundError('no suitable target found')
    other = hg.peer(self._repo, {}, dest)
    self._ui.status('pushing to %s\n' % (dest))
    if revs:
        revs = [self._repo.lookup(rev) for rev in revs]
    try:
        r = self._repo.push(other, force, revs=revs)
    except Abort:
        raise ProtocolError()
    # check to see if this revision is present on destination
    # XXX assuming other is localrepo
    try:
        result = other.lookup(revs[0])
    except:
        result = None
    return result is not None
def pull(self, source='default', update=True):
    """\
    Pull new revisions from source.

    source - if value is 'default', the default source of this repo will
        be used.
        Default: 'default'
    update - if True, this sandbox will be updated to the latest data
        that was pulled, if possible.

    return value is a number of total heads generated from the pull.
    0 = no changes
    1 = updated
    >1 = merge will be required, no automatic update
    """
    # not using another Storage because localrepo.addchangegroup
    # appends output to its ui, so the 'other' repo must be
    # created using the ui belonging to this object.
    if not isinstance(source, basestring):
        raise TypeError('source must be a string')
    # pull from main repo only.
    # XXX could implement pull up to specific revs
    repo = self._repo
    dest, branches = hg.parseurl(source)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if source == 'default':
        raise RepoNotFoundError('no suitable repository found')
    other = hg.peer(self._repo, {}, source)
    self._ui.status('pulling from %s\n' % (source))
    modheads = self._repo.pull(other, revs)
    if update:
        if modheads <= 1 or checkout:
            hg.update(self._repo, checkout)
            self._changectx()
        else:
            self._ui.status(_("not updating, since new heads added\n"))
    return modheads
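# Hedged sketch (added, not from the original source): the pull() docstring
# above defines its return value as the number of heads the pull produced; a
# caller might branch on it like this.  `sandbox` is a hypothetical object
# exposing the pull() method above, and `source` must be a concrete URL or
# path (pull() above rejects the bare 'default' alias).
def pull_and_report(sandbox, ui, source):
    modheads = sandbox.pull(source=source, update=True)
    if modheads == 0:
        ui.status('no changes found\n')
    elif modheads == 1:
        ui.status('working copy updated\n')
    else:
        ui.status('%d new heads, merge required\n' % modheads)
    return modheads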
def get_outgoing_bfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg
    try:
        remoteui = cmdutil.remoteui
    except AttributeError:
        remoteui = hg.remoteui
    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = bfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if bfutil.is_standin(f) and f in ctx]))
    return toupload
def incoming(wdrepo, masterrepo):
    try:
        return wdrepo.findincoming(masterrepo)
    except AttributeError:
        from mercurial import hg, discovery
        revs, checkout = hg.addbranchrevs(wdrepo, masterrepo, ('', []), None)
        common, incoming, rheads = discovery.findcommonincoming(
            wdrepo, masterrepo, heads=revs)
        if not masterrepo.local():
            from mercurial import bundlerepo, changegroup
            if revs is None and masterrepo.capable('changegroupsubset'):
                revs = rheads
            if revs is None:
                cg = masterrepo.changegroup(incoming, "incoming")
            else:
                cg = masterrepo.changegroupsubset(incoming, revs, 'incoming')
            fname = changegroup.writebundle(cg, None, "HG10UN")
            # use the created uncompressed bundlerepo
            masterrepo = bundlerepo.bundlerepository(
                wdrepo.ui, wdrepo.root, fname)
        return masterrepo.changelog.nodesbetween(incoming, revs)[0]
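# Hedged usage sketch (added, not from the original source): the outgoing()
# and incoming() compatibility helpers above are naturally paired to report
# how far a working-directory clone has diverged from its master repository.
# `wdrepo` and `masterrepo` are assumed to be already-open repository/peer
# objects, exactly as in the helpers themselves.
def syncstate(wdrepo, masterrepo):
    # Returns (to_push, to_pull) changeset-node lists; two empty lists mean
    # the repositories are in sync.
    return outgoing(wdrepo, masterrepo), incoming(wdrepo, masterrepo)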
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    remoteui = hg.remoteui
    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return toupload
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
    try:
        remote = hg.peer(repo, opts, dest)
    except error.RepoError:
        return None
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    if not outgoing.missing:
        return outgoing.missing
    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return sorted(toupload)
def findoutgoing(ui, repo, remote=None, force=False, opts={}):
    """utility function to find the first outgoing changeset

    Used by initialisation code"""
    dest = ui.expandpath(remote or 'default-push', remote or 'default')
    dest, revs = hg.parseurl(dest, None)[:2]
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    # hexlify nodes from outgoing, because we're going to parse
    # parent[0] using revsingle below, and if the binary hash
    # contains special revset characters like ":" the revset
    # parser can choke.
    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise util.Abort(_('no outgoing ancestors'))
    return outgoing.missing[0]
def findoutgoing(ui, repo, remote=None, force=False, opts={}):
    """utility function to find the first outgoing changeset

    Used by initialisation code"""
    dest = ui.expandpath(remote or 'default-push', remote or 'default')
    dest, revs = hg.parseurl(dest, None)[:2]
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise util.Abort(_('no outgoing ancestors'))
    roots = list(repo.revs("roots(%ln)", outgoing.missing))
    if 1 < len(roots):
        msg = _('there are ambiguous outgoing revisions')
        hint = _('see "hg help histedit" for more detail')
        raise util.Abort(msg, hint=hint)
    return repo.lookup(roots[0])
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())

        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_, ) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip, replacemap) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        replacemap = {}
        tmpnodes = []
        created = []

    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip, replacemap)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_, ) = actiontable[action](
            ui, repo, parentctx, ha, opts)

        hexshort = lambda x: node.hex(x)[:12]

        if replaced_:
            clen, rlen = len(created_), len(replaced_)
            if clen == rlen == 1:
                ui.debug('histedit: exact replacement of %s with %s\n' % (
                    hexshort(replaced_[0]), hexshort(created_[0])))
                replacemap[replaced_[0]] = created_[0]
            elif clen > rlen:
                assert rlen == 1, ('unexpected replacement of '
                                   '%d changes with %d changes' % (rlen, clen))
                # made more changesets than we're replacing
                # TODO synthesize patch names for created patches
                replacemap[replaced_[0]] = created_[-1]
                ui.debug('histedit: created many, assuming %s replaced by %s' %
                         (hexshort(replaced_[0]), hexshort(created_[-1])))
            elif rlen > clen:
                if not created_:
                    # This must be a drop. Try and put our metadata on
                    # the parent change.
                    assert rlen == 1
                    r = replaced_[0]
                    ui.debug('histedit: %s seems replaced with nothing, '
                             'finding a parent\n' % (hexshort(r)))
                    pctx = repo[r].parents()[0]
                    if pctx.node() in replacemap:
                        ui.debug('histedit: parent is already replaced\n')
                        replacemap[r] = replacemap[pctx.node()]
                    else:
                        replacemap[r] = pctx.node()
                    ui.debug('histedit: %s best replaced by %s\n' % (
                        hexshort(r), hexshort(replacemap[r])))
                else:
                    assert len(created_) == 1
                    for r in replaced_:
                        ui.debug('histedit: %s replaced by %s\n' % (
                            hexshort(r), hexshort(created_[0])))
                        replacemap[r] = created_[0]
            else:
                assert False, (
                    'Unhandled case in replacement mapping! '
                    'replacing %d changes with %d changes' % (rlen, clen))
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())

    if not keep:
        if replacemap:
            ui.note('histedit: Should update metadata for the following '
                    'changes:\n')
            for old, new in replacemap.iteritems():
                if old in tmpnodes or old in created:
                    # can't have any metadata we'd want to update
                    continue
                while new in replacemap:
                    new = replacemap[new]
                ui.note('histedit: %s to %s\n' % (hexshort(old),
                                                  hexshort(new)))
                octx = repo[old]
                if bookmarks is not None:
                    marks = octx.bookmarks()
                    if marks:
                        ui.note('histedit: moving bookmarks %s\n' %
                                ', '.join(marks))
                        for mark in marks:
                            repo._bookmarks[mark] = new
                        bookmarks.write(repo)
        # TODO update mq state

        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced, key=lambda x: repo[x].rev()):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, ) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())

        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_, ) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, ) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_, ) = actiontable[action](
            ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())

    if not keep:
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced,
                        lambda x, y: cmp(repo[x].rev(), repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            findoutgoing = discovery.findoutgoing
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % url.hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, ) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', ):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('e', 'edit', 'p', 'pick', ):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
        else:
            # fold
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_, ) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, ) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        cmdutil.bail_if_changed(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([('pick %s %s' % (
                c.hex()[:12],
                c.description().splitlines()[0]))[:80] for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_, ) = actiontable[action](
            ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())

    if not keep:
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced,
                        lambda x, y: cmp(repo[x].rev(), repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def backups(ui, repo, *pats, **opts):
    '''lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in
    each backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for
    that backup.
    '''
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
    if supportsmarkers and ui.configbool('backups', 'warnobsolescence', True):
        # Warn users of obsolescence markers that they probably don't want to
        # use backups but reflog instead
        ui.warn(msgwithcreatermarkers)

    backuppath = repo.vfs.join("strip-backup")
    backups = filter(os.path.isfile, glob.glob(backuppath + "/*.hg"))
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts['bundle'] = ''
    opts['force'] = None

    if util.safehasattr(cmdutil, 'loglimit'):
        # legacy case
        loglimit = cmdutil.loglimit
        show_changeset = cmdutil.show_changeset
    else:
        # since core commit c8e2d6ed1f9e
        from mercurial import logcmdutil
        loglimit = logcmdutil.getlimit
        show_changeset = logcmdutil.changesetdisplayer

    def display(other, chlist, displayer):
        limit = loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get('recover')
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_("%s already exists in the repo\n") % recovernode)
            return
    else:
        msg = _('Recover changesets using: hg backups --recover '
                '<changeset hash>\n\nAvailable backup changesets:')
        ui.status(msg, label="status.removed")

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, pycompat.getcwd())
        source = ui.expandpath(source)
        source, branches = hg.parseurl(source, opts.get('branch'))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _("\nwarning: unable to open bundle %s") % source
            hint = _("\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg)
            ui.warn(hint)
            continue
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts["bundle"], opts["force"])
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if chlist:
                if recovernode:
                    tr = lock = None
                    try:
                        lock = repo.lock()
                        if scmutil.isrevsymbol(other, recovernode):
                            ui.status(_("Unbundling %s\n") % (recovernode))
                            f = hg.openpath(ui, source)
                            gen = exchange.readbundle(ui, f, source)
                            tr = repo.transaction("unbundle")
                            if not isinstance(gen, bundle2.unbundle20):
                                gen.apply(repo, 'unbundle', 'bundle:' + source)
                            if isinstance(gen, bundle2.unbundle20):
                                bundle2.applybundle(repo, gen, tr,
                                                    source='unbundle',
                                                    url='bundle:' + source)
                            tr.close()
                            break
                    finally:
                        lockmod.release(lock, tr)
                else:
                    backupdate = os.path.getmtime(source)
                    backupdate = time.strftime('%a %H:%M, %Y-%m-%d',
                                               time.localtime(backupdate))
                    ui.status("\n%s\n" % (backupdate.ljust(50)))
                    if not ui.verbose:
                        opts['template'] = verbosetemplate
                    else:
                        ui.status("%s%s\n" % ("bundle:".ljust(13), source))
                    displayer = show_changeset(ui, other, opts, False)
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()