def _pull(orig, ui, repo, *args, **opts):
    """Wrap `pull` to create obsolescence markers for landed diffs.

    After the wrapped pull completes, any draft changeset whose diff number
    matches a newly pulled public changeset is marked obsolete, with the
    public changeset as its successor. Returns the wrapped command's result.
    """
    # Without marker support there is nothing extra to do.
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return orig(ui, repo, *args, **opts)

    # Remember the changelog size so we can identify which revisions the
    # pull added (everything in [maxrevbeforepull, maxrevafterpull)).
    maxrevbeforepull = len(repo.changelog)
    r = orig(ui, repo, *args, **opts)
    maxrevafterpull = len(repo.changelog)

    # Collect the diff number of the landed diffs
    landeddiffs = {}
    for rev in range(maxrevbeforepull, maxrevafterpull):
        n = repo[rev]
        if n.phase() == phases.public:
            # getdiff() may return None for commits with no diff number.
            diff = getdiff(n)
            if diff is not None:
                landeddiffs[diff] = n

    if not landeddiffs:
        return r

    # Try to find match with the drafts
    tocreate = []
    # Use the unfiltered repo so already-obsolete revisions are visible to
    # the revset and excluded explicitly.
    unfiltered = repo.unfiltered()
    for rev in unfiltered.revs("draft() - obsolete()"):
        n = unfiltered[rev]
        diff = getdiff(n)
        # Skip self-matches: a draft must not obsolete itself.
        if diff in landeddiffs and landeddiffs[diff].rev() != n.rev():
            tocreate.append((n, (landeddiffs[diff],)))

    if not tocreate:
        return r

    # Marker creation needs the store lock and a transaction.
    with unfiltered.lock(), unfiltered.transaction('pullcreatemarkers'):
        obsolete.createmarkers(unfiltered, tocreate)

    return r
def catcommit(ui, repo, n, prefix, ctx=None):
    """Write a git-cat-file-like description of one changeset.

    Emits `tree`, `parent`, `author`, `revision`, `branch`, optionally
    `committer`/`obsolete`, a `phase` line, then the description (each line
    prefixed with `prefix` when it is non-empty), and a trailing NUL when a
    prefix is in use.
    """
    nlprefix = '\n' + prefix
    if ctx is None:
        ctx = repo[n]
    # use ctx.node() instead ??
    ui.write(("tree %s\n" % short(ctx.changeset()[0])))
    for p in ctx.parents():
        ui.write(("parent %s\n" % p))
    date = ctx.date()
    # Strip NUL bytes: NUL is used below as a record separator.
    description = ctx.description().replace("\0", "")
    ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))

    if 'committer' in ctx.extra():
        ui.write(("committer %s\n" % ctx.extra()['committer']))

    ui.write(("revision %d\n" % ctx.rev()))
    ui.write(("branch %s\n" % ctx.branch()))
    # Only report obsolescence when the repo can actually record it.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        if ctx.obsolete():
            ui.write(("obsolete\n"))
    ui.write(("phase %s\n\n" % ctx.phasestr()))

    if prefix != "":
        ui.write("%s%s\n" % (prefix,
                             description.replace('\n', nlprefix).strip()))
    else:
        ui.write(description + "\n")
    if prefix:
        # NUL-terminate the record so a consumer can split batched output.
        ui.write('\0')
def clearrebased(ui, repo, state, skipped, collapsedas=None):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node.

    With obsolescence markers enabled, each rebased revision gets a marker
    pointing at its successor (none for skipped revisions, the collapse node
    when collapsing). Otherwise the original revisions are stripped, unless
    new descendants appeared on the source branch.
    """
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        markers = []
        for rev, newrev in sorted(state.items()):
            # Negative values are sentinels (nullmerge & friends), not
            # rebased revisions.
            if newrev >= 0:
                if rev in skipped:
                    # Pruned: obsoleted with no successor.
                    succs = ()
                elif collapsedas is not None:
                    succs = (repo[collapsedas],)
                else:
                    succs = (repo[newrev],)
                markers.append((repo[rev], succs))
        if markers:
            obsolete.createmarkers(repo, markers)
    else:
        rebased = [rev for rev in state if state[rev] > nullmerge]
        if rebased:
            stripped = []
            for root in repo.set('roots(%ld)', rebased):
                # Refuse to strip if the root grew descendants outside the
                # rebased set — stripping would delete them too.
                if set(repo.changelog.descendants([root.rev()])) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    stripped.append(root.node())
            if stripped:
                # backup the old csets by default
                repair.strip(ui, repo, stripped, "all")
def capabilities(orig, repo, proto):
    """Wrap the capabilities wire command to advertise the experimental
    push-obsmarker capabilities when obsmarker exchange is enabled."""
    base = orig(repo, proto)
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return base
    return (base
            + ' _push_experiment_pushobsmarkers_0'
            + ' _push_experiment_notifypushend_0')
def _buildobsolete(replacements, oldrepo, newrepo):
    'adds obsolete markers in replacements if enabled in newrepo'
    if not obsolete.isenabled(newrepo, obsolete.createmarkersopt):
        return
    markers = []
    for src, dst in replacements.items():
        if dst == src:
            # identity mapping: nothing was rewritten
            continue
        markers.append((oldrepo[src], (newrepo[dst],)))
    obsolete.createmarkers(newrepo, markers)
def _addpushbackobsolete(repo, reply, newrevs):
    '''adds obsoletion markers to reply that are relevant to newrevs (if
    enabled)

    Best effort: a ValueError from marker encoding is reported to the user
    rather than aborting the push reply.
    '''
    if (obsolete.isenabled(repo, obsolete.exchangeopt) and
            repo.obsstore):
        try:
            markers = repo.obsstore.relevantmarkers(newrevs)
            exchange.buildobsmarkerspart(reply, markers)
        # `except E as x` instead of the Python-2-only comma form, matching
        # the style used elsewhere in this file; `exc.message` is deprecated
        # (PEP 352) and gone in Python 3 — format the exception itself.
        except ValueError as exc:
            repo.ui.status(_("can't send obsolete markers: %s") % exc)
def _buildobsolete(replacements, oldrepo, newrepo, date):
    '''return obsmarkers, add them locally (server-side) if obsstore enabled'''
    markers = []
    for oldrev, newrev in replacements.items():
        if newrev == oldrev:
            continue
        meta = {'operation': 'push', 'user': newrepo[newrev].user()}
        markers.append((oldrepo[oldrev], (newrepo[newrev],), meta))
    # The markers are returned regardless; recording them locally only
    # happens when the store supports it.
    if obsolete.isenabled(newrepo, obsolete.createmarkersopt):
        obsolete.createmarkers(newrepo, markers, date=date)
    return markers
def syncpush(orig, repo, remote):
    """wrapper for obsolete.syncpush to use the fast way if possible

    When the remote supports the experimental push-obsmarkers capability the
    markers were already transferred before the changesets, so there is
    nothing left to do; otherwise fall back to the wrapped implementation.
    """
    if not (obsolete.isenabled(repo, obsolete.exchangeopt) and repo.obsstore):
        return
    if remote.capable('_push_experiment_pushobsmarkers_0'):
        # already pushed before changeset
        # (The original body contained unreachable statements after this
        # return that referenced an undefined name `obsfp`; removed.)
        return
    return orig(repo, remote)
def capabilities(orig, repo, proto):
    """Wrap the capabilities wire command to advertise the evolve
    extension's wire-protocol features, unless disabled via the temporary
    `advertiseobsolete` knob."""
    caps = orig(repo, proto)
    advertise = repo.ui.configbool('__temporary__', 'advertiseobsolete', True)
    if not (advertise and obsolete.isenabled(repo, obsolete.exchangeopt)):
        return caps
    extra = (' _evoext_pushobsmarkers_0'
             ' _evoext_pullobsmarkers_0'
             ' _evoext_obshash_0'
             ' _evoext_obshash_1'
             ' _evoext_getbundle_obscommon')
    return caps + extra
def between(repo, old, new, keep):
    """select and validate the set of revision to edit

    When keep is false, the specified set can't have children."""
    ctxs = list(repo.set('%n::%n', old, new))
    if ctxs and not keep:
        unstableok = obsolete.isenabled(repo, obsolete.allowunstableopt)
        if not unstableok and repo.revs('(%ld::) - (%ld)', ctxs, ctxs):
            raise util.Abort(_('cannot edit history that would orphan nodes'))
        if repo.revs('(%ld) and merge()', ctxs):
            raise util.Abort(_('cannot edit history that contains merges'))
        # repo.set yields revisions in order, so the first entry is the root
        root = ctxs[0]
        if not root.mutable():
            raise util.Abort(_('cannot edit immutable changeset: %s') % root)
    return [c.node() for c in ctxs]
def augmented_push(orig, repo, remote, *args, **kwargs):
    """push wrapped that call the wire protocol command

    If the remote supports it, obsolescence markers are pushed *before* the
    changesets; after the wrapped push, the remote is notified that the push
    has ended.
    """
    if not remote.canpush():
        raise error.Abort(_("destination does not support push"))
    if (obsolete.isenabled(repo, obsolete.exchangeopt) and repo.obsstore
            and remote.capable('_push_experiment_pushobsmarkers_0')):
        # push marker early to limit damage of pushing too early.
        try:
            obsfp = repo.svfs('obsstore')
        except IOError as e:
            # A missing obsstore file just means "no markers yet"; anything
            # else is a real error.
            if e.errno != errno.ENOENT:
                raise
        else:
            # NOTE(review): obsfp is never explicitly closed here —
            # presumably the peer method consumes it; confirm it does not
            # leak the file handle.
            remote.push_experiment_pushobsmarkers_0(obsfp)
    ret = orig(repo, remote, *args, **kwargs)
    if remote.capable('_push_experiment_notifypushend_0'):
        remote.push_experiment_notifypushend_0()
    return ret
def createrebasepart(repo, peer, outgoing, onto, newhead):
    """Build the bundle2 part carrying a push-rebase request.

    Aborts when there is nothing to push or the server lacks the rebase part
    handler. Returns a mandatory bundle2 part containing the changegroup of
    outgoing.missing plus the rebase parameters.
    """
    if not outgoing.missing:
        raise error.Abort(_('no changesets to rebase'))
    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)
    # Reject revsets the server-side rebase could not handle.
    validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing))
    cg = changegroup.makestream(repo, outgoing, '01', 'push')
    # Explicitly notify the server what obsmarker versions the client supports
    # so the client could receive marker from the server.
    #
    # The core mercurial logic will do the right thing (enable obsmarker
    # capabilities in the pushback bundle) if obsmarker exchange is enabled
    # client-side.
    #
    # But we want the marker without enabling marker exchange, and our server
    # could reply a marker without exchange or even obsstore enabled. So we
    # bypass the "standard" way of capabilities check by sending the supported
    # versions directly in our own part. Note: do not enable "exchange" because
    # it has an unwanted side effect: pushing markers from client to server.
    #
    # "createmarkers" is all we need to be able to write a new marker.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        # NUL-separated list of supported obsmarker format versions.
        obsmarkerversions = '\0'.join(str(v) for v in obsolete.formats)
    else:
        obsmarkerversions = ''
    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    return bundle2.bundlepart(
        rebaseparttype.upper(),
        mandatoryparams={
            'onto': onto,
            'newhead': repr(newhead),
        }.items(),
        advisoryparams={
            # advisory: (old) server could ignore this without error
            'obsmarkerversions': obsmarkerversions,
        }.items(),
        data = cg)
def _useobsolete(self):
    """() -> bool

    True when the underlying repository has obsolescence-marker creation
    enabled."""
    return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets.

    Some changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are unresolved
    conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # Mapping between the old revision id and either what is the new rebased
    # revision or what needs to be done with the old revision. The state dict
    # will be what contains most of the rebase progress state.
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            # --interactive is handled by histedit; point the user there.
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if collapsemsg and not collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise error.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            if srcf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'source and a base'))
            if revf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a base'))
            if revf and srcf:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if destf:
                dest = scmutil.revsingle(repo, destf)
            else:
                dest = repo[_destrebase(repo)]
                destf = str(dest)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return _nothingtorebase()
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return _nothingtorebase()

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise error.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            obsoletenotrebased = {}
            if ui.configbool('experimental', 'rebaseskipobsolete'):
                rebasesetrevs = set(rebaseset)
                obsoletenotrebased = _computeobsoletenotrebased(repo,
                                                                rebasesetrevs,
                                                                dest)
                # - plain prune (no successor) changesets are rebased
                # - split changesets are not rebased if at least one of the
                # changeset resulting from the split is an ancestor of dest
                rebaseset = rebasesetrevs - set(obsoletenotrebased)
            result = buildstate(repo, dest, rebaseset, collapsef,
                                obsoletenotrebased)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return _nothingtorebase()

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % repo[root],
                                  hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise error.Abort(_('cannot collapse multiple named '
                            'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                # Persist progress so --continue/--abort can pick it up.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        # stats[3] counts unresolved files after the merge.
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform,
                                                     **opts)
                    newnode = concludenode(repo, rev, p1, p2,
                                           extrafn=extrafn, editor=editor,
                                           keepbranches=keepbranchesf)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        skipped.add(rev)
                    state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif state[rev] == revprecursor:
                targetctx = repo[obsoletenotrebased[rev]]
                desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx,
                             targetctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as '
                        '%s\n')
                ui.status(msg % (desc, desctarget))
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            # NOTE(review): `rev` here is left over from the loop above (the
            # highest revision processed) — confirm this is intentional.
            newnode = concludenode(repo, rev, p1, external,
                                   commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor,
                                   keepbranches=keepbranchesf)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.activate(repo, activebookmark)
    finally:
        release(lock, wlock)
def backups(ui, repo, *pats, **opts):
    '''lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in
    each backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for
    that backup.
    '''
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
    if supportsmarkers and ui.configbool('backups', 'warnobsolescence', True):
        # Warn users of obsolescence markers that they probably don't want to
        # use backups but reflog instead
        ui.warn(msgwithcreatermarkers)

    backuppath = repo.vfs.join("strip-backup")
    # NOTE(review): relies on Python 2 filter() returning a list (sort()
    # below) — would need list(filter(...)) on Python 3.
    backups = filter(os.path.isfile, glob.glob(backuppath + "/*.hg"))
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    # Options needed by the incoming machinery used below.
    opts['bundle'] = ''
    opts['force'] = None

    if util.safehasattr(cmdutil, 'loglimit'):
        # legacy case
        loglimit = cmdutil.loglimit
        show_changeset = cmdutil.show_changeset
    else:
        # since core commit c8e2d6ed1f9e
        from mercurial import logcmdutil
        loglimit = logcmdutil.getlimit
        show_changeset = logcmdutil.changesetdisplayer

    def display(other, chlist, displayer):
        # Render the changesets of one bundle, honoring --limit,
        # --newest-first and --no-merges.
        limit = loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get('recover')
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_("%s already exists in the repo\n") % recovernode)
            return
    else:
        msg = _('Recover changesets using: hg backups --recover '
                '<changeset hash>\n\nAvailable backup changesets:')
        ui.status(msg, label="status.removed")

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, pycompat.getcwd())
        source = ui.expandpath(source)
        source, branches = hg.parseurl(source, opts.get('branch'))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _("\nwarning: unable to open bundle %s") % source
            hint = _("\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg)
            ui.warn(hint)
            continue
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Silence the incoming machinery while computing the changeset list.
        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts["bundle"], opts["force"])
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if chlist:
                if recovernode:
                    tr = lock = None
                    try:
                        lock = repo.lock()
                        if scmutil.isrevsymbol(other, recovernode):
                            ui.status(_("Unbundling %s\n") % (recovernode))
                            f = hg.openpath(ui, source)
                            gen = exchange.readbundle(ui, f, source)
                            tr = repo.transaction("unbundle")
                            if not isinstance(gen, bundle2.unbundle20):
                                gen.apply(repo, 'unbundle',
                                          'bundle:' + source)
                            if isinstance(gen, bundle2.unbundle20):
                                bundle2.applybundle(repo, gen, tr,
                                                    source='unbundle',
                                                    url='bundle:' + source)
                            tr.close()
                            # Stop after the first bundle containing the
                            # requested node.
                            break
                    finally:
                        lockmod.release(lock, tr)
                else:
                    backupdate = os.path.getmtime(source)
                    backupdate = time.strftime('%a %H:%M, %Y-%m-%d',
                                               time.localtime(backupdate))
                    ui.status("\n%s\n" % (backupdate.ljust(50)))
                    # NOTE(review): template chosen when *not* verbose looks
                    # inverted relative to the docstring — confirm intended.
                    if not ui.verbose:
                        opts['template'] = verbosetemplate
                    else:
                        ui.status("%s%s\n" %
                                  ("bundle:".ljust(13), source))
                    displayer = show_changeset(ui, other, opts, False)
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller ones

    Repeatedly prompt changes and commit message for new changesets until
    there is nothing left in the original changeset.

    If --rev was not given, split the working directory parent.

    By default, rebase connected non-obsoleted descendants onto the new
    changeset. Use --no-rebase to avoid the rebase.
    """
    opts = pycompat.byteskwargs(opts)
    revlist = []
    if opts.get('rev'):
        revlist.append(opts.get('rev'))
    revlist.extend(revs)
    with repo.wlock(), repo.lock(), repo.transaction('split') as tr:
        revs = scmutil.revrange(repo, revlist or ['.'])
        if len(revs) > 1:
            raise error.Abort(_('cannot split multiple revisions'))

        rev = revs.first()
        ctx = repo[rev]
        if rev is None or ctx.node() == nullid:
            ui.status(_('nothing to split\n'))
            return 1
        if ctx.node() is None:
            raise error.Abort(_('cannot split working directory'))

        # rewriteutil.precheck is not very useful here because:
        # 1. null check is done above and it's more friendly to return 1
        # instead of abort
        # 2. mergestate check is done below by cmdutil.bailifchanged
        # 3. unstable check is more complex here because of --rebase
        #
        # So only "public" check is useful and it's checked directly here.
        if ctx.phase() == phases.public:
            raise error.Abort(_('cannot split public changeset'),
                              hint=_("see 'hg help phases' for details"))

        descendants = list(repo.revs('(%d::) - (%d)', rev, rev))
        alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt)
        if opts.get('rebase'):
            # Skip obsoleted descendants and their descendants so the rebase
            # won't cause conflicts for sure.
            torebase = list(repo.revs('%ld - (%ld & obsolete())::',
                                      descendants, descendants))
            if not alloworphaned and len(torebase) != len(descendants):
                raise error.Abort(_('split would leave orphaned changesets '
                                    'behind'))
        else:
            if not alloworphaned and descendants:
                raise error.Abort(
                    _('cannot split changeset with children without rebase'))
            torebase = ()

        if len(ctx.parents()) > 1:
            raise error.Abort(_('cannot split a merge changeset'))

        cmdutil.bailifchanged(repo)

        # Deactivate bookmark temporarily so it won't get moved
        # unintentionally
        bname = repo._activebookmark
        if bname and repo._bookmarks[bname] != ctx.node():
            bookmarks.deactivate(repo)

        wnode = repo['.'].node()
        top = None
        try:
            top = dosplit(ui, repo, tr, ctx, opts)
        finally:
            # top is None: split failed, need update --clean recovery.
            # wnode == ctx.node(): wnode split, no need to update.
            if top is None or wnode != ctx.node():
                hg.clean(repo, wnode, show_stats=False)
        if bname:
            bookmarks.activate(repo, bname)
    # Rebase the kept descendants onto the topmost new changeset, outside
    # the split transaction.
    if torebase and top:
        dorebase(ui, repo, torebase, top)
def _isobsstoreenabled(repo):
    """Return True when the repository may create obsolescence markers."""
    return obsolete.isenabled(repo, obsolete.createmarkersopt)
def fixupamend(ui, repo):
    """Rebase any children found on the preamend changeset and strip the
    preamend changeset.

    With `fbamend.userestack` enabled, obsolescence information drives the
    fixup instead of the preamend bookmark.
    """
    wlock = None
    lock = None
    tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        current = repo['.']

        # Use obsolescence information to fix up the amend instead of relying
        # on the preamend bookmark if the user enables this feature.
        if ui.configbool('fbamend', 'userestack'):
            with repo.transaction('fixupamend') as tr:
                try:
                    common.restackonce(ui, repo, current.rev())
                except error.InterventionRequired:
                    tr.close()
                    raise
            return

        preamendname = _preamendname(repo, current.node())
        if preamendname not in repo._bookmarks:
            raise error.Abort(_('no bookmark %s') % preamendname,
                              hint=_('check if your bookmark is active'))

        old = repo[preamendname]
        if old == current:
            hint = _('please examine smartlog and rebase your changsets '
                     'manually')
            err = _('cannot automatically determine what to rebase '
                    'because bookmark "%s" points to the current changset') % \
                preamendname
            raise error.Abort(err, hint=hint)
        oldbookmarks = old.bookmarks()

        ui.status(_("rebasing the children of %s\n") % (preamendname))

        active = bmactive(repo)
        opts = {
            'rev': [str(c.rev()) for c in old.descendants()],
            'dest': current.rev(),
        }

        if opts['rev'] and opts['rev'][0]:
            rebasemod.rebase(ui, repo, **opts)

        # Delete every bookmark that still points at the preamend changeset.
        changes = [(bookmark, None) for bookmark in oldbookmarks]
        tr = repo.transaction('fixupamend')
        repo._bookmarks.applychanges(repo, tr, changes)
        # The original code closed the transaction in both branches of the
        # marker check; close once and only strip in the no-marker case
        # (with markers the old changeset is simply hidden).
        tr.close()
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            repair.strip(ui, repo, old.node(), topic='preamend-backup')

        merge.update(repo, current.node(), False, True, False)
        if active:
            bmactivate(repo, active)
    finally:
        lockmod.release(wlock, lock, tr)
def _histedit(ui, repo, state, *freeargs, **opts):
    """Core implementation of the ``hg histedit`` command.

    Determines the invocation goal (new / continue / edit-plan / abort) from
    the mutually-exclusive options, builds or restores the histedit state,
    then runs each rule through its action object and finishes by writing
    obsolescence markers or stripping replaced nodes.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if util.any((outg, revs, freeargs)):
            raise util.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # fall back to the configured default revision, if any
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))

    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        state.read()
        if not rules:
            # NOTE(review): uses state.parentctx here while the rest of this
            # function uses state.parentctxnode — verify this attribute exists.
            comment = editcomment % (state.parentctx,
                                     node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # drop blank lines and '#' comment lines from the rule file
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))

        # Recover our old commits if necessary
        if not state.topmost in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            changegroup.addchangegroup(repo, gen, 'histedit',
                                       'bundle:' + backupfile)
            os.remove(backupfile)

        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            # no rule file given: open the interactive plan editor seeded
            # with one 'pick' line per changeset
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    # main loop: persist state before each action so an interruption can
    # be resumed with --continue
    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            # no obsolescence support: physically strip replaced nodes
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def doreview(repo, ui, remote, nodes):
    """Do the work of submitting a review to a remote repo.

    :remote is a peerrepository.
    :nodes is a list of nodes to review.

    Resolves a review identifier, pushes the changesets (with obsolescence
    precursor data) via the 'pushreview' command, records the returned
    review-request ids locally, prints a per-changeset summary, and offers
    to publish draft review requests.
    """
    assert nodes
    assert 'pushreview' in getreviewcaps(remote)

    bzauth = getbugzillaauth(ui)
    if not bzauth:
        ui.warn(_('Bugzilla credentials not available. Not submitting review.\n'))
        return

    identifier = None

    # The review identifier can come from a number of places. In order of
    # priority:
    # 1. --reviewid argument passed to push command
    # 2. The active bookmark
    # 3. The active branch (if it isn't default)
    # 4. A bug number extracted from commit messages
    if repo.reviewid:
        identifier = repo.reviewid

    # TODO The server currently requires a bug number for the identifier.
    # Pull bookmark and branch names in once allowed.
    #elif repo._bookmarkcurrent:
    #    identifier = repo._bookmarkcurrent
    #elif repo.dirstate.branch() != 'default':
    #    identifier = repo.dirstate.branch()

    if not identifier:
        identifiers = set()
        for node in nodes:
            ctx = repo[node]
            bugs = parse_bugs(ctx.description().split('\n')[0])
            if bugs:
                # first bug mentioned on the first line wins
                identifier = 'bz://%s' % bugs[0]
                identifiers.add(identifier)

        if len(identifiers) > 1:
            raise util.Abort('cannot submit reviews referencing multiple '
                             'bugs', hint='limit reviewed changesets '
                             'with "-c" or "-r" arguments')

    identifier = ReviewID(identifier)

    if not identifier:
        ui.write(_('Unable to determine review identifier. Review '
            'identifiers are extracted from commit messages automatically. '
            'Try to begin one of your commit messages with "Bug XXXXXX -"\n'))
        return

    # Append irc nick to review identifier.
    # This is an ugly workaround to a limitation in ReviewBoard. RB doesn't
    # really support changing the owner of a review. It is doable, but no
    # history is stored and this leads to faulty attribution. More details
    # in bug 1034188.
    if not identifier.user:
        ircnick = ui.config('mozilla', 'ircnick', None)
        identifier.user = ircnick

    if hasattr(repo, 'mq'):
        for patch in repo.mq.applied:
            if patch.node in nodes:
                ui.warn(_('(You are using mq to develop patches. For the best '
                          'code review experience, use bookmark-based development '
                          'with changeset evolution. Read more at '
                          'http://mozilla-version-control-tools.readthedocs.org/en/latest/mozreview-user.html)\n'))
                break

    req = commonrequestdict(ui, bzauth)
    req['identifier'] = identifier.full
    req['changesets'] = []
    req['obsolescence'] = obsolete.isenabled(repo, obsolete.createmarkersopt)

    reviews = repo.reviews
    # NOTE(review): oldparentid is computed but never used below — verify
    # whether it can be removed.
    oldparentid = reviews.findparentreview(identifier=identifier.full)

    # Include obsolescence data so server can make intelligent decisions.
    obsstore = repo.obsstore
    for node in nodes:
        precursors = [hex(n) for n in obsolete.allprecursors(obsstore, [node])]
        req['changesets'].append({
            'node': hex(node),
            'precursors': precursors,
        })

    ui.write(_('submitting %d changesets for review\n') % len(nodes))

    res = calljsoncommand(ui, remote, 'pushreview', data=req,
                          httpcap='submithttp',
                          httpcommand='mozreviewsubmitseries')

    if 'error' in res:
        raise error.Abort(res['error'])

    for w in res['display']:
        ui.write('%s\n' % w)

    reviews.baseurl = res['rburl']
    newparentid = res['parentrrid']
    reviews.addparentreview(identifier.full, newparentid)
    nodereviews = {}
    reviewdata = {}

    for rid, info in sorted(res['reviewrequests'].iteritems()):
        if 'node' in info:
            node = bin(info['node'])
            nodereviews[node] = rid

        reviewdata[rid] = {
            'status': info['status'],
            'public': info['public'],
        }

        if 'reviewers' in info:
            reviewdata[rid]['reviewers'] = info['reviewers']

    reviews.remoteurl = remote.url()

    for node, rid in nodereviews.items():
        reviews.addnodereview(node, rid, newparentid)

    reviews.write()
    for rid, data in reviewdata.iteritems():
        reviews.savereviewrequest(rid, data)

    havedraft = False

    ui.write('\n')
    for node in nodes:
        rid = nodereviews[node]
        ctx = repo[node]
        # Bug 1065024 use cmdutil.show_changeset() here.
        ui.write('changeset: %s:%s\n' % (ctx.rev(), ctx.hex()[0:12]))
        ui.write('summary: %s\n' % ctx.description().splitlines()[0])
        ui.write('review: %s' % reviews.reviewurl(rid))
        if not reviewdata[rid].get('public'):
            havedraft = True
            ui.write(' (draft)')
        ui.write('\n\n')

    ui.write(_('review id: %s\n') % identifier.full)
    ui.write(_('review url: %s') % reviews.parentreviewurl(identifier.full))
    if not reviewdata[newparentid].get('public'):
        havedraft = True
        ui.write(' (draft)')
    ui.write('\n')

    # Warn people that they have not assigned reviewers for at least some
    # of their commits.
    for node in nodes:
        rd = reviewdata[nodereviews[node]]
        if not rd.get('reviewers', None):
            ui.status(_('(review requests lack reviewers; visit review url '
                        'to assign reviewers)\n'))
            break

    # Make it clear to the user that they need to take action in order for
    # others to see this review series.
    if havedraft:
        # At some point we may want an yes/no/prompt option for autopublish
        # but for safety reasons we only allow no/prompt for now.
        if ui.configbool('reviewboard', 'autopublish', True):
            ui.write('\n')
            publish = ui.promptchoice(
                _('publish these review requests now (Yn)? $$ &Yes $$ &No'))
            if publish == 0:
                publishreviewrequests(ui, remote, bzauth, [newparentid])
            else:
                ui.status(_('(visit review url to publish these review '
                            'requests so others can see them)\n'))
        else:
            ui.status(_('(visit review url to publish these review requests '
                        'so others can see them)\n'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets. In its default configuration,
    Mercurial will prevent you from rebasing published changes. See
    :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # state maps source revs to their rebased revision (or a sentinel such
    # as revtodo/nullmerge/revignored before/instead of rebasing)
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"hg help histedit\")")
            raise util.Abort(msg)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return 1
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return 1
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return 1
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return 1

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            result = buildstate(repo, dest, rebaseset, collapsef)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase public changeset %s")
                                 % repo[root],
                                 hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise util.Abort(_('cannot collapse multiple named '
                            'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                # persist progress so the rebase can be continued/aborted
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform,
                                                     **opts)
                    newnode = concludenode(repo, rev, p1, p2,
                                           extrafn=extrafn, editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    ui.warn(_('note: rebase of %d:%s created no changes '
                              'to commit\n') % (rev, ctx))
                    if not collapsef:
                        skipped.add(rev)
                        state[rev] = p1
                        ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newnode = concludenode(repo, rev, p1, external,
                                   commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.activate(repo, activebookmark)
    finally:
        release(lock, wlock)
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller changesets

    By default, split the current revision by prompting for all its hunks to be
    redistributed into new changesets.

    Use --rev to split a given changeset instead.
    """
    tr = wlock = lock = None
    newcommits = []

    revarg = (list(revs) + opts.get('rev')) or ['.']
    if len(revarg) != 1:
        msg = _("more than one revset is given")
        hnt = _("use either `hg split <rs>` or `hg split --rev <rs>`, not both")
        raise error.Abort(msg, hint=hnt)

    rev = scmutil.revsingle(repo, revarg[0])
    if opts.get('no_rebase'):
        torebase = ()
    else:
        torebase = repo.revs('descendants(%d) - (%d)', rev, rev)
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        cmdutil.bailifchanged(repo)
        if torebase:
            cmdutil.checkunfinished(repo)
        tr = repo.transaction('split')
        ctx = repo[rev]
        r = ctx.rev()
        disallowunstable = not obsolete.isenabled(repo,
                                                  obsolete.allowunstableopt)
        if disallowunstable:
            # XXX We should check head revs
            if repo.revs("(%d::) - %d", rev, rev):
                raise error.Abort(_("cannot split commit: %s not a head")
                                  % ctx)

        if len(ctx.parents()) > 1:
            raise error.Abort(_("cannot split merge commits"))
        prev = ctx.p1()
        bmupdate = common.bookmarksupdater(repo, ctx.node(), tr)
        bookactive = repo._activebookmark
        if bookactive is not None:
            repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
        bookmarks.deactivate(repo)
        # check out the parent, then restore the full diff of the split
        # target into the working directory for interactive recording
        hg.update(repo, prev)

        commands.revert(ui, repo, rev=r, all=True)
        def haschanges():
            # any of modified/added/removed/deleted means more to split
            modified, added, removed, deleted = repo.status()[:4]
            return modified or added or removed or deleted
        msg = ("HG: This is the original pre-split commit message. "
               "Edit it as appropriate.\n\n")
        msg += ctx.description()
        opts['message'] = msg
        opts['edit'] = True
        while haschanges():
            pats = ()
            cmdutil.dorecord(ui, repo, commands.commit, 'commit', False,
                             cmdutil.recordfilter, *pats, **opts)
            # TODO: Does not seem like the best way to do this
            # We should make dorecord return the newly created commit
            newcommits.append(repo['.'])
            if haschanges():
                if ui.prompt('Done splitting? [yN]', default='n') == 'y':
                    # commit whatever is left as the final piece
                    commands.commit(ui, repo, **opts)
                    newcommits.append(repo['.'])
                    break
            else:
                ui.status(_("no more change to split\n"))

        if newcommits:
            tip = repo[newcommits[-1]]
            bmupdate(tip.node())
            if bookactive is not None:
                bookmarks.activate(repo, bookactive)
            # mark the original changeset as superseded by all the pieces
            obsolete.createmarkers(repo, [(repo[r], newcommits)],
                                   operation='split')

            if torebase:
                top = repo.revs('allsuccessors(%d)', rev).last()
                common.restackonce(ui, repo, top)
        tr.close()
    finally:
        lockmod.release(tr, lock, wlock)
def _histedit(ui, repo, state, *freeargs, **opts):
    """Core implementation of ``hg histedit`` (older, function-action variant).

    Determines the goal (new / continue / abort), builds or restores the
    histedit state, runs each rule through its action function, then writes
    obsolescence markers or strips replaced nodes.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))

    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state = histeditstate(repo)
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'abort':
        state = histeditstate(repo)
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctx.node()]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            # no rule file given: open the editor seeded with one rule
            # description line per changeset
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += '\n\n'
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # drop blank lines and '#' comment lines from the rule list
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]

        state.parentctx = parentctx
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

    # main loop: persist state before each action so an interruption can
    # be resumed with --continue
    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha))
        actfunc = actiontable[action]
        state.parentctx, replacement_ = actfunc(ui, state, ha, opts)
        state.replacements.extend(replacement_)

    hg.update(repo, state.parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            # no obsolescence support: physically strip replaced nodes
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file
    modifications are to be performed (a common pattern).

    The function should return an *uncommitted* memctx holding the new
    changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is
    appropriate for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible. Before any changes are made, we
    verify the state of the repo is sufficient for transformation to occur
    and abort otherwise.

    Returns a dict mapping old nodes to their replacement nodes.
    """
    if not oldnodes:
        return {}

    # Operate on the unfiltered repo so revision numbers stay stable even
    # when changesets become hidden mid-operation.
    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort('must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort('cannot call replacechangesets when a transaction '
                         'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if
    # there are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # obsolete.isenabled was added in 3.1; fall back to the module-level
    # flag on older versions.
    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2; retry without it for
        # older versions.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise util.Abort('createfn must return a context.memctx')

            if oldctx == newctx:
                raise util.Abort('createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort('we do not allow replacements to modify files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort('unknown parent of child commit: %s' %
                                 oldctx.hex(),
                                 hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase. (Previously the memctx was committed a second
            # time under a temporary 'phases.new-commit' config override;
            # that was redundant dead code and has been removed.)
            adjustphase(repo, tr, oldctx.phase(), newnode)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        for mark, newnode in bmchanges:
            repo._bookmarks[mark] = newnode

        if bmchanges:
            repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                markers.append((repo[oldrev], (repo[newrev],)))
            obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort('active bookmark changed; '
                             'this should not occur!',
                             hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip the old changesets.
        # Stripping must happen outside the transaction (see the intrans
        # check above), hence after tr.close().
        if not obsenabled:
            stripnodes = [repo[rev].node() for rev in revmap.keys()]
            repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file
    modifications are to be performed (a common pattern).

    The function should return an *uncommitted* memctx holding the new
    changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is
    appropriate for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible. Before any changes are made, we
    verify the state of the repo is sufficient for transformation to occur
    and abort otherwise.

    Returns a dict mapping old nodes to their replacement nodes.
    """
    if not oldnodes:
        return {}

    # Operate on the unfiltered repo so revision numbers stay stable even
    # when changesets become hidden mid-operation.
    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise error.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise error.Abort('must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if util.safehasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise error.Abort('cannot call replacechangesets when a transaction '
                          'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if
    # there are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # obsolete.isenabled was added in 3.1; fall back to the module-level
    # flag on older versions.
    obsenabled = False
    if util.safehasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2; retry without it for
        # older versions.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise error.Abort('createfn must return a context.memctx')

            if oldctx == newctx:
                raise error.Abort('createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise error.Abort(
                    'we do not allow replacements to modify files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise error.Abort('unknown parent of child commit: %s' %
                                  oldctx.hex(),
                                  hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase. (Previously the memctx was committed a second
            # time under a temporary 'phases.new-commit' config override;
            # that was redundant dead code and has been removed.)
            adjustphase(repo, tr, oldctx.phase(), newnode)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, 'applychanges'):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode

                repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if util.safehasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                # Skip identity mappings; a changeset cannot obsolete itself.
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise error.Abort(
                'active bookmark changed; '
                'this should not occur!',
                hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        # Stripping must happen outside the transaction (see the intrans
        # check above), hence after tr.close().
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap