def override_pull(orig, ui, repo, source=None, **opts):
    """Wrapper for ``pull`` that supports a ``--rebase`` option.

    When --rebase is given, runs the pull with ``commands.postincoming``
    suppressed (so no automatic update happens mid-operation) and then
    rebases onto the newly pulled revisions. Otherwise delegates to the
    wrapped command ``orig``.

    Returns the pull result, or the rebase result if a rebase ran and the
    pull itself succeeded (result was falsy).
    """
    if opts.get('rebase', False):
        # Flag consulted elsewhere (presumably by other wrapped commands)
        # while the pull+rebase combo is in flight — TODO confirm readers.
        repo._isrebasing = True
        try:
            if opts.get('update'):
                # --update would race with the rebase; drop it.
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            # commands.pull does not understand --rebase; remove before call.
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # len(repo) == number of revisions; compare before/after to
            # detect whether the pull brought anything in.
            revsprepull = len(repo)
            # Temporarily disable the post-pull hook so no update/report
            # happens before we get a chance to rebase.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                # Always restore the hook, even if the pull raised.
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # Only rebase if the pull actually added revisions.
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        # Plain pull: just record the source and delegate.
        repo.lfpullsource = source
        if not source:
            source = 'default'
        result = orig(ui, repo, source, **opts)
    return result
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto the new revisions; if there was nothing to rebase, forces
    an update to the branch tip instead. Without --rebase: rejects --tool
    (which only makes sense for the rebase step) and delegates to ``orig``.
    """
    if opts.get('rebase'):
        if opts.get('update'):
            # --update would conflict with the post-pull rebase; drop it.
            del opts['update']
            ui.debug('--update and --rebase are not compatible, ignoring '
                     'the update flag\n')

        cmdutil.bailifchanged(repo)
        # len(repo) counts revisions; used to detect whether the pull
        # brought in anything worth rebasing onto.
        revsprepull = len(repo)
        # Suppress the post-pull hook so no premature update happens.
        origpostincoming = commands.postincoming
        def _dummy(*args, **kwargs):
            pass
        commands.postincoming = _dummy
        try:
            orig(ui, repo, *args, **opts)
        finally:
            # Restore the hook even if the pull raised.
            commands.postincoming = origpostincoming
        revspostpull = len(repo)
        if revspostpull > revsprepull:
            rebase(ui, repo, **opts)
            branch = repo[None].branch()
            dest = repo[branch].rev()
            if dest != repo['.'].rev():
                # there was nothing to rebase we force an update
                hg.update(repo, dest)
    else:
        if opts.get('tool'):
            raise util.Abort(_('--tool can only be used with --rebase'))
        orig(ui, repo, *args, **opts)
def pullrebaseif(orig, ui, repo, *args, **opts): '''Call rebaseif after pull if the latter has been invoked with --rebaseif''' # this function is taken in verbatim from rebase extension, with rebase replaced with rebaseif if opts.get('rebaseif'): if opts.get('update'): del opts['update'] ui.debug(_('--update and --rebaseif are not compatible, ignoring the update flag\n')) try: cmdutil.bailifchanged(repo) # 1.9 except AttributeError: cmdutil.bail_if_changed(repo) # < 1.9 revsprepull = len(repo) origpostincoming = commands.postincoming def _dummy(*args, **kwargs): pass commands.postincoming = _dummy try: orig(ui, repo, *args, **opts) finally: commands.postincoming = origpostincoming revspostpull = len(repo) if revspostpull > revsprepull: rebaseif(ui, repo, **opts) branch = repo[None].branch() dest = repo[branch].rev() if dest != repo['.'].rev(): # there was nothing to rebase we force an update hg.update(repo, dest) else: orig(ui, repo, *args, **opts)
def verify_pending_commits(repo):
    """Abort if the working directory has uncommitted changes.

    Args:
        repo: the current repository
    """
    cmdutil.bailifchanged(repo)
def checklocalchanges(repo, force=False):
    """Validate the repository state before an operation.

    Always rejects an unfinished multi-step command; additionally rejects
    uncommitted changes unless *force* is true (in which case merges are
    tolerated too). Returns the working-directory status.
    """
    status = repo.status()
    if force:
        # Forced: still refuse mid-operation state, but allow a merge.
        cmdutil.checkunfinished(repo, skipmerge=True)
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
    return status
def hide(ui, repo, *revs, **opts):
    """hide changesets and their descendants

    Hidden changesets are still accessible by their hashes which can be
    found in ``hg journal``.

    If a parent of the working directory is hidden, then the working
    directory will automatically be updated to the most recent available
    ancestor of the hidden parent.

    If there is a bookmark pointing to the commit it will be removed.
    """
    # Revisions may come positionally or via --rev; hide each plus its
    # descendants ("(%ld)::").
    revs = list(revs) + opts.pop('rev', [])
    revs = set(scmutil.revrange(repo, revs))
    hidectxs = list(repo.set("(%ld)::", revs))
    if not hidectxs:
        raise error.Abort(_('nothing to hide'))
    with repo.wlock(), repo.lock(), repo.transaction('hide') as tr:
        # revs to be hidden: refuse to touch public (immutable) history.
        for ctx in hidectxs:
            if not ctx.mutable():
                raise error.Abort(
                    _('cannot hide immutable changeset: %s') % ctx,
                    hint="see 'hg help phases' for details")
        # Walk up from the working-directory parent to the nearest
        # ancestor that is not being hidden.
        wdp = repo['.']
        newnode = wdp
        while newnode in hidectxs:
            newnode = newnode.parents()[0]
        if newnode.node() != wdp.node():
            # Moving the working directory: refuse dirty state first.
            cmdutil.bailifchanged(repo, merge=False)
            hg.update(repo, newnode, False)
            ui.status(_('working directory now at %s\n')
                      % ui.label(str(newnode), 'node'))
        # create markers: obsolete each hidden changeset with no successor.
        obsolete.createmarkers(repo, [(r, []) for r in hidectxs],
                               operation='hide')
        ui.status(_('%i changesets hidden\n') % len(hidectxs))
        # remove bookmarks pointing to hidden changesets
        hnodes = [r.node() for r in hidectxs]
        bmchanges = []
        for book, node in bookmarksmod.listbinbookmarks(repo):
            if node in hnodes:
                # None as target deletes the bookmark.
                bmchanges.append((book, None))
        repo._bookmarks.applychanges(repo, tr, bmchanges)
        if len(bmchanges) > 0:
            ui.status(_('%i bookmarks removed\n') % len(bmchanges))
def override_pull(orig, ui, repo, source=None, **opts):
    """Wrapper for ``pull`` adding --rebase support and largefile caching.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto any newly pulled revisions. Without --rebase: delegates
    to ``orig`` and afterwards caches the largefiles of every new head so
    later merges/rebases do not fail on missing largefiles.
    """
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                # --update would race with the rebase; drop it.
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            # commands.pull does not understand --rebase; remove before call.
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # len(repo) counts revisions; compare before/after the pull.
            revsprepull = len(repo)
            # Suppress the post-pull hook so no premature update happens.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                # Restore the hook even if the pull raised.
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        # Snapshot heads before the pull so we can cache only new ones.
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled,
        # we will run into a problem later if we try to merge or rebase with
        # one of these heads, so cache the largefiles now directly into the
        # system cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        heads = lfutil.getcurrentheads(repo)
        newheads = set(heads).difference(set(oldheads))
        for head in newheads:
            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
            numcached += len(cached)
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
def uncommit(ui, repo, *pats, **opts):
    """uncommit part or all of a local changeset

    This command undoes the effect of a local commit, returning the affected
    files to their uncommitted state. This means that files modified or
    deleted in the changeset will be left unchanged, and so will remain
    modified in the working directory.
    """
    with repo.wlock(), repo.lock():
        wctx = repo[None]

        # A dirty working directory is only allowed when uncommitting
        # specific files, or when explicitly configured.
        if not pats and not repo.ui.configbool('experimental',
                                               'uncommitondirtywdir'):
            cmdutil.bailifchanged(repo)
        if wctx.parents()[0].node() == node.nullid:
            raise error.Abort(_("cannot uncommit null changeset"))
        if len(wctx.parents()) > 1:
            raise error.Abort(_("cannot uncommit while merging"))
        old = repo['.']
        if not old.mutable():
            raise error.Abort(_('cannot uncommit public changesets'))
        if len(old.parents()) > 1:
            raise error.Abort(_("cannot uncommit merge changeset"))
        # Rewriting a changeset with children creates orphans unless the
        # repo explicitly allows unstable history.
        allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
        if not allowunstable and old.children():
            raise error.Abort(_('cannot uncommit changeset with children'))

        with repo.transaction('uncommit'):
            match = scmutil.match(old, pats, opts)
            # Rebuild '.' with the matched files filtered out; None means
            # the filter removed nothing.
            newid = _commitfiltered(repo, old, match, opts.get('keep'))
            if newid is None:
                ui.status(_("nothing to uncommit\n"))
                return 1
            mapping = {}
            if newid != old.p1().node():
                # Move local changes on filtered changeset
                mapping[old.node()] = (newid, )
            else:
                # Fully removed the old commit
                mapping[old.node()] = ()
            scmutil.cleanupnodes(repo, mapping, 'uncommit')
            # Re-parent the dirstate on the rewritten commit so the
            # uncommitted files show up as pending changes.
            with repo.dirstate.parentchange():
                repo.dirstate.setparents(newid, node.nullid)
                _uncommitdirstate(repo, old, match)
def override_pull(orig, ui, repo, source=None, **opts):
    """Wrapper for ``pull`` adding --rebase support and largefile caching.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto any newly pulled revisions. Without --rebase: delegates
    to ``orig`` and afterwards caches the largefiles of every new head so
    later merges/rebases do not fail on missing largefiles.
    """
    if opts.get("rebase", False):
        repo._isrebasing = True
        try:
            if opts.get("update"):
                # --update would race with the rebase; drop it.
                del opts["update"]
                ui.debug("--update and --rebase are not compatible, ignoring "
                         "the update flag\n")
            # commands.pull does not understand --rebase; remove before call.
            del opts["rebase"]
            cmdutil.bailifchanged(repo)
            # len(repo) counts revisions; compare before/after the pull.
            revsprepull = len(repo)
            # Suppress the post-pull hook so no premature update happens.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = "default"
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                # Restore the hook even if the pull raised.
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = "default"
        # Snapshot heads before the pull so we can cache only new ones.
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled,
        # we will run into a problem later if we try to merge or rebase with
        # one of these heads, so cache the largefiles now directly into the
        # system cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        heads = lfutil.getcurrentheads(repo)
        newheads = set(heads).difference(set(oldheads))
        for head in newheads:
            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
            numcached += len(cached)
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrapper for ``pull`` adding --rebase support and largefile pulling.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto newly pulled revisions. Afterwards, if --lfrev revsets were
    given (or --all-largefiles, which implies the ``pulled()`` revset),
    caches the largefiles for those revisions.
    """
    # len(repo) counts revisions; snapshot before pulling so we can tell
    # whether anything new arrived (also used by the pulled() revset).
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                # --update would race with the rebase; drop it.
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            # commands.pull does not understand --rebase; remove before call.
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # Suppress the post-pull hook so no premature update happens.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                # Restore the hook even if the pull raised.
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        # pulled() selects exactly the revisions brought in by this pull.
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # firstpulled is only valid for the duration of this pull.
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
def override_pull(orig, ui, repo, source=None, **opts):
    """Wrapper for ``pull`` adding --rebase support and largefile caching.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto any newly pulled revisions. Without --rebase: delegates
    to ``orig`` and afterwards caches the largefiles of every branch head
    so later merges/rebases do not fail on missing largefiles.
    """
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                # --update would race with the rebase; drop it.
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            # commands.pull does not understand --rebase; remove before call.
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # len(repo) counts revisions; compare before/after the pull.
            revsprepull = len(repo)
            # Suppress the post-pull hook so no premature update happens.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                # Restore the hook even if the pull raised.
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled,
        # we will run into a problem later if we try to merge or rebase with
        # one of these heads, so cache the largefiles now directly into the
        # system cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        branches = repo.branchmap()
        for branch in branches:
            heads = repo.branchheads(branch)
            for head in heads:
                (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
                numcached += len(cached)
        # BUG FIX: the %-formatting must happen OUTSIDE the _() gettext
        # call; formatting first would look up the already-interpolated
        # string in the translation catalog and never find it.
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
def uncommit(ui, repo, *pats, **opts):
    """uncommit part or all of a local changeset

    This command undoes the effect of a local commit, returning the affected
    files to their uncommitted state. This means that files modified or
    deleted in the changeset will be left unchanged, and so will remain
    modified in the working directory.

    If no files are specified, the commit will be pruned, unless --keep is
    given.
    """
    # Command options arrive as unicode kwargs on py3; normalize to bytes.
    opts = pycompat.byteskwargs(opts)

    with repo.wlock(), repo.lock():
        # A dirty working directory is only allowed when uncommitting
        # specific files, or when explicitly configured.
        if not pats and not repo.ui.configbool('experimental',
                                               'uncommitondirtywdir'):
            cmdutil.bailifchanged(repo)
        old = repo['.']
        # Shared phase/obsolescence checks for history-rewriting commands.
        rewriteutil.precheck(repo, [old.rev()], 'uncommit')
        if len(old.parents()) > 1:
            raise error.Abort(_("cannot uncommit merge changeset"))

        with repo.transaction('uncommit'):
            match = scmutil.match(old, pats, opts)
            # With file patterns the remaining commit is kept; without
            # them the whole commit is pruned unless --keep was given.
            keepcommit = opts.get('keep') or pats
            newid = _commitfiltered(repo, old, match, keepcommit)
            if newid is None:
                ui.status(_("nothing to uncommit\n"))
                return 1
            mapping = {}
            if newid != old.p1().node():
                # Move local changes on filtered changeset
                mapping[old.node()] = (newid, )
            else:
                # Fully removed the old commit
                mapping[old.node()] = ()
            scmutil.cleanupnodes(repo, mapping, 'uncommit')
            # Re-parent the dirstate on the rewritten commit and restore
            # the uncommitted files' dirstate entries.
            with repo.dirstate.parentchange():
                repo.dirstate.setparents(newid, node.nullid)
                s = repo.status(old.p1(), old, match=match)
                _fixdirstate(repo, old, repo[newid], s)
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto the new revisions; if there was nothing to rebase, forces
    an update and moves the active bookmark along. Without --rebase:
    rejects --tool and delegates to ``orig``.
    """
    if opts.get('rebase'):
        if opts.get('update'):
            # --update would conflict with the post-pull rebase; drop it.
            del opts['update']
            ui.debug('--update and --rebase are not compatible, ignoring '
                     'the update flag\n')

        # Remember where '.' was so the active bookmark can follow it.
        movemarkfrom = repo['.'].node()
        cmdutil.bailifchanged(repo)
        # len(repo) counts revisions; compare before/after the pull.
        revsprepull = len(repo)
        # Suppress the post-pull hook so no premature update happens.
        origpostincoming = commands.postincoming
        def _dummy(*args, **kwargs):
            pass
        commands.postincoming = _dummy
        try:
            orig(ui, repo, *args, **opts)
        finally:
            # Restore the hook even if the pull raised.
            commands.postincoming = origpostincoming
        revspostpull = len(repo)
        if revspostpull > revsprepull:
            # --rev option from pull conflict with rebase own --rev
            # dropping it
            if 'rev' in opts:
                del opts['rev']
            rebase(ui, repo, **opts)
            branch = repo[None].branch()
            dest = repo[branch].rev()
            if dest != repo['.'].rev():
                # there was nothing to rebase we force an update
                hg.update(repo, dest)
                if bookmarks.update(repo, [movemarkfrom],
                                    repo['.'].node()):
                    ui.status(
                        _("updating bookmark %s\n") % repo._bookmarkcurrent)
    else:
        if opts.get('tool'):
            raise util.Abort(_('--tool can only be used with --rebase'))
        orig(ui, repo, *args, **opts)
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase.

    With --rebase: pulls with ``commands.postincoming`` suppressed, then
    rebases onto the new revisions; if there was nothing to rebase, forces
    an update and moves the active bookmark along. Without --rebase:
    rejects --tool and delegates to ``orig``.
    """
    if opts.get('rebase'):
        if opts.get('update'):
            # --update would conflict with the post-pull rebase; drop it.
            del opts['update']
            ui.debug('--update and --rebase are not compatible, ignoring '
                     'the update flag\n')

        # Remember where '.' was so the active bookmark can follow it.
        movemarkfrom = repo['.'].node()
        cmdutil.bailifchanged(repo)
        # len(repo) counts revisions; compare before/after the pull.
        revsprepull = len(repo)
        # Suppress the post-pull hook so no premature update happens.
        origpostincoming = commands.postincoming
        def _dummy(*args, **kwargs):
            pass
        commands.postincoming = _dummy
        try:
            orig(ui, repo, *args, **opts)
        finally:
            # Restore the hook even if the pull raised.
            commands.postincoming = origpostincoming
        revspostpull = len(repo)
        if revspostpull > revsprepull:
            # --rev option from pull conflict with rebase own --rev
            # dropping it
            if 'rev' in opts:
                del opts['rev']
            rebase(ui, repo, **opts)
            branch = repo[None].branch()
            dest = repo[branch].rev()
            if dest != repo['.'].rev():
                # there was nothing to rebase we force an update
                hg.update(repo, dest)
                if bookmarks.update(repo, [movemarkfrom],
                                    repo['.'].node()):
                    ui.status(_("updating bookmark %s\n")
                              % repo._bookmarkcurrent)
    else:
        if opts.get('tool'):
            raise util.Abort(_('--tool can only be used with --rebase'))
        orig(ui, repo, *args, **opts)
def pullrebaseif(orig, ui, repo, *args, **opts): '''Call rebaseif after pull if the latter has been invoked with --rebaseif''' # this function is taken in verbatim from rebase extension, with rebase replaced with rebaseif if opts.get('rebaseif'): if opts.get('update'): del opts['update'] ui.debug( _('--update and --rebaseif are not compatible, ignoring the update flag\n' )) try: cmdutil.bailifchanged(repo) # 1.9 except AttributeError: cmdutil.bail_if_changed(repo) # < 1.9 revsprepull = len(repo) origpostincoming = commands.postincoming def _dummy(*args, **kwargs): pass commands.postincoming = _dummy try: orig(ui, repo, *args, **opts) finally: commands.postincoming = origpostincoming revspostpull = len(repo) if revspostpull > revsprepull: rebaseif(ui, repo, **opts) branch = repo[None].branch() dest = repo[branch].rev() if dest != repo['.'].rev(): # there was nothing to rebase we force an update hg.update(repo, dest) else: orig(ui, repo, *args, **opts)
def _histedit(ui, repo, *freeargs, **opts):
    """Core implementation of the histedit command.

    Dispatches on the invocation goal (new edit, --continue, or --abort),
    builds or restores the rule list, then applies each rule in order,
    persisting state to .hg/histedit-state between steps so an interrupted
    edit can be resumed.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    else:
        # Starting a fresh edit: refuse if one is already in progress.
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))

    if goal == 'continue':
        # Resume: reload persisted state and bootstrap from the partial
        # working-directory state.
        (parentctxnode, rules, keep, topmost,
         replacements) = readstate(repo)
        parentctx = repo[parentctxnode]
        parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
        replacements.extend(repl)
    elif goal == 'abort':
        # Abort: restore the pre-edit topmost revision and strip
        # everything the edit created so far.
        (parentctxnode, rules, keep, topmost,
         replacements) = readstate(repo)
        mapping, tmpnodes, leafs, _ntm = processreplacement(repo,
                                                            replacements)
        ui.debug('restore wc to old parent %s\n' % node.short(topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([parentctxnode]):
            if n in parentnodes:
                # wdir parent is about to be stripped: move to safety first.
                hg.clean(repo, topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rootrevs = list(repo.set('roots(%lr)', revs))
            if len(rootrevs) != 1:
                raise util.Abort(_('The specified revisions must have '
                                   'exactly one common root'))
            root = rootrevs[0].node()

        keep = opts.get('keep', False)
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            # No --commands file: build the default rule list and let the
            # user edit it interactively.
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += '\n\n'
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            # Rules supplied as a file ('-' means stdin).
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Strip blank lines and '#' comments, then validate.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        keep = opts.get('keep', False)
        replacements = []

    # Apply the remaining rules one by one, persisting state before each
    # step so --continue/--abort can recover after an interruption.
    while rules:
        writestate(repo, parentctx.node(), rules, keep, topmost,
                   replacements)
        action, ha = rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha))
        actfunc = actiontable[action]
        parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
        replacements.extend(replacement_)

    hg.update(repo, parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, topmost, ntm)
            # TODO update mq state
        if obsolete._enabled:
            # Obsolescence enabled: record markers instead of stripping.
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be useful
    for linearizing *local* changes relative to a master development tree.

    You should not rebase changesets that have already been shared with
    others. Doing so will force everybody else to perform the same rebase or
    they will end up with duplicated changesets after pulling in your
    rebased changesets.

    In its default configuration, Mercurial will prevent you from rebasing
    published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the current branch tip as the destination. (The destination
    changeset is not modified by rebasing, but new changesets are added as
    its descendants.)

    You can specify which changesets to rebase in two ways: as a "source"
    changeset or as a "base" changeset. Both are shorthand for a
    topologically related set of changesets (the "source branch"). If you
    specify source (``-s/--source``), rebase will rebase that changeset and
    all of its descendants onto dest. If you specify base (``-b/--base``),
    rebase will select ancestors of base back to but not including the
    common ancestor with dest. Thus, ``-b`` is less precise but more
    convenient than ``-s``: you can specify any changeset in the source
    branch, and rebase will select the whole branch. If you specify neither
    ``-s`` nor ``-b``, rebase uses the parent of the working directory as
    the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep`` to
    preserve the original source changesets. Some changesets in the source
    branch (e.g. merges from the destination branch) may be dropped if they
    no longer contribute any change.

    One result of the rules for selecting the destination changeset and
    source branch is that, unlike ``merge``, rebase will do nothing if you
    are at the branch tip of a named branch with two heads. You need to
    explicitly specify source and/or destination (or ``update`` to the
    other head, if it's the head of the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase or there are unresolved
    conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # state maps each source rev to its rebased rev (-1 = not done yet).
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # Resuming or aborting an interrupted rebase: restore the
            # persisted state instead of computing a new rebase set.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external,
                 activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    # NOTE(review): 'borken' is a typo in this user-facing
                    # hint ('broken') — fix in a behavior-changing pass.
                    hint = _('use "hg rebase --abort" to clear borken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            # Fresh invocation: --source/--base/--rev are mutually
            # exclusive ways of defining the rebase set.
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                rebaseset = repo.revs('(%ld)::', src)
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                rebaseset = repo.revs(
                    '(children(ancestor(%ld, %d)) and ::(%ld))::',
                    base, dest, base)
            if rebaseset:
                root = min(rebaseset)
            else:
                root = None

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination\n')
                result = None
            elif (not (keepf or obsolete._enabled)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                # Without --keep or obsolescence we would strip originals,
                # orphaning any unrebased descendants.
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            else:
                result = buildstate(repo, dest, rebaseset, collapsef)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            elif not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = repo.changelog.ancestors(
                        [target], inclusive=True)
                    external = externalparent(repo, state, targetancestors)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                                       'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # state[rev] == -1 means "not rebased yet" (skips work already
            # done when resuming with --continue).
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos,
                            ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                # Persist progress before each step so an interruption can
                # be resumed with --continue.
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge',
                                     opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state, collapsef)
                        if stats and stats[3] > 0:
                            # stats[3] counts unresolved files.
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2,
                                          extrafn=extrafn, editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Collapse mode: create the single combined commit now.
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and \
                       state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue
            # path -- this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newrev
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        # Reactivate the bookmark only if it still points at '.'.
        if (activebookmark and
                repo['.'].node() == repo._bookmarks[activebookmark]):
            bookmarks.setcurrent(repo, activebookmark)
    finally:
        release(lock, wlock)
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller changesets

    By default, split the current revision by prompting for all its hunks to be
    redistributed into new changesets.

    Use --rev to split a given changeset instead.
    """
    # Lazily-acquired resources; released together in the finally below.
    tr = wlock = lock = None
    newcommits = []

    # Accept the target revision either positionally or via --rev, but not
    # both (and not more than one of either).
    revarg = (list(revs) + opts.get('rev')) or ['.']
    if len(revarg) != 1:
        msg = _("more than one revset is given")
        hnt = _("use either `hg split <rs>` or `hg split --rev <rs>`, not both")
        raise error.Abort(msg, hint=hnt)

    rev = scmutil.revsingle(repo, revarg[0])
    if opts.get('no_rebase'):
        torebase = ()
    else:
        # Descendants (excluding the split target itself) that must be
        # rebased onto the new commits afterwards.
        torebase = repo.revs('descendants(%d) - (%d)', rev, rev)
    try:
        # wlock before lock: Mercurial's required acquisition order.
        wlock = repo.wlock()
        lock = repo.lock()
        cmdutil.bailifchanged(repo)
        if torebase:
            cmdutil.checkunfinished(repo)
        tr = repo.transaction('split')
        ctx = repo[rev]
        r = ctx.rev()
        # Without the "allow unstable" obsolete option, splitting a non-head
        # would orphan its descendants, so refuse.
        disallowunstable = not obsolete.isenabled(repo,
                                                 obsolete.allowunstableopt)
        if disallowunstable:
            # XXX We should check head revs
            if repo.revs("(%d::) - %d", rev, rev):
                raise error.Abort(_("cannot split commit: %s not a head")
                                  % ctx)
        if len(ctx.parents()) > 1:
            raise error.Abort(_("cannot split merge commits"))
        prev = ctx.p1()
        # bmupdate will later move any bookmarks on the old node to the
        # final split commit, within this transaction.
        bmupdate = common.bookmarksupdater(repo, ctx.node(), tr)
        bookactive = repo._activebookmark
        if bookactive is not None:
            repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
        bookmarks.deactivate(repo)
        # Check out the parent, then revert the working copy to the split
        # target so its full diff sits uncommitted in the working directory.
        hg.update(repo, prev)
        commands.revert(ui, repo, rev=r, all=True)

        def haschanges():
            # True while any part of the original diff is still uncommitted.
            modified, added, removed, deleted = repo.status()[:4]
            return modified or added or removed or deleted
        msg = ("HG: This is the original pre-split commit message. "
               "Edit it as appropriate.\n\n")
        msg += ctx.description()
        opts['message'] = msg
        opts['edit'] = True
        # Interactively record hunks into new commits until the working
        # directory is clean (or the user opts to commit the remainder).
        while haschanges():
            pats = ()
            cmdutil.dorecord(ui, repo, commands.commit, 'commit', False,
                             cmdutil.recordfilter, *pats, **opts)
            # TODO: Does no seem like the best way to do this
            # We should make dorecord return the newly created commit
            newcommits.append(repo['.'])
            if haschanges():
                if ui.prompt('Done splitting? [yN]', default='n') == 'y':
                    commands.commit(ui, repo, **opts)
                    newcommits.append(repo['.'])
                    break
            else:
                ui.status(_("no more change to split\n"))

        if newcommits:
            # Retarget bookmarks and reactivate the one we deactivated.
            tip = repo[newcommits[-1]]
            bmupdate(tip.node())
            if bookactive is not None:
                bookmarks.activate(repo, bookactive)
            # Mark the original changeset as superseded by the new ones.
            obsolete.createmarkers(repo, [(repo[r], newcommits)],
                                   operation='split')
            if torebase:
                top = repo.revs('allsuccessors(%d)', rev).last()
                common.restackonce(ui, repo, top)
        tr.close()
    finally:
        # Safe on partially-acquired resources: release() skips None.
        lockmod.release(tr, lock, wlock)
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file modifications
    are to be performed (a common pattern).

    The function should return an *uncommitted* memctx holding the new
    changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is appropriate
    for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible.

    Before any changes are made, we verify the state of the repo is sufficient
    for transformation to occur and abort otherwise.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort('must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort('cannot call replacechangesets when a transaction '
                         'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if there
    # are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # isenabled was introduced after the _enabled module flag; support both.
    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise util.Abort('createfn must return a context.memctx')
            if oldctx == newctx:
                raise util.Abort('createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort('we do not allow replacements to modify '
                                 'files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort('unknown parent of child commit: %s' %
                                 oldctx.hex(),
                                 hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            # BUGFIX: the memctx was previously committed a second time inside
            # a 'phases.new-commit' setconfig block, duplicating both the
            # commit and the phase bookkeeping already performed by
            # adjustphase(). Commit exactly once and retain the phase below.
            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        for mark, newnode in bmchanges:
            repo._bookmarks[mark] = newnode

        if bmchanges:
            repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                markers.append((repo[oldrev], (repo[newrev],)))
            obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort('active bookmark changed; '
                             'this should not occur!',
                             hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip the old changesets.
        # Stripping must happen outside the transaction (see intrans check
        # above), i.e. after tr.close().
        if not obsenabled:
            stripnodes = [repo[rev].node() for rev in revmap.keys()]
            repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.

    Commits outgoing changesets one at a time against their svn base
    revision, pulling each resulting svn commit back and rebasing remaining
    outgoing work on top of it. Returns an hg-command exit-style code
    (0 to signal failure to commands.py, 1 for a sane push status).
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    # bail_if_changed was renamed in hg 1.9; support both spellings.
    if hasattr(cmdutil, 'bail_if_changed'):
        cmdutil.bail_if_changed(repo)
    else:
        # Since 1.9 (d68ddccf276b)
        cmdutil.bailifchanged(repo)
    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        checkpush(force, revs)
    ui = repo.ui
    old_encoding = util.swap_out_encoding()
    # TODO: implement --rev/#rev support
    # TODO: do credentials specified in the URL still work?
    svnurl = repo.ui.expandpath(dest.svnurl)
    svn = dest.svn
    meta = repo.svnmeta(svn.uuid, svn.subdir)

    # Strategy:
    # 1. Find all outgoing commits from this head
    if len(repo.parents()) != 1:
        ui.status('Cowardly refusing to push branch merge\n')
        return 0  # results in nonzero exit status, see hg's commands.py
    workingrev = repo.parents()[0]
    ui.status('searching for changes\n')
    hashes = meta.revmap.hashes()
    outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
    if not (outgoing and len(outgoing)):
        ui.status('no changes found\n')
        return 1  # so we get a sane exit status, see hg's commands.push
    while outgoing:

        # 2. Commit oldest revision that needs to be pushed
        oldest = outgoing.pop(-1)
        old_ctx = repo[oldest]
        old_pars = old_ctx.parents()
        if len(old_pars) != 1:
            ui.status('Found a branch merge, this needs discussion and '
                      'implementation.\n')
            return 0  # results in nonzero exit status, see hg's commands.py
        # We will commit to svn against this node's parent rev. Any file-level
        # conflicts here will result in an error reported by svn.
        base_ctx = old_pars[0]
        base_revision = hashes[base_ctx.node()][0]
        svnbranch = base_ctx.branch()

        # Find most recent svn commit we have on this branch.
        # This node will become the nearest known ancestor of the pushed rev.
        oldtipctx = base_ctx
        old_children = oldtipctx.descendants()
        seen = set(c.node() for c in old_children)
        samebranchchildren = [c for c in old_children
                              if c.branch() == svnbranch
                              and c.node() in hashes]
        if samebranchchildren:
            # The following relies on descendants being sorted by rev.
            oldtipctx = samebranchchildren[-1]

        # All set, so commit now.
        try:
            pushmod.commit(ui, repo, old_ctx, meta, base_revision, svn)
        except pushmod.NoFilesException:
            ui.warn("Could not push revision %s because it had no changes "
                    "in svn.\n" % old_ctx)
            return 1

        # 3. Fetch revisions from svn
        # TODO: this probably should pass in the source explicitly - rev too?
        r = repo.pull(dest, force=force)
        assert not r or r == 0

        # 4. Find the new head of the target branch
        # We expect to get our own new commit back, but we might also get
        # other commits that happened since our last pull, or even right
        # after our own commit (race).
        # NOTE(review): if this loop never matches, newtipctx is unbound and
        # the rebase step below would raise NameError — presumably the pull
        # always brings back at least our own commit; confirm.
        for c in oldtipctx.descendants():
            if c.node() not in seen and c.branch() == svnbranch:
                newtipctx = c

        # 5. Rebase all children of the currently-pushing rev to the new head
        heads = repo.heads(old_ctx.node())
        for needs_transplant in heads:
            def extrafn(ctx, extra):
                # The just-pushed node keeps its branch; others record theirs.
                if ctx.node() == oldest:
                    return
                extra['branch'] = ctx.branch()
            # TODO: can we avoid calling our own rebase wrapper here?
            rebase(hgrebase.rebase, ui, repo, svn=True, svnextrafn=extrafn,
                   svnsourcerev=needs_transplant)
            # Reload the repo after the rebase. Do not reuse contexts
            # across this.
            newtip = newtipctx.node()
            repo = hg.repository(ui, meta.path)
            newtipctx = repo[newtip]
            # Rewrite the node ids in outgoing to their rebased versions.
            rebasemap = dict()
            for child in newtipctx.descendants():
                rebasesrc = child.extra().get('rebase_source')
                if rebasesrc:
                    rebasemap[node.bin(rebasesrc)] = child.node()
            outgoing = [rebasemap.get(n) or n for n in outgoing]
        # TODO: stop constantly creating the SVNMeta instances.
        meta = repo.svnmeta(svn.uuid, svn.subdir)
        hashes = meta.revmap.hashes()
    util.swap_out_encoding(old_encoding)
    return 1  # so we get a sane exit status, see hg's commands.push
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by
    rebasing, but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the latest (tipmost) head of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> new rev (-1 = not yet rebased).
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        extrafn = opts.get('extrafn')  # internal, used by e.g. hgsubversion
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue/--abort resume from on-disk state; they are mutually
            # exclusive with each other and with any revision selection.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or '
                                   'abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            (originalwd, target, state, skipped, collapsef, keepf,
                                keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))
            cmdutil.bailifchanged(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            # extrafn is how branch names get recorded; it cannot be both
            # caller-supplied and synthesized here.
            assert not extrafn, 'cannot use both keepbranches and extrafn'
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # -1 means not rebased yet (revs already done are skipped on
            # --continue).
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # Persist state before each step so --continue/--abort work
                # after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                                                    keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                                        targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state)
                        # stats[3] is the unresolved-file count.
                        if stats and stats[3] > 0:
                            raise util.Abort(_('unresolved conflicts (see hg '
                                        'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Single final commit for the whole collapsed branch.
            p1, p2 = defineparents(repo, min(state), target,
                                                        state,
                                                        targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] != nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg, extrafn=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                # Refuse to strip if new descendants appeared outside the
                # rebase set (they would be destroyed too).
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        # The strip above makes rollback meaningless; drop the undo data.
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller ones

    Repeatedly prompt changes and commit message for new changesets until
    there is nothing left in the original changeset.

    If --rev was not given, split the working directory parent.

    By default, rebase connected non-obsoleted descendants onto the new
    changeset. Use --no-rebase to avoid the rebase.
    """
    opts = pycompat.byteskwargs(opts)
    # The revision may come from --rev or positionally; exactly one allowed.
    revlist = []
    if opts.get(b'rev'):
        revlist.append(opts.get(b'rev'))
    revlist.extend(revs)
    with repo.wlock(), repo.lock(), repo.transaction(b'split') as tr:
        revs = scmutil.revrange(repo, revlist or [b'.'])
        if len(revs) > 1:
            raise error.Abort(_(b'cannot split multiple revisions'))

        rev = revs.first()
        ctx = repo[rev]
        if rev is None or ctx.node() == nullid:
            ui.status(_(b'nothing to split\n'))
            return 1
        if ctx.node() is None:
            raise error.Abort(_(b'cannot split working directory'))

        # rewriteutil.precheck is not very useful here because:
        # 1. null check is done above and it's more friendly to return 1
        #    instead of abort
        # 2. mergestate check is done below by cmdutil.bailifchanged
        # 3. unstable check is more complex here because of --rebase
        #
        # So only "public" check is useful and it's checked directly here.
        if ctx.phase() == phases.public:
            raise error.Abort(
                _(b'cannot split public changeset'),
                hint=_(b"see 'hg help phases' for details"),
            )

        descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
        alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt)
        if opts.get(b'rebase'):
            # Skip obsoleted descendants and their descendants so the rebase
            # won't cause conflicts for sure.
            torebase = list(
                repo.revs(b'%ld - (%ld & obsolete())::', descendants,
                          descendants))
            if not alloworphaned and len(torebase) != len(descendants):
                raise error.Abort(
                    _(b'split would leave orphaned changesets behind'))
        else:
            if not alloworphaned and descendants:
                raise error.Abort(
                    _(b'cannot split changeset with children without rebase'))
            torebase = ()

        if len(ctx.parents()) > 1:
            raise error.Abort(_(b'cannot split a merge changeset'))

        cmdutil.bailifchanged(repo)

        # Deactivate bookmark temporarily so it won't get moved
        # unintentionally
        bname = repo._activebookmark
        if bname and repo._bookmarks[bname] != ctx.node():
            bookmarks.deactivate(repo)

        wnode = repo[b'.'].node()
        top = None
        try:
            top = dosplit(ui, repo, tr, ctx, opts)
        finally:
            # top is None: split failed, need update --clean recovery.
            # wnode == ctx.node(): wnode split, no need to update.
            if top is None or wnode != ctx.node():
                hg.clean(repo, wnode, show_stats=False)
        if bname:
            bookmarks.activate(repo, bname)
    # Rebase happens outside the transaction (after it committed).
    if torebase and top:
        dorebase(ui, repo, torebase, top)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.

    The narrowspec has *include* and *exclude* expressions, with excludes
    always trumping includes: that is, if a file matches an exclude
    expression, it will be excluded even if it also matches an include
    expression. Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are
    DROPPED and replaced by the new ones specified to --addinclude and
    --addexclude. If --clear is specified without any further options, the
    narrowspec will be empty and will not match any files.
    """
    opts = pycompat.byteskwargs(opts)
    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
        # BUGFIX: corrected "respositories" typo in the user-facing message.
        ui.warn(
            _('The narrow command is only supported on repositories cloned'
              ' with --narrow.\n'))
        return 1

    # Before supporting, decide whether it "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts['clear']:
        ui.warn(_('The --clear option is not yet supported.\n'))
        return 1

    if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
        raise error.Abort('Expansion not yet supported on widen/narrow')

    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes
    only_show = not widening and not narrowing

    # Only print the current narrowspec.
    if only_show:
        include, exclude = repo.narrowpats

        ui.pager('tracked')
        fm = ui.formatter('narrow', opts)
        for i in sorted(include):
            fm.startitem()
            fm.write('status', '%s ', 'I', label='narrow.included')
            fm.write('pat', '%s\n', i, label='narrow.included')
        for i in sorted(exclude):
            fm.startitem()
            fm.write('status', '%s ', 'X', label='narrow.excluded')
            fm.write('pat', '%s\n', i, label='narrow.excluded')
        fm.end()
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or 'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)
        commoninc = discovery.findcommonincoming(repo, remote)

        oldincludes, oldexcludes = repo.narrowpats
        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes,
                    opts['force_delete_local_changes'])
            # _narrow() updated the narrowspec and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and addedexcludes will be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(ui, repo, remote, commoninc, newincludes, newexcludes)

    return 0
def _histedit(ui, repo, state, *freeargs, **opts):
    # Core histedit driver: validates option combinations, builds or restores
    # the on-disk edit state, then executes the rule list one action at a
    # time so the operation can be resumed after an interruption.

    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new'  # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if util.any((outg, revs, freeargs)):
            raise util.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # Fall back to the configured default revision, if any.
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))

    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        # Re-edit the remaining rules of an in-progress histedit.
        state.read()
        if not rules:
            comment = editcomment % (state.parentctx,
                                     node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            # rules came from --commands: read from stdin or the named file.
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comments before validating.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c]
                                          for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))

        # Recover our old commits if necessary
        if not state.topmost in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            changegroup.addchangegroup(repo, gen, 'histedit',
                                       'bundle:' + backupfile)
            os.remove(backupfile)

        # check whether we should update away
        # Only move the working directory if it currently sits on a node
        # that is about to be stripped.
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        # Fresh invocation: compute the root/revs to edit and seed state.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            # Default plan: pick every changeset, then let the user edit it.
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost],
                                        root, 'histedit')
        state.backupfile = backupfile

    # Execute the plan, persisting state before each action so --continue
    # can resume after a conflict or crash.
    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        # With obsolescence, mark old changesets obsolete instead of
        # stripping them.
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    # Stripping invalidated rollback data; remove it.
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit a linear run of history between <parent> and the
    working-directory tip: state is persisted in .hg/histedit-state so an
    interrupted edit can be resumed with --continue or rolled back with
    --abort.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # Positional args and --rev both feed the parent selection.
    parent = list(parent) + opts.get('rev', [])

    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]
        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9: pick a findoutgoing implementation matching the
            # discovery API available in this Mercurial version.
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        # Reload the persisted edit state left by a previous, interrupted run.
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip,) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # Walk down the user-created children, classifying them as either
        # temporary (for fold) or newly created nodes.
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(
                lambda x: x.node() not in existing,
                reduce(lambda x, y: x + y,
                       map(lambda r: r.children(), newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        # Commit any outstanding working-directory changes under the
        # interrupted rule's metadata.
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_,) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip,) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n'
                 % ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n'
                 % ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        # Strip in reverse order so descendants go before their ancestors.
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    # Node already gone (e.g. stripped as a descendant).
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        # Fresh run: refuse to start on a dirty tree or over an existing edit.
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        # Drop blanks and comment lines before validating.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    # Main loop: persist state before each rule so --continue/--abort can
    # recover, then dispatch the rule's action.
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_,) = actiontable[action](
            ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        ui.debug('should strip replaced nodes %s\n'
                 % ', '.join([node.hex(n)[:12] for n in replaced]))
        # py2-style cmp-based sort: strip in descending revision order.
        for n in sorted(replaced,
                        lambda x, y: cmp(repo[x].rev(), repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass
        ui.debug('should strip temp nodes %s\n'
                 % ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for n in reversed(tmpnodes):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch with two
    heads. You need to explicitly specify source and/or destination
    (or ``update`` to the other head, if it's the head of the intended
    source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of
        history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # state maps source revision -> rebased revision (or a sentinel < 0).
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn')  # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see 'hg help histedit')")
            raise util.Abort(msg)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue / --abort: reject incompatible flags, then restore
            # the persisted rebase state from disk.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            # Fresh rebase: --source/--base/--rev are mutually exclusive.
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return 1
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return 1
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return 1
                commonanc = repo.revs('ancestor(%ld, %d)', base,
                                      dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable
                    # to lists. This should be improved to honor lazyness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both '
                                        '"base" and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is also '
                                        'destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') %
                                      dest)
                    else:  # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base),
                                   dest))
                    return 1

            allowunstable = obsolete.isenabled(repo,
                                               obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                    and repo.revs('first(children(%ld) - %ld)',
                                  rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            result = buildstate(repo, dest, rebaseset, collapsef)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                                       'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # state[rev] == -1 means "not rebased yet"; anything else was
            # already handled by a previous (interrupted) run.
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos,
                            ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                # Persist progress before merging so --continue can resume.
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge',
                                     opts.get('tool', ''), 'rebase')
                        stats = rebasenode(repo, rev, p1, state, collapsef,
                                           target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = repo[p2].rev() != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform,
                                                     **opts)
                    newrev = concludenode(repo, rev, p1, p2,
                                          extrafn=extrafn, editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt,
                                             editform=editform)
            # NOTE(review): `rev` here is the last value of the loop variable
            # from the rebase loop above — presumably intentional (the final
            # rebased revision) but worth confirming.
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newrev
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
                repo['.'].node() == repo._bookmarks[activebookmark]):
            bookmarks.setcurrent(repo, activebookmark)
    finally:
        release(lock, wlock)
def fetch(ui, repo, source='default', **opts):
    '''pull changes from a remote repository, merge new changes if needed.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository.

    If the pulled changes add a new branch head, the head is
    automatically merged, and the result of the merge is committed.
    Otherwise, the working directory is updated to include the new
    changes.

    When a merge is needed, the working directory is first updated to
    the newly pulled changes. Local changes are then merged into the
    pulled changes. To switch the merge order, use --switch-parent.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # Only operate when the working directory is at the tip of its branch;
    # otherwise the automatic merge below would be ambiguous.
    parent, _p2 = repo.dirstate.parents()
    branch = repo.dirstate.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    if parent != branchnode:
        raise util.Abort(_('working dir not at branch tip '
                           '(use "hg update" to check out branch tip)'))

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        cmdutil.bailifchanged(repo)

        # Refuse to run if the branch already has multiple topological heads.
        bheads = repo.branchheads(branch)
        bheads = [head for head in bheads
                  if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise util.Abort(_('multiple heads in this branch '
                               '(use "hg heads ." and "hg merge" to merge)'))

        other = hg.peer(repo, opts, ui.expandpath(source))
        ui.status(_('pulling from %s\n') %
                  util.hidepassword(ui.expandpath(source)))
        revs = None
        if opts['rev']:
            try:
                revs = [other.lookup(rev) for rev in opts['rev']]
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise util.Abort(err)

        # Are there any changes at all?
        modheads = exchange.pull(repo, other, heads=revs).cgresult
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1 and len(newchildren):
            if newchildren[0] != parent:
                return hg.update(repo, newchildren[0])
            else:
                return 0

        # Are there more than one additional branch heads?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            ui.status(_('not merging with %d other new branch heads '
                        '(use "hg heads ." and "hg merge" to merge them)\n')
                      % (len(newheads) - 1))
            return 1

        if not newheads:
            return 0

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts['switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent

            ui.status(_('updating to %d:%s\n') %
                      (repo.changelog.rev(firstparent),
                       short(firstparent)))
            hg.clean(repo, firstparent)
            ui.status(_('merging with %d:%s\n') %
                      (repo.changelog.rev(secondparent),
                       short(secondparent)))
            err = hg.merge(repo, secondparent, remind=False)

        if not err:
            # we don't translate commit messages
            message = (cmdutil.logmessage(ui, opts) or
                       ('Automated merge with %s' %
                        util.removeauth(other.url())))
            editopt = opts.get('edit') or opts.get('force_editor')
            editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
            n = repo.commit(message, opts['user'], opts['date'],
                            editor=editor)
            ui.status(_('new changeset %d:%s merges remote changes '
                        'with local\n') % (repo.changelog.rev(n),
                                           short(n)))

        return err

    finally:
        release(lock, wlock)
def _moverelative(ui, repo, args, opts, reverse=False):
    """Update to a changeset relative to the current changeset.
       Implements both `hg previous` and `hg next`.

       Takes in a list of positional arguments and a dict of command line
       options. (See help for `hg previous` and `hg next` to see which
       arguments and flags are supported.)

       Moves forward through history by default -- the behavior of `hg next`.
       Setting reverse=True will change the behavior to that of `hg previous`.
    """
    # Parse positional argument: the number of steps to move (default 1).
    try:
        n = int(args[0]) if args else 1
    except ValueError:
        raise error.Abort(_("argument must be an integer"))
    if n <= 0:
        return

    if ui.configbool('fbamend', 'alwaysnewest'):
        opts['newest'] = True

    # Check that the given combination of arguments is valid.
    if args:
        if opts.get('bookmark', False):
            raise error.Abort(_("cannot use both number and --bookmark"))
        if opts.get('top', False):
            raise error.Abort(_("cannot use both number and --top"))
        if opts.get('bottom', False):
            raise error.Abort(_("cannot use both number and --bottom"))
    if opts.get('bookmark', False):
        if opts.get('top', False):
            raise error.Abort(_("cannot use both --top and --bookmark"))
        if opts.get('bottom', False):
            raise error.Abort(_("cannot use both --bottom and --bookmark"))
    if opts.get('towards', False) and opts.get('top', False):
        raise error.Abort(_("cannot use both --top and --towards"))
    if opts.get('merge', False) and opts.get('rebase', False):
        raise error.Abort(_("cannot use both --merge and --rebase"))

    # Check if there is an outstanding operation or uncommitted changes.
    cmdutil.checkunfinished(repo)
    if not opts.get('clean', False) and not opts.get('merge', False):
        try:
            cmdutil.bailifchanged(repo)
        except error.Abort as e:
            # Augment the stock "uncommitted changes" abort with a hint
            # pointing at the flags that would let the move proceed.
            e.hint = _("use --clean to discard uncommitted changes "
                       "or --merge to bring them along")
            raise

    # If we have both --clean and --rebase, we need to discard any
    # outstanding changes now before we attempt to perform any rebases.
    if opts.get('clean') and opts.get('rebase'):
        commands.update(ui, repo, rev=repo['.'].rev(), clean=True)

    with repo.wlock(), repo.lock():
        # Record the active bookmark, if any.
        bookmark = repo._activebookmark
        noactivate = opts.get('no_activate_bookmark', False)
        movebookmark = opts.get('move_bookmark', False)

        with repo.transaction('moverelative') as tr:
            # Find the desired changeset. May potentially perform rebase.
            try:
                target = _findtarget(ui, repo, n, opts, reverse)
            except error.InterventionRequired:
                # Rebase failed. Need to manually close transaction to allow
                # `hg rebase --continue` to work correctly.
                tr.close()
                raise

            # Move the active bookmark if necessary. Needs to happen before
            # we update to avoid getting a 'leaving bookmark X' message.
            if movebookmark and bookmark is not None:
                _setbookmark(repo, tr, bookmark, target)

            # Update to the target changeset.
            commands.update(ui, repo, rev=target,
                            clean=opts.get('clean', False))

            # Print out the changeset we landed on.
            _showchangesets(ui, repo, revs=[target])

            # Activate the bookmark on the new changeset.
            if not noactivate and not movebookmark:
                _activate(ui, repo, target)
def cmd_format_source(ui, repo, tool, *pats, **opts):
    """register a tool to format source files during merges and rebases

    Record a mapping from the given file pattern FILES to a source formatting
    tool TOOL. Mappings are stored in the version-controlled file
    (automatically committed when format-source is used) .hg-format-source
    in the root of the checkout.

    The mapping causes TOOL to be run on FILES during future merge and
    rebase operations.

    The actual command run for TOOL needs to be registered in the config.
    See :hg:`help -e format-source` for details.
    """
    if repo.getcwd():
        msg = _("format-source must be run from repository root")
        hint = _("cd %s") % repo.root
        raise error.Abort(msg, hint=hint)

    if not pats:
        raise error.Abort(_('no files specified'))

    # XXX We support glob pattern only for now, the recursive behavior of
    # various others is a bit wonky.
    for pattern in pats:
        if not pattern.startswith('glob:'):
            msg = _("format-source only supports explicit 'glob' patterns "
                    "for now ('%s')")
            msg %= pattern
            hint = _('maybe try with "glob:%s"') % pattern
            raise error.Abort(msg, hint=hint)

    # lock the repo to make sure no content is changed
    with repo.wlock():
        # formatting tool
        if ' ' in tool:
            raise error.Abort(_("tool name cannot contain space: '%s'")
                              % tool)

        # if tool was not specified in the cfg maybe we can use our mozilla
        # firefox in-tree clang-format tool
        if should_use_default(repo, tool):
            shell_tool, tool_config_files, file_ext = \
                return_default_clang_format(repo)
        else:
            shell_tool = repo.ui.config('format-source', tool)
            tool_config_files = repo.ui.configlist('format-source',
                                                   '%s:configpaths' % tool)
            file_ext = tuple(repo.ui.configlist('format-source',
                                                '%s:fileext' % tool))

        if not shell_tool:
            # BUGFIX: these messages use printf-style '%s' placeholders, so
            # they must be interpolated with '%', not str.format() (which is
            # a no-op on '%s' and printed the raw placeholders).
            msg = _("unknown format tool: %s (no 'format-source.%s' config)")
            raise error.Abort(msg % (tool, tool))
        if not file_ext:
            # BUGFIX: keep the literal inside _() for translation lookup and
            # interpolate afterwards (was formatted before translation and
            # then no-op re-formatted).
            raise error.Abort(_("no %s:fileext present") % tool)
        cmdutil.bailifchanged(repo)
        cmdutil.checkunfinished(repo, commit=True)
        wctx = repo[None]
        # files to be formatted
        matcher = scmutil.match(wctx, pats, opts)
        files = list(wctx.matches(matcher))

        if util.versiontuple(n=2) >= (4, 7):
            # In 4.7 we have ui.makeprogress
            with ui.makeprogress(_('formatting'), unit=_('files'),
                                 total=len(files)) as progress:
                proc = worker.worker(ui, 0.1, batchformat,
                                     (repo, wctx, tool, shell_tool,
                                      file_ext), files)
                for filepath in proc:
                    progress.increment(item=filepath)
        else:
            proc = worker.worker(ui, 0.1, batchformat,
                                 (repo, wctx, tool, shell_tool, file_ext),
                                 files)
            # Wait for everything to finish
            for filepath in proc:
                pass

        # update the storage to mark formatted file as formatted
        with repo.wvfs(file_storage_path, mode='ab') as storage:
            for pattern in pats:
                # XXX if pattern was relative, we need to reroot it from the
                # repository root. For now we constrained the command to run
                # at the root of the repository.
                data = {'tool': encoding.unifromlocal(tool),
                        'pattern': encoding.unifromlocal(pattern)}
                if tool_config_files:
                    data['configpaths'] = [encoding.unifromlocal(path)
                                           for path in tool_config_files]
                entry = json.dumps(data, sort_keys=True)
                # one JSON object per line; embedded newlines would corrupt
                # the storage file
                assert '\n' not in entry
                storage.write('%s\n' % entry)

        if file_storage_path not in wctx:
            storage_matcher = scmutil.match(wctx,
                                            ['path:' + file_storage_path])
            cmdutil.add(ui, repo, storage_matcher, '', True)

        # commit the whole
        with repo.lock():
            commit_patterns = ['path:' + file_storage_path]
            commit_patterns.extend(pats)
            return commands._docommit(ui, repo, *commit_patterns, **opts)
def do_backout(ui, repo, rev, handle_change, commit_change, use_mq=False,
               reverse_order=False, **opts):
    """Back out (or, with --apply, reapply) one or more changesets.

    handle_change/commit_change are caller-supplied callbacks that import
    the generated patch and commit the result; use_mq routes the patch
    through mq instead.
    """
    if not opts.get('force'):
        ui.status('checking for uncommitted changes\n')
        cmdutil.bailifchanged(repo)
    # Default mode is backout; --apply flips every user-facing verb.
    backout = not opts.get('apply')
    desc = {'action': 'backout',
            'Actioned': 'Backed out',
            'actioning': 'backing out',
            'name': 'backout'
            }
    if not backout:
        desc = {'action': 'apply',
                'Actioned': 'Reapplied',
                'actioning': 'Reapplying',
                'name': 'patch'
                }

    rev = scmutil.revrange(repo, rev)
    if len(rev) == 0:
        raise util.Abort('at least one revision required')

    csets = [repo[r] for r in rev]
    csets.sort(reverse=reverse_order, key=lambda cset: cset.rev())

    new_opts = opts.copy()

    def bugs_suffix(bugs):
        # Render a ' (bug N)' / ' (bug A, bug B)' suffix for commit messages.
        if len(bugs) == 0:
            return ''
        elif len(bugs) == 1:
            return ' (bug ' + list(bugs)[0] + ')'
        else:
            return ' (' + ', '.join(map(lambda b: 'bug %s' % b, bugs)) + ')'

    def parse_bugs(msg):
        # Extract bug number(s) from a commit message via BUG_RE.
        bugs = set()
        m = BUG_RE.search(msg)
        if m:
            bugs.add(m.group(2))
        return bugs

    def apply_change(node, reverse, push_patch=True, name=None):
        # Generate a (possibly reversed) git-style diff for `node` and feed
        # it to handle_change via the ui's input stream.
        p1, p2 = repo.changelog.parents(node)
        if p2 != nullid:
            raise util.Abort('cannot %s a merge changeset' % desc['action'])

        # NOTE(review): this mutates the shared mdiff.defaultopts instance
        # (setting .git=True globally) rather than building fresh diffopts —
        # looks like an unintended global side effect; confirm before
        # changing.
        opts = mdiff.defaultopts
        opts.git = True
        rpatch = StringIO.StringIO()
        orig, mod = (node, p1) if reverse else (p1, node)
        for chunk in patch.diff(repo, node1=orig, node2=mod, opts=opts):
            rpatch.write(chunk)
        rpatch.seek(0)

        # Temporarily redirect the input stream to the generated patch.
        saved_stdin = None
        try:
            save_fin = ui.fin
            ui.fin = rpatch
        except:
            # Old versions of hg did not use the ui.fin mechanism
            saved_stdin = sys.stdin
            sys.stdin = rpatch

        handle_change(desc, node,
                      qimport=(use_mq and new_opts.get('nopush')))

        if saved_stdin is None:
            ui.fin = save_fin
        else:
            sys.stdin = saved_stdin

    allbugs = set()
    messages = []
    for cset in csets:
        # Hunt down original description if we might want to use it
        orig_desc = None
        orig_desc_cset = None
        orig_author = None
        r = cset
        # Follow chains of backouts/reapplies back to the original commit.
        while len(csets) == 1 or not opts.get('single'):
            ui.debug("Parsing message for %s\n" % short(r.node()))
            m = backout_re.match(r.description())
            if m:
                ui.debug(" looks like a backout of %s\n" % m.group(1))
            else:
                m = reapply_re.match(r.description())
                if m:
                    ui.debug(" looks like a reapply of %s\n" % m.group(1))
                else:
                    ui.debug(" looks like the original description\n")
                    orig_desc = r.description()
                    orig_desc_cset = r
                    orig_author = r.user()
                    break
            r = repo[m.group(1)]

        bugs = parse_bugs(cset.description())
        allbugs.update(bugs)
        node = cset.node()
        shortnode = short(node)
        ui.status('%s %s\n' % (desc['actioning'], shortnode))

        apply_change(node, backout, push_patch=(not opts.get('nopush')))

        msg = ('%s changeset %s' % (desc['Actioned'], shortnode)) + \
            bugs_suffix(bugs)
        user = None

        if backout:
            # If backing out a backout, reuse the original commit message &
            # author.
            if orig_desc_cset is not None and orig_desc_cset != cset:
                msg = orig_desc
                user = orig_author
        else:
            # If reapplying the original change, reuse the original commit
            # message & author.
            if orig_desc_cset is not None and orig_desc_cset == cset:
                msg = orig_desc
                user = orig_author

        messages.append(msg)
        if not opts.get('single') and not opts.get('nopush'):
            new_opts['message'] = messages[-1]
            # Override the user to that of the original patch author in the
            # case of --apply
            if user is not None:
                new_opts['user'] = user
            commit_change(ui, repo, desc['name'], node=node,
                          force_name=opts.get('name'), **new_opts)

    # With --single, make one combined commit covering every changeset.
    msg = ('%s %d changesets' % (desc['Actioned'], len(rev))) + \
        bugs_suffix(allbugs) + '\n'
    messages.insert(0, msg)
    new_opts['message'] = "\n".join(messages)
    if opts.get('single'):
        commit_change(ui, repo, desc['name'], revisions=rev,
                      force_name=opts.get('name'), **new_opts)
def _checkchanged(repo):
    """Return True when the working directory has uncommitted changes.

    Wraps cmdutil.bailifchanged, translating its util.Abort into a
    boolean instead of letting it propagate.
    """
    try:
        cmdutil.bailifchanged(repo)
    except util.Abort:
        return True
    return False
def _dotransplant(ui, repo, *revs, **opts):
    """Core of the transplant command: graft changesets from a source
    repository (or branch) onto the current working parent.

    Handles --continue/--stop resumption, --branch/--all selection,
    --merge, --prune and --filter, pulling remote changes into a bundle
    repo when a source URL is given.
    """
    def incwalk(repo, csets, match=util.always):
        # Yield incoming candidate nodes accepted by `match`.
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
        """Yield all nodes that are ancestors of a head but not ancestors of dest.
        If no heads are specified, the heads of repo will be used."""
        if not heads:
            heads = repo.heads()
        ancestors = []
        ctx = repo[dest]
        for head in heads:
            ancestors.append(ctx.ancestor(repo[head]).node())
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # Validate mutually exclusive option combinations up front.
        if opts.get(b'continue'):
            cmdutil.check_incompatible_arguments(opts, b'continue', [b'branch', b'all', b'merge'])
            return
        if opts.get(b'stop'):
            cmdutil.check_incompatible_arguments(opts, b'stop', [b'branch', b'all', b'merge'])
            return
        if not (opts.get(b'source') or revs or opts.get(b'merge') or opts.get(b'branch')):
            raise error.Abort(
                _(b'no source URL, branch revision, or revision '
                  b'list provided'))
        if opts.get(b'all'):
            if not opts.get(b'branch'):
                raise error.Abort(_(b'--all requires a branch revision'))
            if revs:
                raise error.Abort(
                    _(b'--all is incompatible with a revision list'))

    opts = pycompat.byteskwargs(opts)
    checkopts(opts, revs)

    if not opts.get(b'log'):
        # deprecated config: transplant.log
        opts[b'log'] = ui.config(b'transplant', b'log')
    if not opts.get(b'filter'):
        # deprecated config: transplant.filter
        opts[b'filter'] = ui.config(b'transplant', b'filter')
    tp = transplanter(ui, repo, opts)

    p1 = repo.dirstate.p1()
    if len(repo) > 0 and p1 == nullid:
        raise error.Abort(_(b'no revision checked out'))
    if opts.get(b'continue'):
        if not tp.canresume():
            raise error.StateError(_(b'no transplant to continue'))
    elif opts.get(b'stop'):
        if not tp.canresume():
            raise error.StateError(_(b'no interrupted transplant found'))
        return tp.stop(ui, repo)
    else:
        # Fresh run: refuse to start on top of unfinished ops / dirty wdir.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    sourcerepo = opts.get(b'source')
    if sourcerepo:
        # Remote source: pull candidate changesets into a bundle repo.
        peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
        heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
        target = set(heads)
        for r in revs:
            try:
                target.add(peer.lookup(r))
            except error.RepoError:
                # Revision may only exist locally; ignore lookup failure.
                pass
        source, csets, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, peer, onlyheads=sorted(target), force=True)
    else:
        source = repo
        heads = pycompat.maplist(source.lookup, opts.get(b'branch', ()))
        cleanupfn = None

    try:
        if opts.get(b'continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get(b'prune'):
            prune = {
                source[r].node()
                for r in scmutil.revrange(source, opts.get(b'prune'))
            }
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
        # revmap: source revnum -> node to transplant, applied in rev order.
        revmap = {}
        if revs:
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source[r].node()
        elif opts.get(b'all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, heads, match=matchfn)
            if opts.get(b'all'):
                revs = alltransplants
            else:
                # Interactive selection of candidates.
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
            for r in revs:
                revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        # Drop the temporary bundle repo, if one was created.
        if cleanupfn:
            cleanupfn()
def qbackout(ui, repo, rev, **opts):
    """backout a change or set of changes

    qbackout creates a new patch or patches on top of any currently-applied
    patches. If the -s/--single option is set, then all backed-out changesets
    will be rolled up into a single backout changeset. Otherwise, there will
    be one backout changeset queued up for each backed-out changeset.

    The --apply option will reapply a patch instead of backing it out, which
    can be useful when you (or someone else) has backed your patch out and
    you want to try again.

    Normally, qbackout will error out if the patch (backout or application)
    fails to apply. The --nopush option may be used to leave the patch in
    your queue without pushing it so you can fix the conflicts manually.

    Examples:
      hg qbackout -r 20 -r 30    # backout revisions 20 and 30
      hg qbackout -r 20+30       # backout revisions 20 and 30
      hg qbackout -r 20+30:32    # backout revisions 20, 30, 31, and 32
      hg qbackout -r a3a81775    # the usual revision syntax is available

    See "hg help revisions" and "hg help revsets" for more about specifying
    revisions.
    """
    if not opts.get('force'):
        ui.status('checking for uncommitted changes\n')
        cmdutil.bailifchanged(repo)
    # --apply inverts the operation: reapply a previously backed-out change.
    backout = not opts.get('apply')
    # Wording used in messages/patch names for the two modes.
    desc = {'action': 'backout',
            'Actioned': 'Backed out',
            'actioning': 'backing out',
            'name': 'backout'
            }
    if not backout:
        desc = {'action': 'apply',
                'Actioned': 'Reapplied',
                'actioning': 'Reapplying',
                'name': 'patch'
                }
    rev = scmutil.revrange(repo, rev)
    if len(rev) == 0:
        raise util.Abort('at least one revision required')
    csets = [repo[r] for r in rev]
    # Backouts are applied newest-first; --nopush flips the order again so
    # queued patches end up in push order.
    reverse_order = backout
    if opts.get('nopush'):
        reverse_order = not reverse_order
    csets.sort(reverse=reverse_order, key=lambda cset: cset.rev())
    if opts.get('single') and opts.get('name') and len(rev) > 1:
        raise util.Abort('option "-n" not valid when backing out multiple changes')
    new_opts = opts.copy()
    mq.setupheaderopts(ui, new_opts)

    def bugs_suffix(bugs):
        # Render "(bug N)" / "(bug N, bug M, ...)" for commit messages.
        if len(bugs) == 0:
            return ''
        elif len(bugs) == 1:
            return ' (bug ' + list(bugs)[0] + ')'
        else:
            return ' (' + ', '.join(map(lambda b: 'bug %s' % b, bugs)) + ')'

    def parse_bugs(msg):
        # Extract bug numbers referenced in a commit message via bug_re.
        bugs = set()
        m = bug_re.search(msg)
        if m:
            bugs.add(m.group(2))
        return bugs

    def apply_change(node, reverse, push_patch=True, name=None):
        # Build a (possibly reversed) git-style diff of `node` and import it
        # either as an applied patch or straight into the mq queue.
        p1, p2 = repo.changelog.parents(node)
        if p2 != nullid:
            raise util.Abort('cannot %s a merge changeset' % desc['action'])

        opts = mdiff.defaultopts
        opts.git = True
        rpatch = StringIO.StringIO()
        # Reversed diff (mod -> orig) implements the backout.
        orig, mod = (node, p1) if reverse else (p1, node)
        for chunk in patch.diff(repo, node1=orig, node2=mod, opts=opts):
            rpatch.write(chunk)
        rpatch.seek(0)

        saved_stdin = None
        try:
            save_fin = ui.fin
            ui.fin = rpatch
        except:
            # Old versions of hg did not use the ui.fin mechanism
            saved_stdin = sys.stdin
            sys.stdin = rpatch

        if push_patch:
            commands.import_(ui, repo, '-', force=True, no_commit=True,
                             strip=1, base='')
        else:
            mq.qimport(ui, repo, '-', name=name, rev=[], git=True)

        # Restore whichever input stream we replaced above.
        if saved_stdin is None:
            ui.fin = save_fin
        else:
            sys.stdin = saved_stdin

    allbugs = set()
    messages = []
    for cset in csets:
        # Hunt down original description if we might want to use it
        orig_desc = None
        orig_desc_cset = None
        orig_author = None
        r = cset
        # Walk chains of "backout of X" / "reapply of X" messages back to the
        # original changeset so its description/author can be reused.
        while len(csets) == 1 or not opts.get('single'):
            ui.debug("Parsing message for %s\n" % short(r.node()))
            m = backout_re.match(r.description())
            if m:
                ui.debug(" looks like a backout of %s\n" % m.group(1))
            else:
                m = reapply_re.match(r.description())
                if m:
                    ui.debug(" looks like a reapply of %s\n" % m.group(1))
                else:
                    ui.debug(" looks like the original description\n")
                    orig_desc = r.description()
                    orig_desc_cset = r
                    orig_author = r.user()
                    break
            r = repo[m.group(1)]

        bugs = parse_bugs(cset.description())
        allbugs.update(bugs)
        node = cset.node()
        shortnode = short(node)
        ui.status('%s %s\n' % (desc['actioning'], shortnode))

        if opts.get('nopush') and opts.get('single'):
            ui.fatal("--single not supported with --nopush")
        patchname = None
        if not opts.get('single'):
            patchname = opts.get('name') or '%s-%s' % (desc['name'], shortnode)
        apply_change(node, backout, push_patch=(not opts.get('nopush')),
                     name=patchname)

        msg = ('%s changeset %s' % (desc['Actioned'], shortnode)) + bugs_suffix(bugs)
        user = None

        if backout:
            # If backing out a backout, reuse the original commit message & author.
            if orig_desc_cset is not None and orig_desc_cset != cset:
                msg = orig_desc
                user = orig_author
        else:
            # If reapplying the original change, reuse the original commit message & author.
            if orig_desc_cset is not None and orig_desc_cset == cset:
                msg = orig_desc
                user = orig_author

        messages.append(msg)
        # Queue each change as its own mq patch unless rolling up (--single)
        # or leaving patches unapplied (--nopush).
        if not opts.get('single') and not opts.get('nopush'):
            new_opts['message'] = messages[-1]
            # Override the user to that of the original patch author in the case of --apply
            if user is not None:
                new_opts['user'] = user
            mq.new(ui, repo, patchname, **new_opts)
            if ui.verbose:
                ui.write("queued up patch %s\n" % patchname)

    # With --single, one combined mq patch is created at the end.
    msg = ('%s %d changesets' % (desc['Actioned'], len(rev))) + bugs_suffix(allbugs) + '\n'
    messages.insert(0, msg)
    new_opts['message'] = "\n".join(messages)
    if opts.get('single'):
        patchname = opts.get('name') or '%s-%d-changesets' % (desc['name'], len(rev))
        mq.new(ui, repo, patchname, **new_opts)
def collapse(ui, repo, **opts):
    """collapse multiple revisions into one

    Collapse combines multiple consecutive changesets into a single
    changeset, preserving any descendants of the final changeset. The
    commit messages for the collapsed changesets are concatenated and may
    be edited before the collapse is completed.
    """
    # scmutil.revrange appeared in hg 1.9; fall back for older versions.
    try:
        from mercurial import scmutil
        rng = scmutil.revrange(repo, opts['rev'])
    except ImportError:
        rng = cmdutil.revrange(repo, opts['rev'])

    if not rng:
        raise util.Abort(_('no revisions specified'))

    first = rng[0]
    last = rng[-1]
    revs = inbetween(repo, first, last)

    if not revs:
        raise util.Abort(_('revision %s is not an ancestor of revision %s\n')
                         % (first, last))
    elif len(revs) == 1:
        raise util.Abort(_('only one revision specified'))

    ui.debug(_('Collapsing revisions %s\n') % revs)

    # Sanity checks: ownership, and no children/parents escaping the range.
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                            'being collapsed, please rebase\n') % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                            'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
            'won\'t collapse.') % first)

    # bailifchanged was bail_if_changed before hg 1.9.
    try:
        cmdutil.bailifchanged(repo)
    except AttributeError:
        cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # Descendants of `last` must be re-parented onto the collapsed changeset.
    tomove = list(repo.changelog.descendants(last))
    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    origparent = repo['.'].rev()
    collapsed = None

    try:
        branch = repo[last].branch()
        collapsed = makecollapsed(ui, repo, parent, revs, branch, opts)
        movemap[max(revs)] = collapsed
        movedescendants(ui, repo, collapsed, tomove, movemap)
    except:
        # Cleanup on any failure (including KeyboardInterrupt): restore the
        # original checkout, strip the partial collapse, then re-raise.
        merge.update(repo, repo[origparent].rev(), False, True, False)
        if collapsed:
            repair.strip(ui, repo, collapsed.node(), "strip")
        raise

    if not opts['keep']:
        ui.debug(_('stripping revision %d\n') % first)
        repair.strip(ui, repo, repo[first].node(), "strip")

    ui.status(_('collapse completed\n'))
def _histedit(ui, repo, state, *freeargs, **opts):
    """Inner implementation of the histedit command.

    Dispatches between the four invocation goals (new edit, --continue,
    --edit-plan, --abort), builds/validates the rule list, then executes
    each rule, finally replacing/cleaning up the rewritten nodes.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise error.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if any((outg, revs, freeargs)):
            raise error.Abort(_('only --commands argument allowed with '
                                '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise error.Abort(_('history edit already in progress, try '
                                '--continue or --abort'))
        if outg:
            if revs:
                raise error.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise error.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # experimental config: histedit.defaultrev
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise error.Abort(
                    _('histedit requires exactly one ancestor revision'))

    replacements = []
    state.keep = opts.get('keep', False)
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        # Re-edit the rule list of an in-progress histedit.
        state.read()
        if not rules:
            comment = editcomment % (node.short(state.parentctxnode),
                                     node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comments before validation.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        try:
            state.read()
            tmpnodes, leafs = newnodestoabort(state)
            ui.debug('restore wc to old parent %s\n'
                     % node.short(state.topmost))

            # Recover our old commits if necessary
            if not state.topmost in repo and state.backupfile:
                backupfile = repo.join(state.backupfile)
                f = hg.openpath(ui, backupfile)
                gen = exchange.readbundle(ui, f, backupfile)
                tr = repo.transaction('histedit.abort')
                try:
                    if not isinstance(gen, bundle2.unbundle20):
                        gen.apply(repo, 'histedit', 'bundle:' + backupfile)
                    if isinstance(gen, bundle2.unbundle20):
                        bundle2.applybundle(repo, gen, tr,
                                            source='histedit',
                                            url='bundle:' + backupfile)
                    tr.close()
                finally:
                    tr.release()

                os.remove(backupfile)

            # check whether we should update away
            if repo.unfiltered().revs('parents() and (%n or %ln::)',
                                      state.parentctxnode, leafs | tmpnodes):
                hg.clean(repo, state.topmost)
            cleanupnode(ui, repo, 'created', tmpnodes)
            cleanupnode(ui, repo, 'temp', leafs)
        except Exception:
            if state.inprogress():
                ui.warn(_('warning: encountered an exception during histedit '
                          '--abort; the repository may not have been completely '
                          'cleaned up\n'))
            raise
        finally:
            state.clear()
        return
    else:
        # goal == 'new': validate starting conditions and build the plan.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise error.Abort(_('The specified revisions must have '
                                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, state.keep)
        if not revs:
            raise error.Abort(_('%s is not an ancestor of working directory') %
                              node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comments before validation.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    rules = state.rules[:]
    for idx, ((action, ha), (nextact, unused)) in enumerate(
            zip(rules, rules[1:] + [(None, None)])):
        if action == 'fold' and nextact == 'fold':
            state.rules[idx] = '_multifold', ha

    # Execute each rule in order, persisting state before every step so the
    # run can be resumed with --continue.
    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if supportsmarkers:
        # Only create markers if the temp nodes weren't already removed.
        obsolete.createmarkers(repo, ((repo[t], ())
                                      for t in sorted(tmpnodes)
                                      if t in repo))
    else:
        cleanupnode(ui, repo, 'temp', tmpnodes)

    if not state.keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if supportsmarkers:
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared with
    others. Doing so will force everybody else to perform the same rebase
    or they will end up with duplicated changesets after pulling in your
    rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the tipmost head of the current named branch as the destination.
    (The destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a "source"
    changeset or as a "base" changeset. Both are shorthand for a
    topologically related set of changesets (the "source branch"). If you
    specify source (``-s/--source``), rebase will rebase that changeset and
    all of its descendants onto dest. If you specify base (``-b/--base``),
    rebase will select ancestors of base back to but not including the
    common ancestor with dest. Thus, ``-b`` is less precise but more
    convenient than ``-s``: you can specify any changeset in the source
    branch, and rebase will select the whole branch. If you specify
    neither ``-s`` nor ``-b``, rebase uses the parent of the working
    directory as the base.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep``
    to preserve the original source changesets. Some changesets in the
    source branch (e.g. merges from the destination branch) may be dropped
    if they no longer contribute any change.

    One result of the rules for selecting the destination changeset and
    source branch is that, unlike ``merge``, rebase will do nothing if you
    are at the latest (tipmost) head of a named branch with two heads. You
    need to explicitly specify source and/or destination (or ``update`` to
    the other head, if it's the head of the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> rebased rev (-1 = not yet rebased).
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # Resume/abort path: no selection options may be combined.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            (originalwd, target, state, skipped, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = repo[destf]

            # Resolve the set of revisions to rebase from --rev/--source/--base.
            if revf:
                revgen = repo.set('%lr', revf)
            elif srcf:
                revgen = repo.set('(%r)::', srcf)
            else:
                base = basef or '.'
                revgen = repo.set('(children(ancestor(%r, %d)) and ::(%r))::',
                                  base, dest, base)

            rebaseset = [c.rev() for c in revgen]

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination')
                result = None
            elif not keepf and list(repo.set('first(children(%ld) - %ld)',
                                             rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            else:
                result = buildstate(repo, dest, rebaseset, detachf)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            assert not extrafn, 'cannot use both keepbranches and extrafn'
            # Preserve each changeset's named branch via commit extras.
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                                       'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # Persist progress so an interrupted rebase can --continue.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state)
                        # stats[3] counts unresolved files after the merge.
                        if stats and stats[3] > 0:
                            raise util.Abort(_('unresolved conflicts (see hg '
                                        'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Single collapsed commit for the whole rebased set.
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] != nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v != nullmerge:
                    nstate[repo[k].node()] = repo[v].node()

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        if currentbookmarks:
            updatebookmarks(repo, nstate, currentbookmarks, **opts)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def do_collapse(ui, repo, first, last, revs, movelog, timedelta, opts):
    """Perform the actual collapse of `revs` (first..last) into one changeset.

    Validates ownership and range closure, records .hgtags state from the
    heads so tags can be fixed up afterwards, creates the collapsed
    changeset, re-parents descendants, and finally strips the originals
    unless --keep was given. With --noop only the checks are run.
    """
    ui.debug(_('Collapsing revisions %s\n') % revs)

    # Optional artificial delay between moves, for debugging race behaviour.
    if opts['debugdelay']:
        debug_delay = float(opts['debugdelay'])
    else:
        debug_delay = False

    # Sanity checks: ownership, and no children/parents escaping the range.
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                            'being collapsed, please rebase\n') % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                            'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
            'won\'t collapse.') % first)

    # bailifchanged was bail_if_changed before hg 1.9.
    try:
        cmdutil.bailifchanged(repo)
    except AttributeError:
        cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # Descendants of `last` must be re-parented onto the collapsed changeset.
    tomove = list(repo.changelog.descendants(last))

    # Snapshot .hgtags content so tag references can be rewritten later.
    head_hgtags = get_hgtags_from_heads(ui, repo, last)
    if '.hgtags' in parent:
        parent_hgtags = parent['.hgtags'].data()
    else:
        parent_hgtags = False

    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    tagsmap = dict()
    if opts['noop']:
        ui.status(_('noop: not collapsing\n'))
    else:
        origparent = repo['.'].rev()
        collapsed = None

        try:
            branch = repo[last].branch()
            collapsed = makecollapsed(ui, repo, parent, revs, branch, tagsmap,
                                      parent_hgtags, movelog, opts)
            movemap[max(revs)] = collapsed
            movedescendants(ui, repo, collapsed, tomove, movemap,
                            tagsmap, parent_hgtags, movelog, debug_delay)
            fix_hgtags(ui, repo, head_hgtags, tagsmap)
        except:
            # Cleanup on any failure (including KeyboardInterrupt): restore
            # the original checkout, strip the partial collapse, re-raise.
            merge.update(repo, repo[origparent].rev(), False, True, False)
            if collapsed:
                repair.strip(ui, repo, collapsed.node(), "strip")
            raise

        if not opts['keep']:
            ui.debug(_('stripping revision %d\n') % first)
            repair.strip(ui, repo, repo[first].node(), "strip")

        ui.status(_('collapse completed\n'))
def collapse(ui, repo, **opts):
    """collapse multiple revisions into one

    Collapse combines multiple consecutive changesets into a single
    changeset, preserving any descendants of the final changeset. The
    commit messages for the collapsed changesets are concatenated and may
    be edited before the collapse is completed.
    """
    # scmutil.revrange appeared in hg 1.9; fall back for older versions.
    try:
        from mercurial import scmutil
        rng = scmutil.revrange(repo, opts['rev'])
    except ImportError:
        rng = cmdutil.revrange(repo, opts['rev'])

    if not rng:
        raise util.Abort(_('no revisions specified'))

    first = rng[0]
    last = rng[-1]
    revs = inbetween(repo, first, last)

    if not revs:
        raise util.Abort(
            _('revision %s is not an ancestor of revision %s\n')
            % (first, last))
    elif len(revs) == 1:
        raise util.Abort(_('only one revision specified'))

    ui.debug(_('Collapsing revisions %s\n') % revs)

    # Sanity checks: ownership, and no children/parents escaping the range.
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(
                _('revision %s does not belong to %s\n')
                % (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(
                            _('revision %s has child %s not '
                              'being collapsed, please rebase\n')
                            % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(
                            _('revision %s has parent %s not '
                              'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(
            _('start revision %s has multiple parents, '
              'won\'t collapse.') % first)

    # bailifchanged was bail_if_changed before hg 1.9.
    try:
        cmdutil.bailifchanged(repo)
    except AttributeError:
        cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # Descendants of `last` must be re-parented onto the collapsed changeset.
    tomove = list(repo.changelog.descendants(last))
    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    origparent = repo['.'].rev()
    collapsed = None

    try:
        branch = repo[last].branch()
        collapsed = makecollapsed(ui, repo, parent, revs, branch, opts)
        movemap[max(revs)] = collapsed
        movedescendants(ui, repo, collapsed, tomove, movemap)
    except:
        # Cleanup on any failure (including KeyboardInterrupt): restore the
        # original checkout, strip the partial collapse, then re-raise.
        merge.update(repo, repo[origparent].rev(), False, True, False)
        if collapsed:
            repair.strip(ui, repo, collapsed.node(), "strip")
        raise

    if not opts['keep']:
        ui.debug(_('stripping revision %d\n') % first)
        repair.strip(ui, repo, repo[first].node(), "strip")

    ui.status(_('collapse completed\n'))
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.

    Pushes outgoing changesets from the working directory's head to the
    Subversion repository behind `dest`, rebasing and re-pulling as it
    goes so the local DAG tracks what Subversion actually committed.
    Returns 0 on refusal (merge found), 1 for a sane exit status
    otherwise (see hg's commands.push).
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    cmdutil.bailifchanged(repo)

    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial: it now takes a pushoperation.
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            # Older Mercurial: legacy (force, revs) signature.
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    # obsolete._enabled is only present on some Mercurial versions;
    # treat any failure as "obsolescence unavailable".
    try:
        hasobsolete = obsolete._enabled
    except:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push
        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        # Track files touched so far so later changesets that collide
        # get rebased onto the refreshed tip first.
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo, dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                # Find the rebased copy of current_ctx via its
                # rebase_source extra; it is temporary (keep=True above).
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently trigger the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])
                        tip_ctx = c

                        # Remember what files have been modified since the
                        # whole push started.
                        for file in c.files():
                            modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' %
                                  (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()

        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        if hasobsolete:
            for marker in obsmarkers:
                obsolete.createmarkers(repo, marker)
                beforepush = marker[0][0]
                afterpush = marker[0][1][0]
                ui.note('marking %s as obsoleted by %s\n' %
                        (beforepush.hex(), afterpush.hex()))
        else:
            # strip the original changesets since the push was
            # successful and changeset obsolescence is unavailable
            util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                if hasobsolete:
                    relations = ((repo[n], ()) for n in temporary_commits)
                    obsolete.createmarkers(repo, relations)
                else:
                    util.strip(ui, repo, temporary_commits, backup=None)
        finally:
            util.swap_out_encoding(old_encoding)

    return 1 # so we get a sane exit status, see hg's commands.push
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command edits changesets between ANCESTOR and the parent of
    the working directory.

    With --outgoing, this edits changesets not found in the
    destination repository. If URL of the destination is omitted, the
    'default-push' (or 'default') path will be used.

    For safety, this command is aborted, also if there are ambiguous
    outgoing revisions which may confuse users: for example, there are
    multiple branches containing outgoing revisions.

    Use "min(outgoing() and ::.)" or similar revset specification
    instead of --outgoing to specify edit target revision exactly in
    such ambiguous situation. See :hg:`help revsets` for detail about
    selecting revisions.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, "mq", None)
    if mq and mq.applied:
        raise util.Abort(_("source has mq patches applied"))

    # basic argument incompatibility processing
    outg = opts.get("outgoing")
    cont = opts.get("continue")
    abort = opts.get("abort")
    force = opts.get("force")
    rules = opts.get("commands", "")
    revs = opts.get("rev", [])
    goal = "new"  # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_("--force only allowed with --outgoing"))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --continue"))
        goal = "continue"
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --abort"))
        goal = "abort"
    else:
        if os.path.exists(os.path.join(repo.path, "histedit-state")):
            raise util.Abort(_("history edit already in progress, try "
                               "--continue or --abort"))
        if outg:
            if revs:
                raise util.Abort(_("no revisions allowed with --outgoing"))
            if len(freeargs) > 1:
                raise util.Abort(
                    _("only one repo argument allowed with --outgoing"))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _("histedit requires exactly one ancestor revision"))

    if goal == "continue":
        # Resume an interrupted edit from the on-disk state file.
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        parentctx = repo[parentctxnode]
        parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
        replacements.extend(repl)
    elif goal == "abort":
        # Roll back: restore the working copy and strip temporary and
        # created nodes recorded in the state file.
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
        ui.debug("restore wc to old parent %s\n" % node.short(topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, "created", tmpnodes)
        cleanupnode(ui, repo, "temp", leafs)
        os.unlink(os.path.join(repo.path, "histedit-state"))
        return
    else:
        # Fresh edit: determine the root, collect the rules (from the
        # user's editor or a commands file), and verify them.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            root = revs[0]
            root = scmutil.revsingle(repo, root).node()

        keep = opts.get("keep", False)
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_("%s is not an ancestor of working directory") %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = "\n".join([makedesc(c) for c in ctxs])
            rules += "\n\n"
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join("histedit-last-edit.txt"), "w")
            f.write(rules)
            f.close()
        else:
            if rules == "-":
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comment lines before verification.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == "#"]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        keep = opts.get("keep", False)
        replacements = []

    # Main loop: persist state before each action so --continue/--abort
    # can recover, then apply the action via the dispatch table.
    while rules:
        writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
        action, ha = rules.pop(0)
        ui.debug("histedit: processing %s %s\n" % (action, ha))
        actfunc = actiontable[action]
        parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
        replacements.extend(replacement_)

    hg.update(repo, parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug("histedit: %s is dropped\n" % node.short(prec))
            else:
                ui.debug("histedit: %s is replaced by %s\n" % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = "histedit: %s"
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, topmost, ntm)
            # TODO update mq state
        if obsolete._enabled:
            # Record obsolescence markers instead of stripping when the
            # evolve feature is enabled.
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, "replaced", mapping)

    cleanupnode(ui, repo, "temp", tmpnodes)
    os.unlink(os.path.join(repo.path, "histedit-state"))
    if os.path.exists(repo.sjoin("undo")):
        os.unlink(repo.sjoin("undo"))
def _histedit(ui, repo, state, *freeargs, **opts):
    """Core histedit driver operating on a histeditstate object.

    Validates the argument combination, (re)builds `state` for the
    selected goal (new / continue / abort), runs the rule loop, and
    finally replaces or obsoletes the rewritten changesets.
    `state` is a histeditstate instance; it is re-created from disk for
    the continue/abort goals.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new'  # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))

    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state = histeditstate(repo)
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'abort':
        state = histeditstate(repo)
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctx.node()]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            # Multiple -r revisions are allowed but must share a single
            # DAG root, otherwise the edit range is ambiguous.
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                                   'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += '\n\n'
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comment lines before verification.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        state.parentctx = parentctx
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

    # Main loop: persist state before each action so --continue/--abort
    # can recover, then apply the action via the dispatch table.
    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha))
        actfunc = actiontable[action]
        state.parentctx, replacement_ = actfunc(ui, state, ha, opts)
        state.replacements.extend(replacement_)

    hg.update(repo, state.parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # Record obsolescence markers instead of stripping when the
            # createmarkers feature is enabled.
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.

    Later variant of the hgsubversion push: identical workflow to the
    earlier one, but obsolescence detection also consults
    obsolete.isenabled(), and marker creation / stripping is performed
    under repo.wlock()/repo.lock(). Returns 0 on refusal (merge found),
    1 for a sane exit status otherwise (see hg's commands.push).
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    cmdutil.bailifchanged(repo)

    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial: it now takes a pushoperation.
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            # Older Mercurial: legacy (force, revs) signature.
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    # Both obsolescence APIs are probed; any failure means the feature
    # is unavailable on this Mercurial version.
    try:
        hasobsolete = (obsolete._enabled or
                       obsolete.isenabled(repo, obsolete.createmarkersopt))
    except:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push
        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        # Track files touched so far so later changesets that collide
        # get rebased onto the refreshed tip first.
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo, dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                # Find the rebased copy of current_ctx via its
                # rebase_source extra; it is temporary (keep=True above).
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently trigger the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])
                        tip_ctx = c

                        # Remember what files have been modified since the
                        # whole push started.
                        for file in c.files():
                            modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' %
                                  (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()

        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        # Marker creation and stripping mutate the changelog, so take
        # the working-dir and store locks.
        with repo.wlock():
            with repo.lock():
                if hasobsolete:
                    for marker in obsmarkers:
                        obsolete.createmarkers(repo, marker)
                        beforepush = marker[0][0]
                        afterpush = marker[0][1][0]
                        ui.note('marking %s as obsoleted by %s\n' %
                                (beforepush.hex(), afterpush.hex()))
                else:
                    # strip the original changesets since the push was
                    # successful and changeset obsolescence is unavailable
                    util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                with repo.wlock():
                    with repo.lock():
                        if hasobsolete:
                            relations = (
                                (repo[n], ()) for n in temporary_commits)
                            obsolete.createmarkers(repo, relations)
                        else:
                            util.strip(
                                ui, repo, temporary_commits, backup=None)
        finally:
            util.swap_out_encoding(old_encoding)

    return 1 # so we get a sane exit status, see hg's commands.push
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Early histedit prototype: edits history between <parent> and the
    working-directory tip, tracking rewrites by hand in created /
    replaced / tmpnodes / replacemap lists rather than a state object.
    Supports --outgoing, --continue, --abort, --keep and --commands,
    with compatibility shims for several old Mercurial versions.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))
    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)

            # hg >= 1.9: probe which outgoing-discovery API exists and
            # synthesize a uniform findoutgoing() shim for it.
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes, existing, rules, keep,
         tip, replacemap) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # Walk all descendants added during the interruption; for a
        # fold they become temporary, otherwise they count as created.
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        # Only commit if the working directory actually changed.
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_, ) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, replacemap) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        # Strip newest-first; nodes may already be gone, hence the
        # tolerated LookupError.
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comment lines before verification.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        replacemap = {}
        tmpnodes = []
        created = []

    # Main loop: persist state before each action so --continue/--abort
    # can recover, then classify each action's created/replaced nodes
    # into replacemap for later metadata propagation.
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip, replacemap)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_, ) = actiontable[action](
            ui, repo, parentctx, ha, opts)

        hexshort = lambda x: node.hex(x)[:12]

        if replaced_:
            clen, rlen = len(created_), len(replaced_)
            if clen == rlen == 1:
                # One-for-one replacement.
                ui.debug('histedit: exact replacement of %s with %s\n' % (
                    hexshort(replaced_[0]), hexshort(created_[0])))
                replacemap[replaced_[0]] = created_[0]
            elif clen > rlen:
                assert rlen == 1, ('unexpected replacement of '
                                   '%d changes with %d changes' %
                                   (rlen, clen))
                # made more changesets than we're replacing
                # TODO synthesize patch names for created patches
                replacemap[replaced_[0]] = created_[-1]
                ui.debug('histedit: created many, assuming %s replaced by '
                         '%s' % (hexshort(replaced_[0]),
                                 hexshort(created_[-1])))
            elif rlen > clen:
                if not created_:
                    # This must be a drop. Try and put our metadata on
                    # the parent change.
                    assert rlen == 1
                    r = replaced_[0]
                    ui.debug('histedit: %s seems replaced with nothing, '
                             'finding a parent\n' % (hexshort(r)))
                    pctx = repo[r].parents()[0]
                    if pctx.node() in replacemap:
                        ui.debug('histedit: parent is already replaced\n')
                        replacemap[r] = replacemap[pctx.node()]
                    else:
                        replacemap[r] = pctx.node()
                    ui.debug('histedit: %s best replaced by %s\n' % (
                        hexshort(r), hexshort(replacemap[r])))
                else:
                    # Many-to-one: e.g. a fold of several changesets.
                    assert len(created_) == 1
                    for r in replaced_:
                        ui.debug('histedit: %s replaced by %s\n' % (
                            hexshort(r), hexshort(created_[0])))
                        replacemap[r] = created_[0]
            else:
                assert False, (
                    'Unhandled case in replacement mapping! '
                    'replacing %d changes with %d changes' % (rlen, clen))
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        if replacemap:
            ui.note('histedit: Should update metadata for the following '
                    'changes:\n')
            for old, new in replacemap.iteritems():
                if old in tmpnodes or old in created:
                    # can't have any metadata we'd want to update
                    continue
                # Follow replacement chains to the final successor.
                while new in replacemap:
                    new = replacemap[new]
                ui.note('histedit: %s to %s\n' % (hexshort(old),
                                                  hexshort(new)))
                octx = repo[old]
                # bookmarks module may be absent on very old Mercurial.
                if bookmarks is not None:
                    marks = octx.bookmarks()
                    if marks:
                        ui.note('histedit: moving bookmarks %s\n' %
                                ', '.join(marks))
                        for mark in marks:
                            repo._bookmarks[mark] = new
                        bookmarks.write(repo)
        # TODO update mq state

        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced, key=lambda x: repo[x].rev()):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def wrappedpushdiscovery(orig, pushop):
    """Wraps exchange._pushdiscovery to add extra review metadata.

    We discover what nodes to review before discovery. This ensures that
    errors are discovered and reported quickly, without waiting for
    server communication.

    `orig` is the wrapped exchange._pushdiscovery; `pushop` is the push
    operation. Sets pushop.reviewnodes (and may rewrite pushop.revs when
    commit IDs are added) before delegating to `orig`.
    """
    pushop.reviewnodes = None

    caps = getreviewcaps(pushop.remote)
    if "pushreview" not in caps:
        return orig(pushop)

    ui = pushop.ui
    repo = pushop.repo

    if repo.noreviewboardpush:
        return orig(pushop)

    # If no arguments are specified to push, Mercurial will try to push all
    # non-remote changesets by default. This can result in unexpected behavior,
    # especially for people doing multi-headed development.
    #
    # Since we reject pushes with multiple heads anyway, default to pushing
    # the working copy.
    if not pushop.revs:
        pushop.revs = [repo["."].node()]

    tipnode = None
    basenode = None

    # Our prepushoutgoing hook validates that all pushed changesets are
    # part of the same DAG head. If revisions were specified by the user,
    # the last is the tip commit to review and the first (if more than 1)
    # is the base commit to review.
    #
    # Note: the revisions are in the order they were specified by the user.
    # This may not be DAG order. So we have to explicitly order them here.
    revs = sorted(repo[r].rev() for r in pushop.revs)
    tipnode = repo[revs[-1]].node()
    if len(revs) > 1:
        basenode = repo[revs[0]].node()

    if repo.pushsingle:
        basenode = tipnode

    # Given a base and tip node, find all changesets to review.
    #
    # A solution that works most of the time is to find all non-public
    # ancestors of that node. This is our default.
    #
    # If basenode is specified, we stop the traversal when we encounter it.
    #
    # Note that we will still refuse to review a public changeset even with
    # basenode. This decision is somewhat arbitrary and can be revisited later
    # if there is an actual need to review public changesets.
    nodes = [tipnode]
    # Special case where basenode is the tip node.
    if basenode and tipnode == basenode:
        pass
    else:
        # NOTE: the loop variable `node` shadows the `node` module used
        # elsewhere in this file; intentional here, but fragile.
        for node in repo[tipnode].ancestors():
            ctx = repo[node]

            if ctx.phase() == phases.public:
                break
            if basenode and ctx.node() == basenode:
                nodes.insert(0, ctx.node())
                break

            nodes.insert(0, ctx.node())

    # Filter out public nodes.
    publicnodes = []
    for node in nodes:
        ctx = repo[node]
        if ctx.phase() == phases.public:
            publicnodes.append(node)
            ui.status(_("(ignoring public changeset %s in review request)\n") %
                      ctx.hex()[0:12])

    nodes = [n for n in nodes if n not in publicnodes]
    if not nodes:
        raise util.Abort(
            _("no non-public changesets left to review"),
            hint=_("add or change the -r argument to include draft changesets"),
        )

    # We stop completely empty changesets prior to review.
    for node in nodes:
        ctx = repo[node]
        if not ctx.files():
            raise util.Abort(
                _("cannot review empty changeset %s") % ctx.hex()[:12],
                hint=_("add files to or remove changeset"))

    # Ensure all reviewed changesets have commit IDs.
    replacenodes = []
    for node in nodes:
        ctx = repo[node]
        if "commitid" not in ctx.extra():
            replacenodes.append(node)

    def addcommitid(repo, ctx, revmap, copyfilectxfn):
        # Callback for replacechangesets(): rebuild `ctx` as a memctx
        # with a generated 'commitid' in its extra dict.
        parents = newparents(repo, ctx, revmap)
        # Need to make a copy otherwise modification is made on original,
        # which is just plain wrong.
        extra = dict(ctx.extra())
        assert "commitid" not in extra
        extra["commitid"] = genid(repo)
        memctx = context.memctx(repo, parents, ctx.description(),
                                ctx.files(), copyfilectxfn, user=ctx.user(),
                                date=ctx.date(), extra=extra)
        return memctx

    if replacenodes:
        ui.status(_("(adding commit id to %d changesets)\n") %
                  (len(replacenodes)))
        nodemap = replacechangesets(repo, replacenodes, addcommitid,
                                    backuptopic="addcommitid")

        # Since we're in the middle of an operation, update references
        # to rewritten nodes.
        nodes = [nodemap.get(node, node) for node in nodes]
        pushop.revs = [nodemap.get(node, node) for node in pushop.revs]

    pushop.reviewnodes = nodes

    # Since we may rewrite changesets to contain review metadata after
    # push, abort immediately if the working directory state is not
    # compatible with rewriting. This prevents us from successfully
    # pushing and failing to update commit metadata after the push. i.e.
    # it prevents potential loss of metadata.
    #
    # There may be some scenarios where we don't rewrite after push.
    # But coding that here would be complicated. And future server changes
    # may change things like review request mapping, which may invalidate
    # client assumptions. So always assume a rewrite is needed.
    impactedrevs = list(repo.revs("%ln::", nodes))
    if repo["."].rev() in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    return orig(pushop)
def wrappedpushdiscovery(orig, pushop):
    """Wraps exchange._pushdiscovery to add extra review metadata.

    We discover what nodes to review before discovery. This ensures that
    errors are discovered and reported quickly, without waiting for
    server communication.
    """
    pushop.reviewnodes = None

    caps = getreviewcaps(pushop.remote)
    if 'pushreview' not in caps:
        # Remote does not advertise review support; fall through to a
        # plain push.
        return orig(pushop)

    ui = pushop.ui
    repo = pushop.repo

    if repo.noreviewboardpush:
        # Review explicitly disabled for this push.
        return orig(pushop)

    # If no arguments are specified to push, Mercurial will try to push all
    # non-remote changesets by default. This can result in unexpected behavior,
    # especially for people doing multi-headed development.
    #
    # Since we reject pushes with multiple heads anyway, default to pushing
    # the working copy.
    if not pushop.revs:
        pushop.revs = [repo['.'].node()]

    tipnode = None
    basenode = None

    # Our prepushoutgoing hook validates that all pushed changesets are
    # part of the same DAG head. If revisions were specified by the user,
    # the last is the tip commit to review and the first (if more than 1)
    # is the base commit to review.
    #
    # Note: the revisions are in the order they were specified by the user.
    # This may not be DAG order. So we have to explicitly order them here.
    revs = sorted(repo[r].rev() for r in pushop.revs)
    tipnode = repo[revs[-1]].node()
    if len(revs) > 1:
        basenode = repo[revs[0]].node()

    if repo.pushsingle:
        # Single-changeset mode: review only the tip.
        basenode = tipnode

    # Given a base and tip node, find all changesets to review.
    #
    # A solution that works most of the time is to find all non-public
    # ancestors of that node. This is our default.
    #
    # If basenode is specified, we stop the traversal when we encounter it.
    #
    # Note that we will still refuse to review a public changeset even with
    # basenode. This decision is somewhat arbitrary and can be revisited later
    # if there is an actual need to review public changesets.
    nodes = [tipnode]
    # Special case where basenode is the tip node.
    if basenode and tipnode == basenode:
        pass
    else:
        for node in repo[tipnode].ancestors():
            ctx = repo[node]

            if ctx.phase() == phases.public:
                # Never walk past the public boundary.
                break
            if basenode and ctx.node() == basenode:
                nodes.insert(0, ctx.node())
                break

            nodes.insert(0, ctx.node())

    # Filter out public nodes.
    publicnodes = []
    for node in nodes:
        ctx = repo[node]
        if ctx.phase() == phases.public:
            publicnodes.append(node)
            ui.status(_('(ignoring public changeset %s in review request)\n')
                      % ctx.hex()[0:12])

    nodes = [n for n in nodes if n not in publicnodes]
    if not nodes:
        raise util.Abort(
            _('no non-public changesets left to review'),
            hint=_('add or change the -r argument to include draft '
                   'changesets'))

    # We stop completely empty changesets prior to review.
    for node in nodes:
        ctx = repo[node]
        if not ctx.files():
            raise util.Abort(_('cannot review empty changeset %s')
                             % ctx.hex()[:12],
                             hint=_('add files to or remove changeset'))

    # Run the style checker over the changesets before submitting them.
    run_android_checkstyle(repo, nodes)

    # Ensure all reviewed changesets have commit IDs.
    replacenodes = []
    for node in nodes:
        ctx = repo[node]
        if not parse_commit_id(encoding.fromlocal(ctx.description())):
            replacenodes.append(node)

    def makememctx(repo, ctx, revmap, copyfilectxfn):
        # Rewrite callback for replacechangesets(): produce a memctx that
        # is identical to ctx except a commit id is added to the message.
        parents = newparents(repo, ctx, revmap)
        # Need to make a copy otherwise modification is made on original,
        # which is just plain wrong.
        msg = encoding.fromlocal(ctx.description())
        new_msg, changed = addcommitid(msg, repo=repo)

        memctx = context.memctx(repo, parents,
                                encoding.tolocal(new_msg), ctx.files(),
                                copyfilectxfn, user=ctx.user(),
                                date=ctx.date(),
                                extra=dict(ctx.extra()))

        return memctx

    if replacenodes:
        ui.status(_('(adding commit id to %d changesets)\n') %
                  (len(replacenodes)))
        nodemap = replacechangesets(repo, replacenodes, makememctx,
                                    backuptopic='addcommitid')

        # Since we're in the middle of an operation, update references
        # to rewritten nodes.
        nodes = [nodemap.get(node, node) for node in nodes]
        pushop.revs = [nodemap.get(node, node) for node in pushop.revs]

    pushop.reviewnodes = nodes

    # Since we may rewrite changesets to contain review metadata after
    # push, abort immediately if the working directory state is not
    # compatible with rewriting. This prevents us from successfully
    # pushing and failing to update commit metadata after the push. i.e.
    # it prevents potential loss of metadata.
    #
    # There may be some scenarios where we don't rewrite after push.
    # But coding that here would be complicated. And future server changes
    # may change things like review request mapping, which may invalidate
    # client assumptions. So always assume a rewrite is needed.
    impactedrevs = list(repo.revs('%ln::', nodes))
    if repo['.'].rev() in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    return orig(pushop)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.
    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are
    DROPPED and replaced by the new ones specified to --addinclude and
    --addexclude. If --clear is specified without any further options, the
    narrowspec will be empty and will not match any files.

    If --auto-remove-includes is specified, then those includes that don't match
    any files modified by currently visible local commits (those not shared by
    the remote) will be added to the set of explicitly specified includes to
    remove.

    --import-rules accepts a path to a file containing rules, allowing you to
    add --addinclude, --addexclude rules in bulk. Like the other include and
    exclude switches, the changes are applied immediately.
    """
    opts = pycompat.byteskwargs(opts)
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        raise error.Abort(
            _(b'the tracked command is only supported on '
              b'repositories cloned with --narrow'))

    # Before supporting, decide whether it "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts[b'clear']:
        raise error.Abort(_(b'the --clear option is not yet supported'))

    # import rules from a file
    newrules = opts.get(b'import_rules')
    if newrules:
        try:
            filepath = os.path.join(encoding.getcwd(), newrules)
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(
                _(b"cannot read narrowspecs from '%s': %s")
                % (filepath, encoding.strtolocal(inst.strerror)))
        includepats, excludepats, profiles = sparse.parseconfig(
            ui, fdata, b'narrow')
        if profiles:
            raise error.Abort(
                _(b"including other spec files using '%include' "
                  b"is not supported in narrowspec"))
        opts[b'addinclude'].extend(includepats)
        opts[b'addexclude'].extend(excludepats)

    addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
    removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
    autoremoveincludes = opts[b'auto_remove_includes']

    update_working_copy = opts[b'update_working_copy']
    # No modifying option given at all -> display-only mode.
    only_show = not (addedincludes or removedincludes or addedexcludes
                     or removedexcludes or newrules or autoremoveincludes
                     or update_working_copy)

    oldincludes, oldexcludes = repo.narrowpats

    # filter the user passed additions and deletions into actual additions and
    # deletions of excludes and includes
    addedincludes -= oldincludes
    removedincludes &= oldincludes
    addedexcludes -= oldexcludes
    removedexcludes &= oldexcludes

    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes

    # Only print the current narrowspec.
    if only_show:
        ui.pager(b'tracked')
        fm = ui.formatter(b'narrow', opts)
        for i in sorted(oldincludes):
            fm.startitem()
            fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
            fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
        for i in sorted(oldexcludes):
            fm.startitem()
            fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
            fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
        fm.end()
        return 0

    if update_working_copy:
        # Sync the working copy with the (already stored) narrowspec only.
        with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
            narrowspec.updateworkingcopy(repo)
            narrowspec.copytoworkingcopy(repo)
        return 0

    if not (widening or narrowing or autoremoveincludes):
        ui.status(_(b"nothing to widen or narrow\n"))
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or b'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)

        # check narrow support before doing anything if widening needs to be
        # performed. In future we should also abort if client is ellipses and
        # server does not support ellipses
        if widening and wireprototypes.NARROWCAP not in remote.capabilities():
            raise error.Abort(_(b"server does not support narrow clones"))

        commoninc = discovery.findcommonincoming(repo, remote)

        if autoremoveincludes:
            outgoing = discovery.findcommonoutgoing(repo, remote,
                                                    commoninc=commoninc)
            ui.status(_(b'looking for unused includes to remove\n'))
            # Collect every file touched by local-only changesets; any
            # include matching none of them is a removal candidate.
            localfiles = set()
            for n in itertools.chain(outgoing.missing, outgoing.excluded):
                localfiles.update(repo[n].files())
            suggestedremovals = []
            for include in sorted(oldincludes):
                match = narrowspec.match(repo.root, [include], oldexcludes)
                if not any(match(f) for f in localfiles):
                    suggestedremovals.append(include)
            if suggestedremovals:
                for s in suggestedremovals:
                    ui.status(b'%s\n' % s)
                if (ui.promptchoice(
                        _(b'remove these unused includes (yn)?'
                          b'$$ &Yes $$ &No')) == 0):
                    removedincludes.update(suggestedremovals)
                    narrowing = True
            else:
                ui.status(_(b'found no unused includes\n'))

        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(
                ui,
                repo,
                remote,
                commoninc,
                oldincludes,
                oldexcludes,
                newincludes,
                newexcludes,
                opts[b'force_delete_local_changes'],
            )
            # _narrow() updated the narrowspec and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and addedexcludes will be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(
                ui,
                repo,
                remote,
                commoninc,
                oldincludes,
                oldexcludes,
                newincludes,
                newexcludes,
            )

    return 0
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller ones

    Repeatedly prompt changes and commit message for new changesets until there
    is nothing left in the original changeset.

    If --rev was not given, split the working directory parent.

    By default, rebase connected non-obsoleted descendants onto the new
    changeset. Use --no-rebase to avoid the rebase.
    """
    opts = pycompat.byteskwargs(opts)
    revlist = []
    if opts.get(b'rev'):
        revlist.append(opts.get(b'rev'))
    revlist.extend(revs)
    with repo.wlock(), repo.lock(), repo.transaction(b'split') as tr:
        revs = scmutil.revrange(repo, revlist or [b'.'])
        if len(revs) > 1:
            raise error.InputError(_(b'cannot split multiple revisions'))

        rev = revs.first()
        ctx = repo[rev]
        # Handle nullid specially here (instead of leaving for precheck()
        # below) so we get a nicer message and error code.
        if rev is None or ctx.node() == nullid:
            ui.status(_(b'nothing to split\n'))
            return 1
        if ctx.node() is None:
            raise error.InputError(_(b'cannot split working directory'))

        if opts.get(b'rebase'):
            # Skip obsoleted descendants and their descendants so the rebase
            # won't cause conflicts for sure.
            descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
            torebase = list(
                repo.revs(b'%ld - (%ld & obsolete())::', descendants,
                          descendants))
        else:
            torebase = []
        rewriteutil.precheck(repo, [rev] + torebase, b'split')

        if len(ctx.parents()) > 1:
            raise error.InputError(_(b'cannot split a merge changeset'))

        cmdutil.bailifchanged(repo)

        # Deactivate bookmark temporarily so it won't get moved
        # unintentionally
        bname = repo._activebookmark
        if bname and repo._bookmarks[bname] != ctx.node():
            bookmarks.deactivate(repo)

        # Remember the working directory parent so we can restore it if
        # the interactive split fails partway through.
        wnode = repo[b'.'].node()
        top = None
        try:
            top = dosplit(ui, repo, tr, ctx, opts)
        finally:
            # top is None: split failed, need update --clean recovery.
            # wnode == ctx.node(): wnode split, no need to update.
            if top is None or wnode != ctx.node():
                hg.clean(repo, wnode, show_stats=False)
        if bname:
            # Restore the bookmark we deactivated above.
            bookmarks.activate(repo, bname)
        if torebase and top:
            dorebase(ui, repo, torebase, top)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.
    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are
    DROPPED and replaced by the new ones specified to --addinclude and
    --addexclude. If --clear is specified without any further options, the
    narrowspec will be empty and will not match any files.

    Returns 0 on success, 1 if the repository is not narrow or an
    unsupported option was given.
    """
    opts = pycompat.byteskwargs(opts)
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        # BUGFIX: corrected the "respositories" typo in this user-facing
        # warning message.
        ui.warn(_('The narrow command is only supported on repositories'
                  ' cloned with --narrow.\n'))
        return 1

    # Before supporting, decide whether it "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts['clear']:
        ui.warn(_('The --clear option is not yet supported.\n'))
        return 1

    # import rules from a file
    newrules = opts.get('import_rules')
    if newrules:
        try:
            filepath = os.path.join(encoding.getcwd(), newrules)
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
                              (filepath, encoding.strtolocal(inst.strerror)))
        includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
                                                                'narrow')
        if profiles:
            raise error.Abort(_("including other spec files using '%include' "
                                "is not supported in narrowspec"))
        opts['addinclude'].extend(includepats)
        opts['addexclude'].extend(excludepats)

    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])

    # No modifying option was passed at all -> display-only mode.
    only_show = not (addedincludes or removedincludes or addedexcludes or
                     removedexcludes or newrules)

    oldincludes, oldexcludes = repo.narrowpats

    # filter the user passed additions and deletions into actual additions and
    # deletions of excludes and includes
    addedincludes -= oldincludes
    removedincludes &= oldincludes
    addedexcludes -= oldexcludes
    removedexcludes &= oldexcludes

    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes

    # Only print the current narrowspec.
    if only_show:
        ui.pager('tracked')
        fm = ui.formatter('narrow', opts)
        for i in sorted(oldincludes):
            fm.startitem()
            fm.write('status', '%s ', 'I', label='narrow.included')
            fm.write('pat', '%s\n', i, label='narrow.included')
        for i in sorted(oldexcludes):
            fm.startitem()
            fm.write('status', '%s ', 'X', label='narrow.excluded')
            fm.write('pat', '%s\n', i, label='narrow.excluded')
        fm.end()
        return 0

    if not widening and not narrowing:
        ui.status(_("nothing to widen or narrow\n"))
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or 'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)

        # check narrow support before doing anything if widening needs to be
        # performed. In future we should also abort if client is ellipses and
        # server does not support ellipses
        if widening and wireprototypes.NARROWCAP not in remote.capabilities():
            raise error.Abort(_("server does not support narrow clones"))

        commoninc = discovery.findcommonincoming(repo, remote)

        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes,
                    opts['force_delete_local_changes'])
            # _narrow() updated the narrowspec and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and addedexcludes will be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                   newincludes, newexcludes)

    return 0
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of history
    (the source) onto another (the destination). This can be useful for
    linearizing *local* changes relative to a master development tree.

    You should not rebase changesets that have already been shared with
    others. Doing so will force everybody else to perform the same rebase or
    they will end up with duplicated changesets after pulling in your rebased
    changesets.

    In its default configuration, Mercurial will prevent you from rebasing
    published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the current branch tip as the destination. (The destination
    changeset is not modified by rebasing, but new changesets are added
    as its descendants.)

    You can specify which changesets to rebase in two ways: as a "source"
    changeset or as a "base" changeset. Both are shorthand for a topologically
    related set of changesets (the "source branch"). If you specify source
    (``-s/--source``), rebase will rebase that changeset and all of its
    descendants onto dest. If you specify base (``-b/--base``), rebase will
    select ancestors of base back to but not including the common ancestor
    with dest. Thus, ``-b`` is less precise but more convenient than ``-s``:
    you can specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase uses
    the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev`` option.
    It allows you to specify an arbitrary set of changesets to rebase.
    Descendants of revs you specify with this option are not automatically
    included in the rebase.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep`` to
    preserve the original source changesets. Some changesets in the source
    branch (e.g. merges from the destination branch) may be dropped if they no
    longer contribute any change.

    One result of the rules for selecting the destination changeset and source
    branch is that, unlike ``merge``, rebase will do nothing if you are at the
    branch tip of a named branch with two heads. You need to explicitly
    specify source and/or destination (or ``update`` to the other head, if
    it's the head of the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are unresolved
    conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # Mapping between the old revision id and either what is the new rebased
    # revision or what needs to be done with the old revision. The state dict
    # will be what contains most of the rebase progress state.
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            # --interactive belongs to histedit; point the user there.
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if collapsemsg and not collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # Resuming/aborting an in-progress rebase: reject options that
            # only make sense when starting a new one, then restore state
            # from disk.
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise error.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            # Starting a fresh rebase: compute the set of revisions to
            # rebase from --rev/--source/--base and the destination.
            if srcf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'source and a base'))
            if revf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a base'))
            if revf and srcf:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if destf:
                dest = scmutil.revsingle(repo, destf)
            else:
                dest = repo[_destrebase(repo)]
                destf = str(dest)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return _nothingtorebase()
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable
                    # to lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return _nothingtorebase()

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise error.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            obsoletenotrebased = {}
            if ui.configbool('experimental', 'rebaseskipobsolete'):
                rebasesetrevs = set(rebaseset)
                obsoletenotrebased = _computeobsoletenotrebased(repo,
                                                                rebasesetrevs,
                                                                dest)

                # - plain prune (no successor) changesets are rebased
                # - split changesets are not rebased if at least one of the
                # changeset resulting from the split is an ancestor of dest
                rebaseset = rebasesetrevs - set(obsoletenotrebased)
            result = buildstate(repo, dest, rebaseset, collapsef,
                                obsoletenotrebased)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return _nothingtorebase()

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % repo[root],
                                  hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        # Main loop: process revisions in ascending order. state[rev] is
        # revtodo until rebased, then becomes the new revision number (or a
        # sentinel: nullmerge, revignored, revprecursor).
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                # Persist progress before merging so --continue/--abort can
                # pick up from here after a conflict.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform,
                                                     **opts)
                    newnode = concludenode(repo, rev, p1, p2,
                                           extrafn=extrafn, editor=editor,
                                           keepbranches=keepbranchesf)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        skipped.add(rev)
                    state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif state[rev] == revprecursor:
                # Obsolete changeset whose successor is already in the
                # destination: nothing to do beyond telling the user.
                targetctx = repo[obsoletenotrebased[rev]]
                desctarget = '%d:%s "%s"' % (
                    targetctx.rev(), targetctx,
                    targetctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as '
                        '%s\n')
                ui.status(msg % (desc, desctarget))
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Commit the single collapsed changeset now that all revisions
            # have been folded into the working directory.
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newnode = concludenode(repo, rev, p1, external,
                                   commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor,
                                   keepbranches=keepbranchesf)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            # Strip/obsolete the original changesets now that everything
            # has been rebased.
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
                repo['.'].node() == repo._bookmarks[activebookmark]):
            bookmarks.activate(repo, activebookmark)
    finally:
        release(lock, wlock)
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit history between <parent> and the working directory
    parent, driven by a rules file (pick/edit/fold/mess/drop lines).
    Supports --continue and --abort to resume or roll back an in-progress
    edit whose state is persisted in .hg/histedit-state.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))
    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]
        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6: parseurl returns a (revs, checkout) tuple
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9: discovery.findoutgoing was removed; emulate it on
            # top of whichever discovery API this hg version provides.
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            # very old hg: outgoing discovery lives on the repo object
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        ui.status(_('comparing with %s\n') % hidepassword(dest))
        # use the root of the outgoing set as the edit base
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')
    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        # resume from the persisted state written by writestate()
        (parentctxnode, created, replaced, tmpnodes, existing, rules, keep,
         tip, ) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # walk all descendants the user created while the edit was paused;
        # folds treat them as temporary, other actions keep them
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            # placeholder message; replaced when the fold is finished
            message = 'fold-temp-revision %s' % currentnode
        new = None
        if m or a or r or d:
            # commit whatever the user left in the working directory
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_, ) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing, rules, keep,
         tip, ) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        # strip newest-first so earlier strips don't invalidate later nodes
        for nodes in (created, tmpnodes, ):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    # already stripped as a descendant of an earlier strip
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        # fresh invocation
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()
        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)
        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            # no rules file given: open an editor pre-filled with one
            # "pick" line per changeset plus the help comment
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12], )
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            # rules is a path to a pre-written rules file
            f = open(rules)
            rules = f.read()
            f.close()
        # drop blanks and comment lines, then validate against the revs
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    # main loop, shared by the fresh and --continue paths: persist state
    # before every action so an interruption can be resumed or aborted
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_, ) = actiontable[action](
            ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        # Python 2 cmp-style sort by revision number
        for n in sorted(replaced, lambda x, y: cmp(repo[x].rev(),
                                                   repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                # already stripped as a descendant of an earlier strip
                pass
    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit history between <parent> and the working directory
    parent, driven by a rules file (pick/edit/fold/drop lines). Supports
    --continue and --abort via state persisted in .hg/histedit-state.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, "mq", None)
    if mq and mq.applied:
        raise util.Abort(_("source has mq patches applied"))
    parent = list(parent) + opts.get("rev", [])
    if opts.get("outgoing"):
        if len(parent) > 1:
            raise util.Abort("only one repo argument allowed with --outgoing")
        elif parent:
            parent = parent[0]
        dest = ui.expandpath(parent or "default-push", parent or "default")
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6: parseurl returns a (revs, checkout) tuple
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)
            # hg >= 1.9: discovery.findoutgoing was removed; emulate it
            findoutgoing = getattr(discovery, "findoutgoing", None)
            if findoutgoing is None:
                def findoutgoing(repo, other, force=False):
                    common, outheads = discovery.findcommonoutgoing(
                        repo, other, [], force=force)
                    return repo.changelog.findmissing(common, outheads)[0:1]
        else:
            # very old hg: outgoing discovery lives on the repo object
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        ui.status(_("comparing with %s\n") % hidepassword(dest))
        # use the root of the outgoing set as the edit base
        parent = findoutgoing(repo, other, force=opts.get("force"))
    else:
        if opts.get("force"):
            raise util.Abort("--force only allowed with --outgoing")

    if opts.get("continue", False):
        if len(parent) != 0:
            raise util.Abort("no arguments allowed with --continue")
        # resume from the persisted state written by writestate()
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # walk all descendants the user created while the edit was paused;
        # folds treat them as temporary, other actions keep them
        while newchildren:
            if action in ["f", "fold"]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(
                lambda x: x.node() not in existing,
                reduce(lambda x, y: x + y,
                       map(lambda r: r.children(), newchildren))
            )
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ("e", "edit"):
            message = ui.edit(message, ui.username())
        elif action in ("f", "fold"):
            # placeholder message; replaced when the fold is finished
            message = "fold-temp-revision %s" % currentnode
        new = None
        if m or a or r or d:
            # commit whatever the user left in the working directory
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ("e", "edit", "p", "pick"):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
        else:
            # fold
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_, tmpnodes_) = finishfold(
                ui, repo, parentctx, oldctx, new, opts, newchildren
            )
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
    elif opts.get("abort", False):
        if len(parent) != 0:
            raise util.Abort("no arguments allowed with --abort")
        (parentctxnode, created, replaced, tmpnodes, existing, rules,
         keep, tip) = readstate(repo)
        ui.debug("restore wc to old tip %s\n" % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug("should strip created nodes %s\n" %
                 ", ".join([node.hex(n)[:12] for n in created]))
        ui.debug("should strip temp nodes %s\n" %
                 ", ".join([node.hex(n)[:12] for n in tmpnodes]))
        # strip newest-first so earlier strips don't invalidate later nodes
        for nodes in (created, tmpnodes):
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    # already stripped as a descendant of an earlier strip
                    pass
        os.unlink(os.path.join(repo.path, "histedit-state"))
        return
    else:
        # fresh invocation
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, "histedit-state")):
            raise util.Abort("history edit already in progress, try "
                             "--continue or --abort")
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort("requires exactly one parent revision")
        parent = _revsingle(repo, parent[0]).node()
        keep = opts.get("keep", False)
        revs = between(repo, parent, tip, keep)
        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get("commands", "")
        if not rules:
            # no rules file given: open an editor pre-filled with one
            # "pick" line per changeset (truncated to 80 columns) plus
            # the help comment
            rules = "\n".join(
                [("pick %s %s" % (c.hex()[:12],
                                  c.description().splitlines()[0]))[:80]
                 for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12], node.hex(tip)[:12])
            rules = ui.edit(rules, ui.username())
        else:
            # rules is a path to a pre-written rules file
            f = open(rules)
            rules = f.read()
            f.close()
        # drop blanks and comment lines, then validate against the revs
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == "#"]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get("keep", False)
        replaced = []
        tmpnodes = []
        created = []

    # main loop, shared by the fresh and --continue paths: persist state
    # before every action so an interruption can be resumed or aborted
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_, tmpnodes_) = actiontable[action](
            ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        ui.debug("should strip replaced nodes %s\n" %
                 ", ".join([node.hex(n)[:12] for n in replaced]))
        # Python 2 cmp-style sort by revision number
        for n in sorted(replaced, lambda x, y: cmp(repo[x].rev(),
                                                   repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                # already stripped as a descendant of an earlier strip
                pass
    ui.debug("should strip temp nodes %s\n" %
             ", ".join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, "histedit-state"))
def do_collapse(ui, repo, first, last, revs, movelog, timedelta, opts):
    """Collapse the changesets in ``revs`` (spanning ``first``..``last``)
    into a single changeset, then rebase all descendants of ``last`` onto
    the collapsed changeset.

    Aborts up front if the range has external parents/children, if another
    user authored any revision (without --force), or if the working
    directory is dirty. With --noop, only reports what would be done.
    On failure mid-rewrite, restores the working directory and strips the
    partially built collapsed changeset before re-raising.
    """
    ui.debug(_('Collapsing revisions %s\n') % revs)
    if opts['debugdelay']:
        # artificial per-step delay used for debugging/testing
        debug_delay = float(opts['debugdelay'])
    else:
        debug_delay = False

    # Pre-flight validation: every revision in the range must be fully
    # contained, i.e. no parent/child edges crossing the range boundary.
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(
                            _('revision %s has child %s not '
                              'being collapsed, please rebase\n') %
                            (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(
                            _('revision %s has parent %s not '
                              'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
                           'won\'t collapse.') % first)

    # bailifchanged was bail_if_changed before hg 1.9
    try:
        cmdutil.bailifchanged(repo)  # 1.9
    except AttributeError:
        cmdutil.bail_if_changed(repo)  # < 1.9

    parent = repo[first].parents()[0]
    # everything after the range must be rebased onto the collapsed node
    tomove = list(repo.changelog.descendants([last]))

    head_hgtags = get_hgtags_from_heads(ui, repo, last)

    # base .hgtags content to build on; False when the parent has none
    if '.hgtags' in parent:
        parent_hgtags = parent['.hgtags'].data()
    else:
        parent_hgtags = False

    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    tagsmap = dict()
    if opts['noop']:
        ui.status(_('noop: not collapsing\n'))
    else:
        origparent = repo['.'].rev()
        collapsed = None

        try:
            branch = repo[last].branch()
            collapsed = makecollapsed(ui, repo, parent, revs, branch, tagsmap,
                                      parent_hgtags, movelog, opts)
            movemap[max(revs)] = collapsed
            movedescendants(ui, repo, collapsed, tomove, movemap,
                            tagsmap, parent_hgtags, movelog, debug_delay)
            fix_hgtags(ui, repo, head_hgtags, tagsmap)
        except:
            # deliberately bare: undo the partial rewrite on ANY failure
            # (including KeyboardInterrupt), then re-raise
            merge.update(repo, repo[origparent].rev(), False, True, False)
            if collapsed:
                repair.strip(ui, repo, collapsed.node(), "strip")
            raise

        if not opts['keep']:
            # remove the original (now rewritten) changesets
            ui.debug(_('stripping revision %d\n') % first)
            repair.strip(ui, repo, repo[first].node(), "strip")

        ui.status(_('collapse completed\n'))
def uncommit(ui, repo, *pats, **opts):
    """uncommit part or all of a local changeset

    This command undoes the effect of a local commit, returning the affected
    files to their uncommitted state. This means that files modified or
    deleted in the changeset will be left unchanged, and so will remain
    modified in the working directory.

    If no files are specified, the commit will be pruned, unless --keep is
    given.
    """
    # Modern hg APIs use byte strings internally; normalize the opts dict.
    opts = pycompat.byteskwargs(opts)
    cmdutil.checknotesize(ui, opts)
    cmdutil.resolvecommitoptions(ui, opts)

    with repo.wlock(), repo.lock():
        # m/a/r/d = modified, added, removed, deleted in the working copy
        m, a, r, d = repo.status()[:4]
        isdirtypath = any(set(m + a + r + d) & set(pats))
        allowdirtywcopy = opts[
            b'allow_dirty_working_copy'] or repo.ui.configbool(
            b'experimental', b'uncommitondirtywdir')
        # Refuse to run on a dirty working copy unless explicitly allowed,
        # except when only clean paths were named.
        if not allowdirtywcopy and (not pats or isdirtypath):
            cmdutil.bailifchanged(
                repo,
                hint=_(b'requires --allow-dirty-working-copy to uncommit'),
            )
        old = repo[b'.']
        rewriteutil.precheck(repo, [old.rev()], b'uncommit')
        if len(old.parents()) > 1:
            raise error.Abort(_(b"cannot uncommit merge changeset"))

        match = scmutil.match(old, pats, opts)

        # Check all explicitly given files; abort if there's a problem.
        if match.files():
            s = old.status(old.p1(), match, listclean=True)
            # only files touched by the commit can be uncommitted
            eligible = set(s.added) | set(s.modified) | set(s.removed)

            badfiles = set(match.files()) - eligible

            # Naming a parent directory of an eligible file is OK, even
            # if not everything tracked in that directory can be
            # uncommitted.
            if badfiles:
                badfiles -= {f for f in util.dirs(eligible)}

            for f in sorted(badfiles):
                if f in s.clean:
                    hint = _(
                        b"file was not changed in working directory parent")
                elif repo.wvfs.exists(f):
                    hint = _(b"file was untracked in working directory parent")
                else:
                    hint = _(b"file does not exist")

                raise error.Abort(
                    _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
                    hint=hint,
                )

        with repo.transaction(b'uncommit'):
            if not (opts[b'message'] or opts[b'logfile']):
                # default to reusing the old commit message
                opts[b'message'] = old.description()
            message = cmdutil.logmessage(ui, opts)

            # keep an (emptied) commit when specific files were named,
            # when --keep was given, or when configured to do so
            keepcommit = pats
            if not keepcommit:
                if opts.get(b'keep') is not None:
                    keepcommit = opts.get(b'keep')
                else:
                    keepcommit = ui.configbool(
                        b'experimental', b'uncommit.keep')
            newid = _commitfiltered(
                repo,
                old,
                match,
                keepcommit,
                message=message,
                user=opts.get(b'user'),
                date=opts.get(b'date'),
            )
            if newid is None:
                ui.status(_(b"nothing to uncommit\n"))
                return 1

            mapping = {}
            if newid != old.p1().node():
                # Move local changes on filtered changeset
                mapping[old.node()] = (newid, )
            else:
                # Fully removed the old commit
                mapping[old.node()] = ()

            with repo.dirstate.parentchange():
                scmutil.movedirstate(repo, repo[newid], match)

            scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
def replacechangesets(repo, oldnodes, createfn, backuptopic="replacing"):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file
    modifications are to be performed (a common pattern). The function should
    return an *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is
    appropriate for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible.

    Before any changes are made, we verify the state of the repo is
    sufficient for transformation to occur and abort otherwise.

    Returns a dict mapping old node -> new node.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort("replacechangesets expects 20 byte nodes")

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort("must pass oldnodes in changelog order")

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, "currenttransaction"):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort(
            "cannot call replacechangesets when a transaction " "is active"
        )

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered("visible").revs("%ld::", oldrevs))

    # If we'll need to update the working directory, don't do anything if
    # there are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # obsolete.isenabled appeared in newer hg; fall back to the old
    # module-level _enabled flag.
    obsenabled = False
    if hasattr(obsolete, "isenabled"):
        obsenabled = obsolete.isenabled(repo, "createmarkers")
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("replacechangesets")

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise util.Abort("createfn must return a context.memctx")

            if oldctx == newctx:
                raise util.Abort("createfn must create a new changeset")

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort("we do not allow replacements to modify files")

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort(
                    "unknown parent of child commit: %s" % oldctx.hex(),
                    hint="please report this as a bug",
                )

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(
                repo,
                parents,
                oldctx.description(),
                oldctx.files(),
                preservefilectx(oldctx),
                user=oldctx.user(),
                date=oldctx.date(),
                extra=oldctx.extra(),
            )
            # memctx doesn't carry status; borrow it from the old changeset
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

            # NOTE(review): the block below commits the same mctx a second
            # time under a temporary phases.new-commit setting, overwriting
            # newnode/revmap that were just set above. It looks like a
            # leftover from merging two phase-preservation approaches —
            # confirm whether one of the two commit paths should be removed.
            ph = repo.ui.config("phases", "new-commit")
            try:
                repo.ui.setconfig("phases", "new-commit", oldctx.phase(),
                                  "rewriting")
                newnode = mctx.commit()
                revmap[rev] = repo[newnode].rev()
            finally:
                repo.ui.setconfig("phases", "new-commit", ph)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, "applychanges"):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode
                repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, "mq"):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo["."].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort(
                "active bookmark changed; " "this should not occur!",
                hint="please file a bug",
            )

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)

    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
def do_backout(ui, repo, rev, handle_change, commit_change, use_mq=False,
               reverse_order=False, **opts):
    """Back out (with --apply: reapply) one or more changesets.

    For every changeset in ``rev`` a reverse (or forward) patch is generated
    and fed to ``handle_change``; unless --single/--nopush is given, each
    change is then committed via ``commit_change`` with a message derived
    from the original changeset (including any "bug NNN" annotations found
    in its description). When backing out a backout (or reapplying an
    original change), the original description and author are reused.

    Parameters:
      rev            -- revision spec(s); resolved via scmutil.revrange
      handle_change  -- callback(desc, node, qimport=...) that applies the
                        generated patch (reads it from ui.fin / stdin)
      commit_change  -- callback that commits the applied change
      use_mq         -- route the patch through mq when --nopush is set
      reverse_order  -- process changesets newest-first
      opts           -- command options: force, apply, single, nopush,
                        name, ...

    Raises error.Abort on an empty revision set or when asked to back out
    a merge changeset.
    """
    if not opts.get('force'):
        ui.status('checking for uncommitted changes\n')
        cmdutil.bailifchanged(repo)
    backout = not opts.get('apply')
    # Wording used in status/commit messages for the two modes.
    desc = {'action': 'backout',
            'Actioned': 'Backed out',
            'actioning': 'backing out',
            'name': 'backout'}
    if not backout:
        desc = {'action': 'apply',
                'Actioned': 'Reapplied',
                'actioning': 'Reapplying',
                'name': 'patch'}

    rev = scmutil.revrange(repo, rev)
    if len(rev) == 0:
        raise error.Abort('at least one revision required')

    csets = [repo[r] for r in rev]
    csets.sort(reverse=reverse_order, key=lambda cset: cset.rev())

    new_opts = opts.copy()

    def bugs_suffix(bugs):
        # " (bug N)" / " (bug N, bug M, ...)" suffix for commit messages
        if len(bugs) == 0:
            return ''
        elif len(bugs) == 1:
            return ' (bug ' + list(bugs)[0] + ')'
        else:
            return ' (' + ', '.join(map(lambda b: 'bug %s' % b, bugs)) + ')'

    def parse_bugs(msg):
        # extract a bug number from a commit message, conservatively
        bugs = set()
        m = BUG_CONSERVATIVE_RE.search(msg)
        if m:
            bugs.add(m.group(2))
        return bugs

    def apply_change(node, reverse, push_patch=True, name=None):
        p1, p2 = repo.changelog.parents(node)
        if p2 != nullid:
            raise error.Abort('cannot %s a merge changeset' % desc['action'])

        # BUG FIX: previously this did `opts = mdiff.defaultopts;
        # opts.git = True`, mutating the shared module-level default
        # diffopts object and thereby leaking git-mode into every
        # subsequent diff in this process. Build a fresh options
        # object instead.
        diffopts = mdiff.diffopts(git=True)

        # Build the (possibly reversed) patch in memory.
        rpatch = StringIO.StringIO()
        orig, mod = (node, p1) if reverse else (p1, node)
        for chunk in patch.diff(repo, node1=orig, node2=mod, opts=diffopts):
            rpatch.write(chunk)
        rpatch.seek(0)

        # Feed the patch to handle_change through ui.fin (or stdin on
        # old hg versions), restoring the original stream afterwards.
        saved_stdin = None
        try:
            save_fin = ui.fin
            ui.fin = rpatch
        except:
            # Old versions of hg did not use the ui.fin mechanism
            saved_stdin = sys.stdin
            sys.stdin = rpatch

        handle_change(desc, node, qimport=(use_mq and new_opts.get('nopush')))

        if saved_stdin is None:
            ui.fin = save_fin
        else:
            sys.stdin = saved_stdin

    allbugs = set()
    messages = []
    for cset in csets:
        # Hunt down original description if we might want to use it
        orig_desc = None
        orig_desc_cset = None
        orig_author = None
        r = cset
        # Follow chains of backouts/reapplies back to the original change.
        while len(csets) == 1 or not opts.get('single'):
            ui.debug("Parsing message for %s\n" % short(r.node()))
            m = backout_re.match(r.description())
            if m:
                ui.debug(" looks like a backout of %s\n" % m.group(1))
            else:
                m = reapply_re.match(r.description())
                if m:
                    ui.debug(" looks like a reapply of %s\n" % m.group(1))
                else:
                    ui.debug(" looks like the original description\n")
                    orig_desc = r.description()
                    orig_desc_cset = r
                    orig_author = r.user()
                    break
            r = repo[m.group(1)]

        bugs = parse_bugs(cset.description())
        allbugs.update(bugs)
        node = cset.node()
        shortnode = short(node)
        ui.status('%s %s\n' % (desc['actioning'], shortnode))

        apply_change(node, backout, push_patch=(not opts.get('nopush')))

        msg = ('%s changeset %s' % (desc['Actioned'], shortnode)) + \
            bugs_suffix(bugs)
        user = None

        if backout:
            # If backing out a backout, reuse the original commit message
            # & author.
            if orig_desc_cset is not None and orig_desc_cset != cset:
                msg = orig_desc
                user = orig_author
        else:
            # If reapplying the original change, reuse the original commit
            # message & author.
            if orig_desc_cset is not None and orig_desc_cset == cset:
                msg = orig_desc
                user = orig_author

        messages.append(msg)
        if not opts.get('single') and not opts.get('nopush'):
            new_opts['message'] = messages[-1]
            # Override the user to that of the original patch author in the
            # case of --apply
            if user is not None:
                new_opts['user'] = user
            commit_change(ui, repo, desc['name'], node=node,
                          force_name=opts.get('name'), **new_opts)

        # Iterations of this loop appear to leak memory for unknown reasons.
        # Work around it by forcing a gc.
        gc.collect()

    msg = ('%s %d changesets' % (desc['Actioned'], len(rev))) + \
        bugs_suffix(allbugs) + '\n'
    messages.insert(0, msg)
    new_opts['message'] = "\n".join(messages)

    if opts.get('single'):
        commit_change(ui, repo, desc['name'], revisions=rev,
                      force_name=opts.get('name'), **new_opts)
def fetch(ui, repo, source=b'default', **opts):
    """pull changes from a remote repository, merge new changes if needed.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository.

    If the pulled changes add a new branch head, the head is
    automatically merged, and the result of the merge is committed.
    Otherwise, the working directory is updated to include the new
    changes.

    When a merge is needed, the working directory is first updated to
    the newly pulled changes. Local changes are then merged into the
    pulled changes. To switch the merge order, use --switch-parent.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    """
    # Modern hg APIs use byte strings internally; normalize the opts dict.
    opts = pycompat.byteskwargs(opts)
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)

    # Only allow a fetch from the tip of the current branch, so the
    # post-pull fast-forward/merge logic below is well defined.
    parent = repo.dirstate.p1()
    branch = repo.dirstate.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    if parent != branchnode:
        raise error.Abort(
            _(b'working directory not at branch tip'),
            hint=_(b"use 'hg update' to check out branch tip"),
        )

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        cmdutil.bailifchanged(repo)

        bheads = repo.branchheads(branch)
        # only consider true (childless) heads
        bheads = [head for head in bheads if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise error.Abort(
                _(b'multiple heads in this branch '
                  b'(use "hg heads ." and "hg merge" to merge)'))

        other = hg.peer(repo, opts, ui.expandpath(source))
        ui.status(
            _(b'pulling from %s\n') % util.hidepassword(ui.expandpath(source)))
        revs = None
        if opts[b'rev']:
            try:
                revs = [other.lookup(rev) for rev in opts[b'rev']]
            except error.CapabilityError:
                err = _(b"other repository doesn't support revision lookup, "
                        b"so a rev cannot be specified.")
                raise error.Abort(err)

        # Are there any changes at all?
        modheads = exchange.pull(repo, other, heads=revs).cgresult
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1 and len(newchildren):
            if newchildren[0] != parent:
                return hg.update(repo, newchildren[0])
            else:
                return 0

        # Are there more than one additional branch heads?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            # update to the pulled side first; local changes merge into it
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            ui.status(
                _(b'not merging with %d other new branch heads '
                  b'(use "hg heads ." and "hg merge" to merge them)\n')
                % (len(newheads) - 1))
            return 1

        if not newheads:
            return 0

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts[b'switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent
            ui.status(
                _(b'updating to %d:%s\n')
                % (repo.changelog.rev(firstparent), short(firstparent)))
            hg.clean(repo, firstparent)
            p2ctx = repo[secondparent]
            ui.status(
                _(b'merging with %d:%s\n')
                % (p2ctx.rev(), short(secondparent)))
            err = hg.merge(p2ctx, remind=False)

        if not err:
            # we don't translate commit messages
            message = cmdutil.logmessage(
                ui, opts) or (b'Automated merge with %s'
                              % util.removeauth(other.url()))
            editopt = opts.get(b'edit') or opts.get(b'force_editor')
            editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
            n = repo.commit(
                message, opts[b'user'], opts[b'date'], editor=editor)
            ui.status(
                _(b'new changeset %d:%s merges remote changes with local\n')
                % (repo.changelog.rev(n), short(n)))

        return err

    finally:
        release(lock, wlock)
def redo(ui, repo, *args, **opts):
    """ perform a redo

    Rolls back the previous undo.

    Walks the linear undo log backwards, cancelling redo entries against
    undo entries, to find the state that existed *before* the most recent
    un-cancelled undo, then restores it (or just previews it with
    --preview).
    """
    shiftedindex = _computerelative(repo, 0)
    preview = opts.get("preview")

    branch = ""
    reverseindex = 0   # how far back in the undo log to go
    redocount = 0      # pending redos not yet cancelled by an undo
    done = False
    while not done:
        # we step back the linear undo log
        # redoes cancel out undoes, if we have one more undo, we should undo
        # there, otherwise we continue looking
        # we are careful to not redo past absolute undoes (bc we loose
        # undoredo log info)
        # if we run into something that isn't undo or redo, we Abort
        # (including gaps in the log)
        # we extract the --index arguments out of undoes to make sure we
        # update the undoredo index correctly
        nodedict = _readindex(repo, reverseindex)
        commandstr = _readnode(repo, 'command.i', nodedict['command'])
        # the logged command line is NUL-separated
        commandlist = commandstr.split("\0")

        if 'True' == nodedict['unfinished']:
            # don't want to redo to an interupted state
            reverseindex += 1
        elif commandlist[0] == "undo":
            # re-parse the logged undo invocation to recover its options
            undoopts = {}
            fancyopts.fancyopts(commandlist,
                                cmdtable['undo'][1] + commands.globalopts,
                                undoopts, gnu=True)
            if redocount == 0:
                # want to go to state before the undo (not after)
                toshift = undoopts['step']
                shiftedindex -= toshift
                reverseindex += 1
                branch = undoopts.get('branch')
                done = True
            else:
                if undoopts['absolute']:
                    raise error.Abort(_("can't redo past absolute undo"))
                reverseindex += 1
                redocount -= 1
        elif commandlist[0] == "redo":
            redocount += 1
            reverseindex += 1
        else:
            raise error.Abort(_("nothing to redo"))

    if preview:
        _preview(ui, repo, reverseindex)
        return

    with repo.wlock(), repo.lock(), repo.transaction("redo"):
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        repo = repo.unfiltered()
        _undoto(ui, repo, reverseindex)
        # update undredo by removing what the given undo added
        _logundoredoindex(repo, shiftedindex, branch)