def _deleteunreachable(repo, ctx):
    """Deletes all ancestor and descendant commits of the given revision that
    aren't reachable from another bookmark.

    Keeps anything reachable from a bookmark or the working-directory
    parent; if the remotenames extension is loaded, remote names are
    kept as well.  Removal is done with obsolescence markers when the
    obsstore is enabled, otherwise by physically stripping.
    """
    keepheads = "bookmark() + ."
    try:
        # extensions.find raises KeyError when remotenames is not loaded
        extensions.find('remotenames')
        keepheads += " + remotenames()"
    except KeyError:
        pass
    hiderevs = repo.revs('::%s - ::(%r)', ctx.rev(), keepheads)
    if hiderevs:
        lock = None
        try:
            lock = repo.lock()
            if _isobsstoreenabled(repo):
                # prune: obsolete each revision with no successors
                markers = []
                for rev in hiderevs:
                    markers.append((repo[rev], ()))
                obsolete.createmarkers(repo, markers)
                repo.ui.status(_("%d changesets pruned\n") % len(hiderevs))
            else:
                repair.strip(repo.ui, repo,
                             [repo.changelog.node(r) for r in hiderevs])
        finally:
            lockmod.release(lock)
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve

    Delegates the heavy lifting to ``rebase --abort`` by temporarily
    renaming the saved rebase state file back into place; on failure the
    rename is undone so the unshelve state is preserved.
    """
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)

        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'abort' : True
            })
        except Exception:
            # restore the unshelve state so a retry is still possible
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise

        lock = repo.lock()

        mergefiles(ui, repo, state.wctx, state.pendingctx)

        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        ui.warn(_("unshelve of '%s' aborted\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve

    Resets the merge state, reverts the shelved files (unless --keep),
    then strips the temporary shelve nodes.
    """
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)
        lock = repo.lock()
        merge.mergestate(repo).reset()
        if opts['keep']:
            repo.setparents(repo.dirstate.parents()[0])
        else:
            revertfiles = readshelvedfiles(repo, state.name)
            wctx = repo.parents()[0]
            cmdutil.revert(ui, repo, wctx, [wctx.node(), nullid],
                           *revertfiles, **{'no_backup': True})
            # fix up the weird dirstate states the merge left behind
            mf = wctx.manifest()
            dirstate = repo.dirstate
            for f in revertfiles:
                if f in mf:
                    dirstate.normallookup(f)
                else:
                    dirstate.drop(f)
            # NOTE(review): writes dirstate private attributes directly to
            # force single-parent state — relies on dirstate internals
            dirstate._pl = (wctx.node(), nullid)
            dirstate._dirty = True

        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        ui.warn(_("unshelve of '%s' aborted\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
def abort(repo, originalwd, target, state):
    'Restore the repository to its original state'
    # revisions actually created by the rebase (nullrev entries are
    # not-yet-rebased placeholders)
    dstates = [s for s in state.values() if s > nullrev]
    immutable = [d for d in dstates if not repo[d].mutable()]
    cleanup = True
    if immutable:
        repo.ui.warn(_("warning: can't clean up immutable changesets %s\n")
                     % ', '.join(str(repo[r]) for r in immutable),
                     hint=_('see hg help phases for details'))
        cleanup = False

    descendants = set()
    if dstates:
        descendants = set(repo.changelog.descendants(dstates))
    # new commits on top of the rebased set make stripping unsafe
    if descendants - set(dstates):
        repo.ui.warn(_("warning: new changesets detected on target branch, "
                       "can't strip\n"))
        cleanup = False

    if cleanup:
        # Update away from the rebase if necessary
        if inrebase(repo, originalwd, state):
            merge.update(repo, repo[originalwd].rev(), False, True, False)

        # Strip from the first rebased revision
        rebased = filter(lambda x: x > -1 and x != target, state.values())
        if rebased:
            strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
            # no backup of rebased cset versions needed
            repair.strip(repo.ui, repo, strippoints)

    clearstatus(repo)
    repo.ui.warn(_('rebase aborted\n'))
    return 0
def cleanupnode(ui, repo, name, nodes):
    """strip a group of nodes from the repository

    The set of node to strip may contains unknown nodes."""
    ui.debug('should strip %s nodes %s\n' %
             (name, ', '.join([node.short(n) for n in nodes])))
    lock = None
    try:
        lock = repo.lock()
        # do not let filtering get in the way of the cleanse
        # we should probably get rid of obsolescence marker created during the
        # histedit, but we currently do not have such information.
        repo = repo.unfiltered()
        # Find all nodes that need to be stripped
        # (we use %lr instead of %ln to silently ignore unknown items)
        nm = repo.changelog.nodemap
        nodes = sorted(n for n in nodes if n in nm)
        roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
        for c in roots:
            # We should process nodes in reverse order to strip tip-most
            # first (which would reduce bundle overhead), but that triggers
            # a bug in the changegroup hook.
            repair.strip(ui, repo, c)
    finally:
        release(lock)
def clearrebased(ui, repo, state, skipped, collapsedas=None):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node."""
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        # obsolescence path: record predecessor -> successor relations
        markers = []
        for rev, newrev in sorted(state.items()):
            if newrev >= 0:
                if rev in skipped:
                    # skipped revisions were pruned (no successor)
                    succs = ()
                elif collapsedas is not None:
                    succs = (repo[collapsedas],)
                else:
                    succs = (repo[newrev],)
                markers.append((repo[rev], succs))
        if markers:
            obsolete.createmarkers(repo, markers)
    else:
        # strip path: only strip roots whose descendants are all in state
        rebased = [rev for rev in state if state[rev] > nullmerge]
        if rebased:
            stripped = []
            for root in repo.set('roots(%ld)', rebased):
                if set(repo.changelog.descendants([root.rev()])) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    stripped.append(root.node())
            if stripped:
                # backup the old csets by default
                repair.strip(ui, repo, stripped, "all")
def abort(repo, originalwd, target, state):
    'Restore the repository to its original state'
    dstates = [s for s in state.values() if s != nullrev]
    immutable = [d for d in dstates if not repo[d].mutable()]
    if immutable:
        raise util.Abort(_("can't abort rebase due to immutable changesets %s")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_('see hg help phases for details'))

    descendants = set()
    if dstates:
        # NOTE(review): varargs call — this matches the older changelog
        # descendants() API that took individual revs; verify against the
        # changelog version this file targets
        descendants = set(repo.changelog.descendants(*dstates))
    if descendants - set(dstates):
        repo.ui.warn(_("warning: new changesets detected on target branch, "
                       "can't abort\n"))
        return -1
    else:
        # Strip from the first rebased revision
        merge.update(repo, repo[originalwd].rev(), False, True, False)
        rebased = filter(lambda x: x > -1 and x != target, state.values())
        if rebased:
            strippoint = min(rebased)
            # no backup of rebased cset versions needed
            repair.strip(repo.ui, repo, repo[strippoint].node())
        clearstatus(repo)
        repo.ui.warn(_('rebase aborted\n'))
        return 0
def strip(ui, repo, revs, update=True, backup=True, force=None, bookmarks=None):
    """Strip revisions, optionally updating away first and deleting the
    given bookmarks inside a single 'strip' transaction."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            # with mq, prefer the applied-patch parent as update target
            if (util.safehasattr(repo, 'mq') and p2 != nullid
                and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write(repo.currenttransaction())

        repair.strip(ui, repo, revs, backup)

        repomarks = repo._bookmarks
        if bookmarks:
            with repo.transaction('strip') as tr:
                if repo._activebookmark in bookmarks:
                    bookmarksmod.deactivate(repo)
                for bookmark in bookmarks:
                    del repomarks[bookmark]
                repomarks.recordchange(tr)
            for bookmark in sorted(bookmarks):
                ui.write(_("bookmark '%s' deleted\n") % bookmark)
    finally:
        release(lock, wlock)
def strip(ui, repo, revs, update=True, backup=True, force=None, bookmark=None):
    """Strip revisions, optionally updating away first and deleting a
    single bookmark."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            # with mq, prefer the applied-patch parent as update target
            if (util.safehasattr(repo, 'mq') and p2 != nullid
                and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(ui, repo, revs, backup)

        marks = repo._bookmarks
        if bookmark:
            if bookmark == repo._bookmarkcurrent:
                bookmarks.unsetcurrent(repo)
            del marks[bookmark]
            marks.write()
            ui.write(_("bookmark '%s' deleted\n") % bookmark)
    finally:
        release(lock, wlock)
def strip(ui, repo, changesets, *args, **opts):
    """Strip changesets, falling back to one-at-a-time on old Mercurial."""
    try:
        repair.strip(ui, repo, changesets, *args, **opts)
    except TypeError:
        # only 2.1.2 and later allow strip to take a list of nodes
        # NOTE(review): a TypeError raised for any other reason inside
        # repair.strip would also be caught here and retrigger the fallback
        for changeset in changesets:
            repair.strip(ui, repo, changeset, *args, **opts)
def pushstrip(repo, key, old, new):
    """pushkey for strip that allows remote stripping.

    We only allow users in a controlled users list to perform remote
    stripping.  ``new`` holds the nodes to strip, one per line.  Returns 1
    on success, 0 when the request is rejected.
    """
    if 'USER' not in os.environ:
        repo.ui.write(_('request not authenticated; cannot perform remote strip\n'))
        return 0

    allowed = repo.ui.configlist('reviewboard', 'remote_strip_users')
    if os.environ['USER'] not in allowed:
        repo.ui.write(_('user not in list of users allowed to remote strip\n'))
        return 0

    nodes = []
    for node in new.splitlines():
        ctx = repo[node]

        # Stripping changesets that are public carries too much risk that too
        # many children changesets will also get stripped. Disallow the
        # practice.
        if ctx.phase() == phases.public:
            repo.ui.write(_('cannot strip public changeset: %s\n') % ctx.hex())
            return 0

        nodes.append(ctx.node())

    # The strip extension does higher-level things like remove bookmarks
    # referencing stripped changesets. We shouldn't need this functionality, so
    # we use the core API.
    repair.strip(repo.ui, repo, nodes, backup=True, topic='remotestrip')
    return 1
def _stripoldcommits(self):
    """Strip the pre-rewrite commits recorded in ``self.replacemap``."""
    replaced = self.replacemap.keys()
    # make sure we don't strip innocent children: exclude any replaced
    # revision that still has descendants outside the replaced set
    striprevs = self.repo.revs('%ln - (::(heads(%ln::)-%ln))',
                               replaced, replaced, replaced)
    tonode = self.repo.changelog.node
    stripnodes = []
    for rev in striprevs:
        stripnodes.append(tonode(rev))
    if stripnodes:
        repair.strip(self.repo.ui, self.repo, stripnodes)
def removenodes(self, ui, repo):
    """Cleanup temporary nodes from the repo"""
    if not self.obsshelve:
        # traditional shelve: physically remove the temporary commits
        repair.strip(ui, repo, self.nodestoremove,
                     backup=False, topic='shelve')
        return
    # obsolescence-based shelve: obsolete the temporary commits with no
    # successors instead of stripping them
    unfiltered = repo.unfiltered()
    relations = []
    for n in self.nodestoremove:
        relations.append((unfiltered[n or '.'], ()))
    obsolete.createmarkers(repo, relations)
def timetravel(ui, repo):
    "Change date of commit."
    # walk first parents until the first changeset whose phase is 0
    ctx = repo[None].p1()
    while ctx.phase():
        ctx = ctx.p1()
    parent = ctx
    # NOTE(review): parent is the same context as ctx here, so copy_branch
    # receives the same changeset twice — confirm this is intended
    date = util.makedate()
    update_node, strip_nodes = copy_branch(repo, ctx, parent, date)
    if update_node:
        hg.update(repo, update_node)
    if strip_nodes:
        repair.strip(ui, repo, strip_nodes)
def abort(repo, originalwd, target, state):
    'Restore the repository to its original state'
    # refuse to strip if commits unrelated to the rebase appeared on top
    if set(repo.changelog.descendants(target)) - set(state.values()):
        repo.ui.warn(_("warning: new changesets detected on target branch, "
                       "not stripping\n"))
    else:
        # Strip from the first rebased revision
        merge.update(repo, repo[originalwd].rev(), False, True, False)
        rebased = filter(lambda x: x > -1, state.values())
        if rebased:
            strippoint = min(rebased)
            repair.strip(repo.ui, repo, repo[strippoint].node(), "strip")
        clearstatus(repo)
        repo.ui.status(_('rebase aborted\n'))
def strip(self, repo, revs, update=True, backup="all", force=None):
    """Strip revisions from the repo, updating to the qparent first when
    ``update`` is set."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            self.checklocalchanges(repo, force=force, refresh=False)
            urev = self.qparents(repo, revs[0])
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(self.ui, repo, revs, backup)
    finally:
        release(lock, wlock)
def strip(self, repo, rev):
    """Strip ``rev`` with no backup, dropping undo data before and after."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        try:
            lock = repo.lock()
            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, 'none')
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            lock.release()
    finally:
        wlock.release()
def abort(repo, originalwd, target, state, activebookmark=None):
    '''Restore the repository to its original state.  Additional args:

    activebookmark: the name of the bookmark that should be active after the
        restore'''
    try:
        # If the first commits in the rebased set get skipped during the rebase,
        # their values within the state mapping will be the target rev id. The
        # dstates list must not contain the target rev (issue4896)
        dstates = [s for s in state.values() if s >= 0 and s != target]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_('see "hg help phases" for details'))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        # unrelated new commits on top of the rebased set block the strip
        if descendants - set(dstates):
            repo.ui.warn(_("warning: new changesets detected on target branch, "
                           "can't strip\n"))
            cleanup = False

        if cleanup:
            # Update away from the rebase if necessary
            if needupdate(repo, state):
                merge.update(repo, originalwd, False, True, False)

            # Strip from the first rebased revision
            rebased = filter(lambda x: x >= 0 and x != target, state.values())
            if rebased:
                strippoints = [
                        c.node() for c in repo.set('roots(%ld)', rebased)]
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0
def strip(ui, repo, revs, update=True, backup="all", force=None):
    """Strip revisions, updating away to a safe parent first when ``update``
    is set."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            # prefer an applied mq patch parent as the update target
            if p2 != nullid and p2 in [x.node for x in repo.mq.applied]:
                urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(ui, repo, revs, backup)
    finally:
        release(lock, wlock)
def unshelvecontinue(ui, repo, state, opts): """subcommand to continue an in-progress unshelve""" # We're finishing off a merge. First parent is our original # parent, second is the temporary "fake" commit we're unshelving. wlock = repo.wlock() lock = None try: checkparents(repo, state) ms = merge.mergestate(repo) if [f for f in ms if ms[f] == 'u']: raise error.Abort( _("unresolved conflicts, can't continue"), hint=_("see 'hg resolve', then 'hg unshelve --continue'")) lock = repo.lock() util.rename(repo.join('unshelverebasestate'), repo.join('rebasestate')) try: rebase.rebase(ui, repo, **{ 'continue' : True }) except Exception: util.rename(repo.join('rebasestate'), repo.join('unshelverebasestate')) raise shelvectx = repo['tip'] if not shelvectx in state.pendingctx.children(): # rebase was a no-op, so it produced no child commit shelvectx = state.pendingctx else: # only strip the shelvectx if the rebase produced it state.stripnodes.append(shelvectx.node()) mergefiles(ui, repo, state.wctx, shelvectx) repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve') shelvedstate.clear(repo) unshelvecleanup(ui, repo, state.name, opts) ui.status(_("unshelve of '%s' complete\n") % state.name) finally: lockmod.release(lock, wlock)
def unshelvecontinue(ui, repo, state, opts): """subcommand to continue an in-progress unshelve""" # We're finishing off a merge. First parent is our original # parent, second is the temporary "fake" commit we're unshelving. wlock = repo.wlock() lock = None try: checkparents(repo, state) ms = merge.mergestate(repo) if [f for f in ms if ms[f] == 'u']: raise util.Abort( _("unresolved conflicts, can't continue"), hint=_("see 'hg resolve', then 'hg unshelve --continue'")) finishmerge(ui, repo, ms, state.stripnodes, state.name, opts) lock = repo.lock() repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve') unshelvecleanup(ui, repo, state.name, opts) ui.status(_("unshelve of '%s' complete\n") % state.name) finally: lockmod.release(lock, wlock)
def cleanupnode(ui, repo, name, nodes):
    """strip a group of nodes from the repository

    The set of node to strip may contains unknown nodes."""
    ui.debug("should strip %s nodes %s\n" %
             (name, ", ".join([node.short(n) for n in nodes])))
    lock = None
    try:
        lock = repo.lock()
        # Find all nodes that need to be stripped
        # (we use %lr instead of %ln to silently ignore unknown items)
        nm = repo.changelog.nodemap
        nodes = [n for n in nodes if n in nm]
        roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
        for c in roots:
            # We should process nodes in reverse order to strip tip-most
            # first (which would reduce bundle overhead), but that triggers
            # a bug in the changegroup hook.
            repair.strip(ui, repo, c)
    finally:
        lockmod.release(lock)
def abort(repo, originalwd, target, state, activebookmark=None):
    """Restore the repository to its original state.  Additional args:

    activebookmark: the name of the bookmark that should be active after the
        restore"""
    dstates = [s for s in state.values() if s >= 0]
    immutable = [d for d in dstates if not repo[d].mutable()]
    cleanup = True
    if immutable:
        repo.ui.warn(
            _("warning: can't clean up public changesets %s\n")
            % ", ".join(str(repo[r]) for r in immutable),
            hint=_('see "hg help phases" for details'),
        )
        cleanup = False

    descendants = set()
    if dstates:
        descendants = set(repo.changelog.descendants(dstates))
    # unrelated new commits on top of the rebased set block the strip
    if descendants - set(dstates):
        repo.ui.warn(_("warning: new changesets detected on target branch, "
                       "can't strip\n"))
        cleanup = False

    if cleanup:
        # Update away from the rebase if necessary
        if needupdate(repo, state):
            merge.update(repo, originalwd, False, True, False)

        # Strip from the first rebased revision
        rebased = filter(lambda x: x >= 0 and x != target, state.values())
        if rebased:
            strippoints = [c.node() for c in repo.set("roots(%ld)", rebased)]
            # no backup of rebased cset versions needed
            repair.strip(repo.ui, repo, strippoints)

    if activebookmark and activebookmark in repo._bookmarks:
        bookmarks.activate(repo, activebookmark)

    clearstatus(repo)
    repo.ui.warn(_("rebase aborted\n"))
    return 0
def abort(repo, originalwd, target, state):
    'Restore the repository to its original state'
    descendants = repo.changelog.descendants
    ispublic = lambda r: repo._phaserev[r] == phases.public
    # public descendants of the target cannot be stripped
    if filter(ispublic, descendants(target)):
        repo.ui.warn(_("warning: immutable rebased changeset detected, "
                       "can't abort\n"))
        return -1
    elif set(descendants(target)) - set(state.values()):
        repo.ui.warn(_("warning: new changesets detected on target branch, "
                       "can't abort\n"))
        return -1
    else:
        # Strip from the first rebased revision
        merge.update(repo, repo[originalwd].rev(), False, True, False)
        rebased = filter(lambda x: x > -1 and x != target, state.values())
        if rebased:
            strippoint = min(rebased)
            # no backup of rebased cset versions needed
            repair.strip(repo.ui, repo, repo[strippoint].node())
        clearstatus(repo)
        repo.ui.warn(_('rebase aborted\n'))
        return 0
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit the history between <parent> and the working
    directory parent.  Supports --outgoing (edit changesets not in a
    remote), --continue (resume after a manual intervention) and --abort
    (restore the pre-edit state).
    """
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest, revs, checkout = hg.parseurl(
            ui.expandpath(parent or 'default-push', parent or 'default'),
            ['tip'])
        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        other = hg.repository(ui, dest)
        ui.status(_('comparing with %s\n') % url.hidepassword(dest))
        # parent becomes the list of outgoing nodes
        parent = repo.findoutgoing(other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced,
         tmpnodes, existing, rules, keep, tip, ) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # walk the user-added subtree level by level, classifying the new
        # nodes as temporary (for folds) or created
        while newchildren:
            if action in ['f', 'fold', ]:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])
            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', ):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold', ):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        # commit only if the working directory actually changed
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())
        if action in ('e', 'edit', 'p', 'pick', ):
            replaced.append(oldctx.node())
            if new:
                created.append(new)
                parentctx = repo[new]
        else: # fold
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_,
             tmpnodes_, ) = finishfold(ui, repo, parentctx, oldctx, new,
                                       opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        # fall through to the rule loop below with the remaining rules
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip, ) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes, ):
            # strip tip-most first; nodes already gone are ignored
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        cmdutil.bail_if_changed(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try --continue or --abort')
        tip, empty = repo.dirstate.parents()
        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = parent[0]

        revs = between(repo, parent, tip)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        # one "pick" line per changeset, truncated to 80 columns
        rules = '\n'.join([('pick %s %s' % (
            c.hex()[:12], c.description().splitlines()[0]))[:80]
            for c in ctxs])
        rules += editcomment % (node.hex(parent)[:12], node.hex(tip)[:12], )
        rules = ui.edit(rules, ui.username())

        parentctx = repo[parent].parents()[0]
        # drop blank lines and comment lines from the edited rules
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    # apply the remaining rules, persisting state before each action so an
    # interrupted edit can be resumed
    while rules:
        writestate(repo, parentctx.node(), created, replaced, tmpnodes,
                   existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_,
         tmpnodes_, ) = actiontable[action](ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced,
                        lambda x, y: cmp(repo[x].rev(), repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared with
    others. Doing so will force everybody else to perform the same rebase
    or they will end up with duplicated changesets after pulling in your
    rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the tipmost head of the current named branch as the destination.
    (The destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a "source"
    changeset or as a "base" changeset. Both are shorthand for a
    topologically related set of changesets (the "source branch"). If you
    specify source (``-s/--source``), rebase will rebase that changeset
    and all of its descendants onto dest. If you specify base
    (``-b/--base``), rebase will select ancestors of base back to but not
    including the common ancestor with dest. Thus, ``-b`` is less precise
    but more convenient than ``-s``: you can specify any changeset in the
    source branch, and rebase will select the whole branch. If you
    specify neither ``-s`` nor ``-b``, rebase uses the parent of the
    working directory as the base.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep``
    to preserve the original source changesets. Some changesets in the
    source branch (e.g. merges from the destination branch) may be
    dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset and
    source branch is that, unlike ``merge``, rebase will do nothing if
    you are at the latest (tipmost) head of a named branch with two
    heads. You need to explicitly specify source and/or destination (or
    ``update`` to the other head, if it's the head of the intended source
    branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            # resume/abort from the persisted rebase state
            (originalwd, target, state, skipped, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))
            if detachf:
                if not (srcf or revf):
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = repo[destf]

            # compute the set of changesets to rebase
            if revf:
                rebaseset = repo.revs('%lr', revf)
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                rebaseset = repo.revs('(%ld)::', src)
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                rebaseset = repo.revs(
                    '(children(ancestor(%ld, %d)) and ::(%ld))::',
                    base, dest, base)
            if rebaseset:
                root = min(rebaseset)
            else:
                root = None

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination')
                result = None
            elif not keepf and list(
                    repo.revs('first(children(%ld) - %ld)',
                              rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            elif not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))
            else:
                result = buildstate(repo, dest, rebaseset, detachf)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    targetancestors.add(target)
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            assert not extrafn, 'cannot use both keepbranches and extrafn'
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                                       'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # -1 marks a revision not yet rebased
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state)
                        if stats and stats[3] > 0:
                            raise util.Abort(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # build the single collapsed commit
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] != nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v != nullmerge:
                    nstate[repo[k].node()] = repo[v].node()

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        if currentbookmarks:
            updatebookmarks(repo, nstate, currentbookmarks, **opts)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['tip'].node() == repo._bookmarks[activebookmark]):
            bookmarks.setcurrent(repo, activebookmark)
    finally:
        release(lock, wlock)
def collapse(ui, repo, **opts):
    """collapse multiple revisions into one

    Collapse combines multiple consecutive changesets into a single changeset,
    preserving any descendants of the final changeset. The commit messages for
    the collapsed changesets are concatenated and may be edited before the
    collapse is completed.
    """
    # Resolve the user-supplied revision range into the inclusive set of
    # revisions between its first and last members.
    rng = cmdutil.revrange(repo, opts['rev'])
    first = rng[0]
    last = rng[len(rng) - 1]
    revs = inbetween(repo, first, last)

    if not revs:
        raise util.Abort(_('revision %s is not an ancestor of revision %s\n')
                         % (first, last))
    elif len(revs) == 1:
        raise util.Abort(_('only one revision specified'))

    ui.debug(_('Collapsing revisions %s\n') % revs)

    # Sanity-check each revision in the range:
    #  - without --force, all must belong to the invoking user;
    #  - any child of a non-final revision must itself be collapsed;
    #  - any parent of a non-initial revision must itself be collapsed
    #    (i.e. no merges into or out of the middle of the range).
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                                           'being collapsed, please rebase\n')
                                         % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                                           'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
                           'won\'t collapse.') % first)

    # Refuse to run with uncommitted working-copy changes.
    cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # Descendants of the final revision must be re-parented onto the new
    # collapsed changeset; movemap records their destinations.
    tomove = list(repo.changelog.descendants(last))
    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    origparent = repo['.'].rev()
    collapsed = None

    try:
        collapsed = makecollapsed(ui, repo, parent, revs)
        movemap[max(revs)] = collapsed
        movedescendants(ui, repo, collapsed, tomove, movemap)
    except:
        # On any failure, restore the working copy to where it started and
        # strip the partially-created collapsed changeset before re-raising.
        merge.update(repo, repo[origparent].rev(), False, True, False)
        if collapsed:
            repair.strip(ui, repo, collapsed.node(), "strip")
        raise

    if not opts['keep']:
        # Remove the original (now-collapsed) changesets.
        ui.debug(_('stripping revision %d\n') % first)
        repair.strip(ui, repo, repo[first].node(), "strip")
    ui.status(_('collapse completed\n'))
def cmddrop(ui, repo, *revs, **opts):
    """I'm hacky do not use me!

    This command strip a changeset, its precursors and all obsolescence
    marker associated to its chain. There is no way to limit the extend of
    the purge yet. You may have to repull from other source to get some
    changeset and obsolescence marker back.

    This intended for Matt Mackall usage only. do not use me.
    """
    # Merge positional and --rev revisions; default to the working parent.
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = ['.']
    # get the changeset
    revs = scmutil.revrange(repo, revs)
    if not revs:
        ui.write_err('no revision to drop\n')
        return 1
    # lock from the beginning to prevent race
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # check they have no children
        # NOTE(review): these two error messages lack a trailing '\n',
        # unlike 'no revision to drop\n' above — confirm whether intended.
        if repo.revs('%ld and public()', revs):
            ui.write_err('cannot drop public revision')
            return 1
        if repo.revs('children(%ld) - %ld', revs, revs):
            ui.write_err('cannot drop revision with children')
            return 1
        # If the working directory sits on a to-be-dropped revision, move it
        # to the nearest surviving ancestor (or the null revision).
        if repo.revs('. and %ld', revs):
            newrevs = repo.revs('max(::. - %ld)', revs)
            if newrevs:
                assert len(newrevs) == 1
                newrev = newrevs.first()
            else:
                newrev = -1
            commands.update(ui, repo, newrev)
            ui.status(_('working directory now at %s\n') % repo[newrev])
        # get all markers and successors up to root nodes
        nodes = [repo[r].node() for r in revs]
        with timed(ui, 'search obsmarker'):
            markers = set(obsmarkerchainfrom(repo.obsstore, nodes))
        ui.write('%i obsmarkers found\n' % len(markers))
        # Collect every node referenced by the marker chain that still exists
        # in the (unfiltered) changelog.
        cl = repo.unfiltered().changelog
        with timed(ui, 'search nodes'):
            allnodes = set(nodes)
            allnodes.update(m[0] for m in markers if cl.hasnode(m[0]))
        ui.write('%i nodes found\n' % len(allnodes))
        cl = repo.changelog
        visiblenodes = set(n for n in allnodes if cl.hasnode(n))
        # check constraint again
        if repo.revs('%ln and public()', visiblenodes):
            ui.write_err('cannot drop public revision')
            return 1
        if repo.revs('children(%ln) - %ln', visiblenodes, visiblenodes):
            ui.write_err('cannot drop revision with children')
            return 1
        if markers:
            # strip them
            with timed(ui, 'strip obsmarker'):
                stripmarker(ui, repo, markers)
        # strip the changeset
        with timed(ui, 'strip nodes'):
            repair.strip(ui, repo, list(allnodes), backup="all",
                         topic='drophack')
    finally:
        lockmod.release(lock, wlock)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by
    rebasing, but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the latest (tipmost) head of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # state maps source revision -> rebased revision (-1 = not yet done).
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if contf or abortf:
            # --continue/--abort resume a previously interrupted rebase and
            # are mutually exclusive with everything that defines a new one.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, skipped, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            if extrafn:
                raise util.Abort(_('cannot use both keepbranches and extrafn'))
            # Preserve each changeset's branch by stashing it in extra.
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # state[rev] == -1 means this revision has not been rebased yet
            # (already-done entries occur when resuming with --continue).
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # Persist progress before each step so the rebase can be
                # resumed or aborted after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    # Two dirstate parents: a conflicted merge was resolved
                    # by the user; just commit it below.
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    # stats[3] is the count of unresolved files.
                    if stats and stats[3] > 0:
                        raise util.Abort(_('unresolved conflicts (see hg '
                                           'resolve, then hg rebase '
                                           '--continue)'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Commit the single collapsed changeset. Note: 'rev' here is the
            # last value left over from the loop above.
            p1, p2 = defineparents(repo, min(state), target, state,
                                   targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn)

        if 'qtip' in repo.tags():
            # Keep MQ's applied-patch records pointing at the new nodes.
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        # Rollback data refers to stripped revisions; discard it.
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
class WorkSpace(object):
    # Wraps a Mercurial repository with workspace-level operations
    # (parent lookup, active-change tracking, diffing and recommit).

    def __init__(self, repository):
        self.repo = repository
        self.ui = self.repo.ui
        self.name = self.repo.root
        # Cache of ActiveList objects keyed by parent path (see active()).
        self.activecache = {}

    def parent(self, spec=None):
        '''Return the canonical workspace parent, either SPEC (which will be
        expanded) if provided or the default parent otherwise.'''
        if spec:
            return self.ui.expandpath(spec)

        p = self.ui.expandpath('default')
        if p == 'default':
            # expandpath returned its input unchanged: no default configured.
            return None
        else:
            return p

    def _localtip(self, outgoing, wctx):
        '''Return the most representative changeset to act as the localtip.

        If the working directory is modified (has file changes, is a merge, or
        has switched branches), this will be a workingctx.

        If the working directory is unmodified, this will be the most recent
        (highest revision number) local (outgoing) head on the current branch,
        if no heads are determined to be outgoing, it will be the most recent
        head on the current branch.
        '''
        #
        # A modified working copy is seen as a proto-branch, and thus
        # our only option as the local tip.
        #
        if (wctx.files() or len(wctx.parents()) > 1 or
            wctx.branch() != wctx.parents()[0].branch()):
            return wctx

        heads = self.repo.heads(start=wctx.parents()[0].node())
        headctxs = [self.repo.changectx(n) for n in heads]
        localctxs = [c for c in headctxs if c.node() in outgoing]

        # Prefer outgoing heads; fall back to all heads. Take the highest rev.
        ltip = sorted(localctxs or headctxs, key=lambda x: x.rev())[-1]

        if len(heads) > 1:
            self.ui.warn('The current branch has more than one head, '
                         'using %s\n' % ltip.rev())

        return ltip

    def _parenttip(self, heads, outgoing):
        '''Return the highest-numbered, non-outgoing changeset that is an
        ancestor of a changeset in heads.

        This is intended to find the most recent changeset on a given branch
        that is shared between a parent and child workspace, such that it can
        act as a stand-in for the parent workspace.
        '''
        def tipmost_shared(head, outnodes):
            '''Return the tipmost node on the same branch as head that is not
            in outnodes.

            We walk from head to the bottom of the workspace (revision 0)
            collecting nodes not in outnodes during the add phase and return
            the first node we see in the iter phase that was previously
            collected.

            If no node is found (all revisions >= 0 are outgoing), the only
            possible parenttip is the null node (node.nullid) which is
            returned explicitly.

            See the docstring of mercurial.cmdutil.walkchangerevs() for the
            phased approach to the iterator returned.  The important part to
            note is that the 'add' phase gathers nodes, which the 'iter'
            phase then iterates through.'''
            opts = {'rev': ['%s:0' % head.rev()],
                    'follow': True}
            get = util.cachefunc(lambda r: self.repo.changectx(r).changeset())
            changeiter = cmdutil.walkchangerevs(self.repo.ui, self.repo, [],
                                                get, opts)[0]

            seen = []
            for st, rev, fns in changeiter:
                n = self.repo.changelog.node(rev)
                if st == 'add':
                    if n not in outnodes:
                        seen.append(n)
                elif st == 'iter':
                    if n in seen:
                        return rev
            return self.repo.changelog.rev(node.nullid)

        nodes = set(outgoing)
        ptips = map(lambda x: tipmost_shared(x, nodes), heads)
        return self.repo.changectx(sorted(ptips)[-1])

    def status(self, base='.', head=None):
        '''Translate from the hg 6-tuple status format to a hash keyed on
        change-type'''
        states = ['modified', 'added', 'removed', 'deleted', 'unknown',
                  'ignored']
        chngs = self.repo.status(base, head)
        return dict(zip(states, chngs))

    def findoutgoing(self, parent):
        '''Return the base set of outgoing nodes.

        A caching wrapper around mercurial.localrepo.findoutgoing().
        Complains (to the user), if the parent workspace is non-existent or
        inaccessible'''
        # Buffer output so chatter from opening the parent repo is hidden.
        self.ui.pushbuffer()
        try:
            try:
                ui = self.ui
                # remoteui only exists in some Mercurial versions.
                if hasattr(cmdutil, 'remoteui'):
                    ui = cmdutil.remoteui(ui, {})
                pws = hg.repository(ui, parent)
                return self.repo.findoutgoing(pws)
            except HgRepoError:
                self.ui.warn("Warning: Parent workspace '%s' is not "
                             "accessible\n"
                             "active list will be incomplete\n\n" % parent)
                return []
        finally:
            self.ui.popbuffer()
    # Memoize on the parent argument (util.cachefunc keys on all args).
    findoutgoing = util.cachefunc(findoutgoing)

    def modified(self):
        '''Return a list of files modified in the workspace'''
        wctx = self.workingctx()
        return sorted(wctx.files() + wctx.deleted()) or None

    def merged(self):
        '''Return boolean indicating whether the workspace has an uncommitted
        merge'''
        wctx = self.workingctx()
        return len(wctx.parents()) > 1

    def branched(self):
        '''Return boolean indicating whether the workspace has an
        uncommitted named branch'''
        wctx = self.workingctx()
        return wctx.branch() != wctx.parents()[0].branch()

    def active(self, parent=None):
        '''Return an ActiveList describing changes between workspace and
        parent workspace (including uncommitted changes).

        If workspace has no parent ActiveList will still describe any
        uncommitted changes'''
        parent = self.parent(parent)
        if parent in self.activecache:
            return self.activecache[parent]

        if parent:
            outgoing = self.findoutgoing(parent)
            outnodes = self.repo.changelog.nodesbetween(outgoing)[0]
        else:
            outgoing = []       # No parent, no outgoing nodes
            outnodes = []

        localtip = self._localtip(outnodes, self.workingctx())

        # A workingctx has rev() of None; use its parents as the head set.
        if localtip.rev() is None:
            heads = localtip.parents()
        else:
            heads = [localtip]

        ctxs = [self.repo.changectx(n) for n in
                self.repo.changelog.nodesbetween(outgoing,
                                                 [h.node() for h in heads])[0]]

        if localtip.rev() is None:
            ctxs.append(localtip)

        act = ActiveList(self, self._parenttip(heads, outnodes), ctxs)

        self.activecache[parent] = act
        return act

    def pdiff(self, pats, opts, parent=None):
        'Return diffs relative to PARENT, as best as we can make out'

        parent = self.parent(parent)
        act = self.active(parent)

        #
        # act.localtip maybe nil, in the case of uncommitted local
        # changes.
        #
        if not act.revs:
            return

        matchfunc = cmdutil.match(self.repo, pats, opts)
        opts = patch.diffopts(self.ui, opts)

        return self.diff(act.parenttip.node(), act.localtip.node(),
                         match=matchfunc, opts=opts)

    def squishdeltas(self, active, message, user=None):
        '''Create a single conglomerate changeset based on a given active
        list.  Removes the original changesets comprising the given active
        list, and any tags pointing to them.

        Operation:

          - Commit an activectx object representing the specified active list,

          - Remove any local tags pointing to changesets in the specified
            active list.

          - Remove the changesets comprising the specified active list.

          - Remove any metadata that may refer to changesets that were
            removed.

        Calling code is expected to hold both the working copy lock and
        repository lock of the destination workspace
        '''
        def strip_local_tags(active):
            '''Remove any local tags referring to the specified nodes.'''

            if os.path.exists(self.repo.join('localtags')):
                fh = None
                try:
                    fh = self.repo.opener('localtags')
                    tags = active.prune_tags(fh)
                    fh.close()

                    fh = self.repo.opener('localtags', 'w', atomictemp=True)
                    fh.writelines(tags)
                    fh.rename()
                finally:
                    if fh and not fh.closed:
                        fh.close()

        if active.files():
            for entry in active:
                #
                # Work around Mercurial issue #1666, if the source
                # file of a rename exists in the working copy
                # Mercurial will complain, and remove the file.
                #
                # We preemptively remove the file to avoid the
                # complaint (the user was asked about this in
                # cdm_recommit)
                #
                if entry.is_renamed():
                    path = self.repo.wjoin(entry.parentname)
                    if os.path.exists(path):
                        os.unlink(path)

            self.repo.commitctx(active.context(message, user))
            wsstate = "recommitted"
            destination = self.repo.changelog.tip()
        else:
            #
            # If all we're doing is stripping the old nodes, we want to
            # update the working copy such that we're not at a revision
            # that's about to go away.
            #
            wsstate = "tip"
            destination = active.parenttip.node()

        self.clean(destination)

        #
        # Tags were elided by the activectx object.  Local tags,
        # however, must be removed manually.
        #
        try:
            strip_local_tags(active)
        except EnvironmentError, e:
            raise util.Abort('Could not recommit tags: %s\n' % e)

        # Silence all the strip and update fun
        self.ui.pushbuffer()

        #
        # Remove the active lists component changesets by stripping
        # the base of any active branch (of which there may be
        # several)
        #
        try:
            try:
                for base in active.bases():
                    #
                    # Any cached information about the repository is
                    # likely to be invalid during the strip.  The
                    # caching of branch tags is especially
                    # problematic.
                    #
                    self.repo.invalidate()
                    repair.strip(self.ui, self.repo, base.node(), backup=False)
            except:
                #
                # If this fails, it may leave us in a surprising place in
                # the history.
                #
                # We want to warn the user that something went wrong,
                # and what will happen next, re-raise the exception, and
                # bring the working copy back into a consistent state
                # (which the finally block will do)
                #
                self.ui.warn("stripping failed, your workspace will have "
                             "superfluous heads.\n"
                             "your workspace has been updated to the "
                             "%s changeset.\n" % wsstate)
                raise               # Re-raise the exception
        finally:
            self.clean()
            self.repo.dirstate.write()  # Flush the dirstate
            self.repo.invalidate()      # Invalidate caches

            #
            # We need to remove Hg's undo information (used for rollback),
            # since it refers to data that will probably not exist after
            # the strip.
            #
            if os.path.exists(self.repo.sjoin('undo')):
                try:
                    os.unlink(self.repo.sjoin('undo'))
                except EnvironmentError, e:
                    raise util.Abort('failed to remove undo data: %s\n' % e)

            self.ui.popbuffer()
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file
    modifications are to be performed (a common pattern). The function should
    return an *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is
    appropriate for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible. Before any changes are made, we
    verify the state of the repo is sufficient for transformation to occur
    and abort otherwise.

    Returns a dict mapping old node -> new node.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort('must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort('cannot call replacechangesets when a transaction '
                         'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if
    # there are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # obsolete.isenabled() only exists on newer Mercurial versions.
    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))

            if not isinstance(newctx, context.memctx):
                raise util.Abort('createfn must return a context.memctx')

            if oldctx == newctx:
                raise util.Abort('createfn must create a new changeset')

            newnode = newctx.commit()

            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort('we do not allow replacements to modify '
                                 'files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be
            # as dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort('unknown parent of child commit: %s' %
                                 oldctx.hex(),
                                 hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            # Commit exactly once and record the mapping. (A previous version
            # of this code committed a second time inside a
            # ``phases.new-commit`` setconfig guard; the explicit
            # adjustphase() below already retains the phase, so the duplicate
            # commit was removed.)
            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, 'applychanges'):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode
                repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort('active bookmark changed; '
                             'this should not occur!',
                             hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        # Stripping has to happen outside the transaction (see above).
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of history
    onto another. This can be useful for linearizing local changes relative
    to a master development tree.

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    # state maps source revision -> rebased revision (-1 = not yet done).
    state = {}
    skipped = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)

        if contf or abortf:
            # --continue/--abort resume a previously interrupted rebase and
            # are mutually exclusive with options that define a new one.
            if contf and abortf:
                raise error.ParseError('rebase',
                                       _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase', _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase', _('cannot specify both a '
                                                   'revision and a base'))
            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, collapsef)
            if result:
                originalwd, target, state, external = result
            else:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return

        if keepbranchesf:
            if extrafn:
                raise error.ParseError(
                    'rebase', _('cannot use both keepbranches and extrafn'))
            # Preserve each changeset's branch by stashing it in extra.
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        targetancestors = list(repo.changelog.ancestors(target))
        targetancestors.append(target)

        for rev in sorted(state):
            # state[rev] == -1 means this revision has not been rebased yet.
            if state[rev] == -1:
                # Persist progress before each step so the rebase can be
                # resumed or aborted after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                rebasenode(repo, rev, target, state, skipped, targetancestors,
                           collapsef, extrafn)
        ui.note(_('rebase merging completed\n'))

        if collapsef:
            # Commit the single collapsed changeset. Note: 'rev' here is the
            # last value left over from the loop above.
            p1, p2 = defineparents(repo, min(state), target, state,
                                   targetancestors)
            concludenode(repo, rev, p1, external, state, collapsef,
                         last=True, skipped=skipped, extrafn=extrafn)

        if 'qtip' in repo.tags():
            # Keep MQ's applied-patch records pointing at the new nodes.
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            if set(repo.changelog.descendants(min(state))) - set(state):
                ui.warn(_("warning: new changesets detected on source "
                          "branch, not stripping\n"))
            else:
                repair.strip(ui, repo, repo[min(state)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        # Rollback data refers to stripped revisions; discard it.
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
class WorkSpace(object):
    # Wrapper around a Mercurial repository representing a (possibly
    # child) workspace: computes outgoing/active changes relative to a
    # parent workspace and can "squish" an active list into one commit.

    def __init__(self, repository):
        self.repo = repository
        self.ui = self.repo.ui
        self.name = self.repo.root
        # Cache of ActiveList results keyed by parent path (see active()).
        self.activecache = {}

    def parent(self, spec=None):
        '''Return the canonical workspace parent, either SPEC (which
        will be expanded) if provided or the default parent
        otherwise.'''
        if spec:
            return self.ui.expandpath(spec)

        p = self.ui.expandpath('default')
        if p == 'default':
            # expandpath returned its input unchanged: no 'default'
            # path is configured, so this workspace has no parent.
            return None
        else:
            return p

    def _localtip(self, outgoing, wctx):
        '''Return the most representative changeset to act as the
        localtip.

        If the working directory is modified (has file changes, is a
        merge, or has switched branches), this will be a workingctx.

        If the working directory is unmodified, this will be the most
        recent (highest revision number) local (outgoing) head on the
        current branch, if no heads are determined to be outgoing, it
        will be the most recent head on the current branch.
        '''
        if (wctx.files() or len(wctx.parents()) > 1 or
            wctx.branch() != wctx.parents()[0].branch()):
            return wctx

        heads = self.repo.heads(start=wctx.parents()[0].node())
        headctxs = [self.repo.changectx(n) for n in heads]
        localctxs = [c for c in headctxs if c.node() in outgoing]

        # Highest-numbered outgoing head, falling back to any head.
        ltip = sorted(localctxs or headctxs, key=lambda x: x.rev())[-1]

        if len(heads) > 1:
            self.ui.warn('The current branch has more than one head, '
                         'using %s\n' % ltip.rev())

        return ltip

    def parenttip(self, heads, outgoing):
        '''Return the highest-numbered, non-outgoing changeset that is
        an ancestor of a changeset in heads.

        This returns the most recent changeset on a given branch that
        is shared between a parent and child workspace, in effect the
        common ancestor of the chosen local tip and the parent
        workspace.
        '''
        def tipmost_shared(head, outnodes):
            '''Return the changeset on the same branch as head that is
            not in outnodes and is closest to the tip.

            Walk outgoing changesets from head to the bottom of the
            workspace (revision 0) and return the the first changeset
            we see that is not in outnodes.

            If none is found (all revisions >= 0 are outgoing), the
            only possible parenttip is the null node (node.nullid)
            which is returned explicitly.
            '''
            for ctx in self._walkctxs(head, self.repo.changectx(0),
                                      follow=True,
                                      pick=lambda c: c.node() not in outnodes):
                return ctx

            return self.repo.changectx(node.nullid)

        nodes = set(outgoing)
        ptips = map(lambda x: tipmost_shared(x, nodes), heads)
        # Highest revision number wins across all candidate heads.
        return sorted(ptips, key=lambda x: x.rev(), reverse=True)[0]

    def status(self, base='.', head=None, files=None):
        '''Translate from the hg 6-tuple status format to a hash keyed
        on change-type'''
        states = ['modified', 'added', 'removed', 'deleted', 'unknown',
                  'ignored']

        match = self.matcher(files=files)
        chngs = self.repo.status(base, head, match=match)

        ret = {}
        # zip pairs each status bucket with its label; result maps
        # filename -> change-type string.
        for paths, change in zip(chngs, states):
            ret.update((f, change) for f in paths)
        return ret

    def findoutgoing(self, parent):
        '''Return the base set of outgoing nodes.

        A caching wrapper around mercurial.localrepo.findoutgoing().
        Complains (to the user), if the parent workspace is
        non-existent or inaccessible'''
        self.ui.pushbuffer()
        try:
            try:
                ui = self.ui
                # remoteui only exists on newer hg; use it when present.
                if hasattr(cmdutil, 'remoteui'):
                    ui = cmdutil.remoteui(ui, {})
                pws = hg.repository(ui, parent)
                # hg >= 1.6 moved findoutgoing into the discovery module.
                if Version.at_least("1.6"):
                    return discovery.findoutgoing(self.repo, pws)
                else:
                    return self.repo.findoutgoing(pws)
            except error.RepoError:
                # Best-effort: report and pretend nothing is outgoing.
                self.ui.warn("Warning: Parent workspace '%s' is not "
                             "accessible\n"
                             "active list will be incomplete\n\n" % parent)
                return []
        finally:
            self.ui.popbuffer()
    # Memoize per-parent; safe because the result is only used within
    # one command invocation.
    findoutgoing = util.cachefunc(findoutgoing)

    def modified(self):
        '''Return a list of files modified in the workspace'''
        wctx = self.workingctx()
        return sorted(wctx.files() + wctx.deleted()) or None

    def merged(self):
        '''Return boolean indicating whether the workspace has an
        uncommitted merge'''
        wctx = self.workingctx()
        return len(wctx.parents()) > 1

    def branched(self):
        '''Return boolean indicating whether the workspace has an
        uncommitted named branch'''
        wctx = self.workingctx()
        return wctx.branch() != wctx.parents()[0].branch()

    def active(self, parent=None, thorough=False):
        '''Return an ActiveList describing changes between workspace
        and parent workspace (including uncommitted changes).

        If the workspace has no parent, ActiveList will still describe any
        uncommitted changes.

        If thorough is True use neither the WorkList nor any cached
        results (though the result of this call will be cached for
        future, non-thorough, calls).'''

        parent = self.parent(parent)

        #
        # Use the cached copy if we can (we have one, and weren't
        # asked to be thorough)
        #
        if not thorough and parent in self.activecache:
            return self.activecache[parent]

        #
        # outbases: The set of outgoing nodes with no outgoing ancestors
        # outnodes: The full set of outgoing nodes
        #
        if parent:
            outbases = self.findoutgoing(parent)
            outnodes = self.repo.changelog.nodesbetween(outbases)[0]
        else:               # No parent, no outgoing nodes
            outbases = []
            outnodes = []

        wctx = self.workingctx(worklist=not thorough)
        localtip = self._localtip(outnodes, wctx)

        # A rev of None means localtip is the (uncommitted) workingctx;
        # in that case its parents stand in as the committed heads.
        if localtip.rev() is None:
            heads = localtip.parents()
        else:
            heads = [localtip]

        parenttip = self.parenttip(heads, outnodes)

        #
        # If we couldn't find a parenttip, the two repositories must
        # be unrelated (Hg catches most of this, but this case is
        # valid for it but invalid for us)
        #
        if parenttip == None:
            raise util.Abort('repository is unrelated')

        headnodes = [h.node() for h in heads]
        ctxs = [self.repo.changectx(n) for n in
                self.repo.changelog.nodesbetween(outbases, headnodes)[0]]

        if localtip.rev() is None:
            ctxs.append(localtip)

        act = ActiveList(self, parenttip, ctxs)
        self.activecache[parent] = act

        return act

    def squishdeltas(self, active, message, user=None):
        '''Create a single conglomerate changeset based on a given
        active list.  Removes the original changesets comprising the
        given active list, and any tags pointing to them.

        Operation:

          - Commit an activectx object representing the specified
            active list,

          - Remove any local tags pointing to changesets in the
            specified active list.

          - Remove the changesets comprising the specified active
            list.

          - Remove any metadata that may refer to changesets that were
            removed.

        Calling code is expected to hold both the working copy lock
        and repository lock of the destination workspace
        '''
        def strip_local_tags(active):
            '''Remove any local tags referring to the specified nodes.'''

            if os.path.exists(self.repo.join('localtags')):
                fh = None
                try:
                    fh = self.repo.opener('localtags')
                    tags = active.prune_tags(fh)
                    fh.close()

                    # atomictemp: the rewritten file only replaces the
                    # original on successful rename().
                    fh = self.repo.opener('localtags', 'w', atomictemp=True)
                    fh.writelines(tags)
                    fh.rename()
                finally:
                    if fh and not fh.closed:
                        fh.close()

        if active.files():
            for entry in active:
                #
                # Work around Mercurial issue #1666, if the source
                # file of a rename exists in the working copy
                # Mercurial will complain, and remove the file.
                #
                # We preemptively remove the file to avoid the
                # complaint (the user was asked about this in
                # cdm_recommit)
                #
                if entry.is_renamed():
                    path = self.repo.wjoin(entry.parentname)
                    if os.path.exists(path):
                        os.unlink(path)

            self.repo.commitctx(active.context(message, user))
            wsstate = "recommitted"
            destination = self.repo.changelog.tip()
        else:
            #
            # If all we're doing is stripping the old nodes, we want to
            # update the working copy such that we're not at a revision
            # that's about to go away.
            #
            wsstate = "tip"
            destination = active.parenttip.node()

        self.clean(destination)

        #
        # Tags were elided by the activectx object.  Local tags,
        # however, must be removed manually.
        #
        try:
            strip_local_tags(active)
        except EnvironmentError, e:
            raise util.Abort('Could not recommit tags: %s\n' % e)

        # Silence all the strip and update fun
        self.ui.pushbuffer()

        #
        # Remove the previous child-local changes by stripping the
        # nodes that form the base of the ActiveList (removing their
        # children in the process).
        #
        try:
            try:
                for base in active.bases():
                    #
                    # Any cached information about the repository is
                    # likely to be invalid during the strip.  The
                    # caching of branch tags is especially
                    # problematic.
                    #
                    self.repo.invalidate()
                    repair.strip(self.ui, self.repo, base.node(),
                                 backup=False)
            except:
                #
                # If this fails, it may leave us in a surprising place in
                # the history.
                #
                # We want to warn the user that something went wrong,
                # and what will happen next, re-raise the exception, and
                # bring the working copy back into a consistent state
                # (which the finally block will do)
                #
                self.ui.warn("stripping failed, your workspace will have "
                             "superfluous heads.\n"
                             "your workspace has been updated to the "
                             "%s changeset.\n" % wsstate)
                raise               # Re-raise the exception
        finally:
            self.clean()
            self.repo.dirstate.write()  # Flush the dirstate
            self.repo.invalidate()      # Invalidate caches

            #
            # We need to remove Hg's undo information (used for rollback),
            # since it refers to data that will probably not exist after
            # the strip.
            #
            if os.path.exists(self.repo.sjoin('undo')):
                try:
                    os.unlink(self.repo.sjoin('undo'))
                except EnvironmentError, e:
                    raise util.Abort('failed to remove undo data: %s\n' % e)

            self.ui.popbuffer()
def do_collapse(ui, repo, first, last, revs, movelog, timedelta, opts):
    """Collapse the changesets in ``revs`` (``first``..``last``) into one.

    Validates that the range is linear and owned by the current user
    (unless --force), commits a single collapsed changeset via
    makecollapsed(), relocates descendants and .hgtags, and finally
    strips the original revisions unless --keep.  On any failure during
    the rewrite, the working copy is restored and the partial collapsed
    node stripped before re-raising.

    NOTE(review): ``timedelta`` is accepted but unused in this body —
    presumably consumed by a caller or a later version; confirm.
    """
    ui.debug(_('Collapsing revisions %s\n') % revs)

    if opts['debugdelay']:
        debug_delay = float(opts['debugdelay'])
    else:
        debug_delay = False

    # Sanity-check every revision in the range: ownership, and no
    # children/parents escaping the collapsed set (i.e. range is linear).
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                            'being collapsed, please rebase\n') % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                            'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
            'won\'t collapse.') % first)

    # bailifchanged was renamed across hg versions; fall back for old hg.
    try:
        cmdutil.bailifchanged(repo)
    except AttributeError:
        cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    tomove = list(repo.changelog.descendants([last]))

    head_hgtags = get_hgtags_from_heads(ui, repo, last)
    if '.hgtags' in parent:
        parent_hgtags = parent['.hgtags'].data()
    else:
        # Sentinel: no .hgtags in the new parent revision.
        parent_hgtags = False

    # movemap: old rev -> new rev (nullrev until actually moved).
    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    tagsmap = dict()

    if opts['noop']:
        ui.status(_('noop: not collapsing\n'))
    else:
        origparent = repo['.'].rev()
        collapsed = None

        try:
            branch = repo[last].branch()
            collapsed = makecollapsed(ui, repo, parent, revs, branch, tagsmap,
                                      parent_hgtags, movelog, opts)
            movemap[max(revs)] = collapsed
            movedescendants(ui, repo, collapsed, tomove, movemap, tagsmap,
                            parent_hgtags, movelog, debug_delay)
            fix_hgtags(ui, repo, head_hgtags, tagsmap)
        except:
            # Deliberate bare except: restore the working copy and strip
            # the half-made collapsed node, then re-raise whatever failed
            # (including KeyboardInterrupt).
            merge.update(repo, repo[origparent].rev(), False, True, False)
            if collapsed:
                repair.strip(ui, repo, collapsed.node(), "strip")
            raise

        if not opts['keep']:
            # Remove the original (now re-committed) revisions.
            ui.debug(_('stripping revision %d\n') % first)
            repair.strip(ui, repo, repo[first].node(), "strip")

        ui.status(_('collapse completed\n'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit the changesets between <parent> and the working
    directory parent.  Dispatches on --continue / --abort / --outgoing /
    fresh-run, builds or reloads the rule list, applies each rule via
    actiontable, and finally strips replaced and temporary nodes unless
    --keep.  State is persisted in .hg/histedit-state between runs.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6 (parseurl returns branch info as a tuple)
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)

            # hg >= 1.9: findoutgoing moved/changed several times; probe
            # the discovery module and build a compatible shim.
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            # Ancient hg: findoutgoing lives on the repo itself.
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        # The first outgoing root becomes the histedit base.
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # Walk down the user's interim commits, classifying them as
        # temporary (for folds) or freshly created.
        while newchildren:
            if action in ['f', 'fold']:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])

            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold'):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        # Only commit if the working directory actually changed.
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())

        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_,
             tmpnodes_) = finishfold(ui, repo, parentctx, oldctx, new,
                                     opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes,
         existing, rules, keep, tip) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes):
            # Strip newest-first; nodes may already be gone (LookupError).
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        # Fresh run: build the rule list (from --commands or an editor
        # session) and initialize bookkeeping.
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12])
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comments, then validate.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        tmpnodes = []
        created = []

    # Main rule loop (shared by fresh runs and --continue): persist
    # state BEFORE applying each rule so interruption is resumable.
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_,
         tmpnodes_) = actiontable[action](ui, repo, parentctx, ha, opts)
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        # Py2 cmp-style sort: strip from newest rev downwards.
        for n in sorted(replaced, lambda x, y: cmp(repo[x].rev(),
                                                   repo[y].rev())):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
def collapse(ui, repo, **opts):
    """collapse multiple revisions into one

    Collapse combines multiple consecutive changesets into a single
    changeset, preserving any descendants of the final changeset.  The
    commit messages for the collapsed changesets are concatenated and
    may be edited before the collapse is completed.

    Expected opts keys: rev (revision range), force, keep.
    Raises util.Abort on invalid ranges, foreign-owned revisions
    (without --force), or non-linear history inside the range.
    """
    rng = cmdutil.revrange(repo, opts['rev'])
    if not rng:
        raise util.Abort(_('no revisions specified'))

    first = rng[0]
    last = rng[-1]
    revs = inbetween(repo, first, last)

    if not revs:
        raise util.Abort(_('revision %s is not an ancestor '
                           'of revision %s\n') % (first, last))
    elif len(revs) == 1:
        raise util.Abort(_('only one revision specified'))

    ui.debug(_('Collapsing revisions %s\n') % revs)

    # Validate every revision: ownership, and no children/parents
    # escaping the collapsed set (range must be linear).
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                            'being collapsed, please rebase\n') % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                            'being collapsed.') % (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
            'won\'t collapse.') % first)

    cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # Descendants of `last` must be re-parented onto the collapsed node.
    tomove = list(repo.changelog.descendants(last))
    # movemap: old rev -> new rev (nullrev until actually moved).
    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    origparent = repo['.'].rev()
    collapsed = None

    try:
        branch = repo[last].branch()
        collapsed = makecollapsed(ui, repo, parent, revs, branch, opts)
        movemap[max(revs)] = collapsed
        movedescendants(ui, repo, collapsed, tomove, movemap)
    except:
        # Deliberate bare except: restore the original working copy
        # parent and strip the partial collapsed node, then re-raise
        # (including KeyboardInterrupt).
        merge.update(repo, repo[origparent].rev(), False, True, False)
        if collapsed:
            repair.strip(ui, repo, collapsed.node(), "strip")
        raise

    if not opts['keep']:
        # Remove the original revisions now that they are re-committed.
        ui.debug(_('stripping revision %d\n') % first)
        repair.strip(ui, repo, repo[first].node(), "strip")

    ui.status(_('collapse completed\n'))
def histedit(ui, repo, *parent, **opts):
    """hg histedit <parent>

    Interactively edit the changesets between <parent> and the working
    directory parent.  Later variant that additionally maintains a
    ``replacemap`` (old node -> new node) so bookmarks and other
    metadata can be migrated onto the rewritten changesets before the
    originals are stripped.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    parent = list(parent) + opts.get('rev', [])
    if opts.get('outgoing'):
        if len(parent) > 1:
            raise util.Abort('only one repo argument allowed with --outgoing')
        elif parent:
            parent = parent[0]

        dest = ui.expandpath(parent or 'default-push', parent or 'default')
        dest, revs = hg.parseurl(dest, None)[:2]
        if isinstance(revs, tuple):
            # hg >= 1.6 (parseurl returns branch info as a tuple)
            revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
            other = hg.repository(hg.remoteui(repo, opts), dest)

            # hg >= 1.9: probe the discovery module for the available
            # outgoing-computation API and build a compatible shim.
            findoutgoing = getattr(discovery, 'findoutgoing', None)
            if findoutgoing is None:
                if getattr(discovery, 'outgoing', None) is not None:
                    def findoutgoing(repo, other, force=False):
                        out = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return out.missing[0:1]
                else:
                    # hg 1.9 and 2.0
                    def findoutgoing(repo, other, force=False):
                        common, outheads = discovery.findcommonoutgoing(
                            repo, other, [], force=force)
                        return repo.changelog.findmissing(
                            common, outheads)[0:1]
        else:
            # Ancient hg: findoutgoing lives on the repo itself.
            other = hg.repository(ui, dest)

            def findoutgoing(repo, other, force=False):
                return repo.findoutgoing(other, force=force)

        if revs:
            revs = [repo.lookup(rev) for rev in revs]

        ui.status(_('comparing with %s\n') % hidepassword(dest))
        parent = findoutgoing(repo, other, force=opts.get('force'))
    else:
        if opts.get('force'):
            raise util.Abort('--force only allowed with --outgoing')

    if opts.get('continue', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --continue')
        (parentctxnode, created, replaced, tmpnodes, existing,
         rules, keep, tip, replacemap) = readstate(repo)
        currentparent, wantnull = repo.dirstate.parents()
        parentctx = repo[parentctxnode]
        # discover any nodes the user has added in the interim
        newchildren = [c for c in parentctx.children()
                       if c.node() not in existing]
        action, currentnode = rules.pop(0)
        # Classify the user's interim commits as temporary (folds) or
        # freshly created, walking down their descendants.
        while newchildren:
            if action in ['f', 'fold']:
                tmpnodes.extend([n.node() for n in newchildren])
            else:
                created.extend([n.node() for n in newchildren])

            newchildren = filter(lambda x: x.node() not in existing,
                                 reduce(lambda x, y: x + y,
                                        map(lambda r: r.children(),
                                            newchildren)))
        m, a, r, d = repo.status()[:4]
        oldctx = repo[currentnode]
        message = oldctx.description()
        if action in ('e', 'edit', 'm', 'mess'):
            message = ui.edit(message, ui.username())
        elif action in ('f', 'fold'):
            message = 'fold-temp-revision %s' % currentnode
        new = None
        # Only commit if the working directory actually changed.
        if m or a or r or d:
            new = repo.commit(text=message, user=oldctx.user(),
                              date=oldctx.date(), extra=oldctx.extra())

        if action in ('f', 'fold'):
            if new:
                tmpnodes.append(new)
            else:
                new = newchildren[-1]
            (parentctx, created_, replaced_,
             tmpnodes_) = finishfold(ui, repo, parentctx, oldctx, new,
                                     opts, newchildren)
            replaced.extend(replaced_)
            created.extend(created_)
            tmpnodes.extend(tmpnodes_)
        elif action not in ('d', 'drop'):
            if new != oldctx.node():
                replaced.append(oldctx.node())
            if new:
                if new != oldctx.node():
                    created.append(new)
                parentctx = repo[new]
    elif opts.get('abort', False):
        if len(parent) != 0:
            raise util.Abort('no arguments allowed with --abort')
        (parentctxnode, created, replaced, tmpnodes, existing,
         rules, keep, tip, replacemap) = readstate(repo)
        ui.debug('restore wc to old tip %s\n' % node.hex(tip))
        hg.clean(repo, tip)
        ui.debug('should strip created nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in created]))
        ui.debug('should strip temp nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in tmpnodes]))
        for nodes in (created, tmpnodes):
            # Strip newest-first; nodes may already be gone (LookupError).
            for n in reversed(nodes):
                try:
                    repair.strip(ui, repo, n)
                except error.LookupError:
                    pass
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        # Fresh run: build the rule list (from --commands or an editor
        # session) and initialize bookkeeping.
        bailifchanged(repo)
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort('history edit already in progress, try '
                             '--continue or --abort')

        tip, empty = repo.dirstate.parents()

        if len(parent) != 1:
            raise util.Abort('requires exactly one parent revision')
        parent = _revsingle(repo, parent[0]).node()

        keep = opts.get('keep', False)
        revs = between(repo, parent, tip, keep)

        ctxs = [repo[r] for r in revs]
        existing = [r.node() for r in ctxs]
        rules = opts.get('commands', '')
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += editcomment % (node.hex(parent)[:12],
                                    node.hex(tip)[:12])
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            f = open(rules)
            rules = f.read()
            f.close()
        # Drop blank lines and '#' comments, then validate.
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[parent].parents()[0]
        keep = opts.get('keep', False)
        replaced = []
        replacemap = {}
        tmpnodes = []
        created = []

    # Main rule loop: persist state BEFORE applying each rule so an
    # interruption can be resumed with --continue.
    while rules:
        writestate(repo, parentctx.node(), created, replaced,
                   tmpnodes, existing, rules, keep, tip, replacemap)
        action, ha = rules.pop(0)
        (parentctx, created_, replaced_,
         tmpnodes_) = actiontable[action](ui, repo, parentctx, ha, opts)

        hexshort = lambda x: node.hex(x)[:12]

        # Record how each replaced node maps to its successor(s) so
        # metadata (e.g. bookmarks) can be migrated afterwards.
        if replaced_:
            clen, rlen = len(created_), len(replaced_)
            if clen == rlen == 1:
                ui.debug('histedit: exact replacement of %s with %s\n' % (
                    hexshort(replaced_[0]), hexshort(created_[0])))
                replacemap[replaced_[0]] = created_[0]
            elif clen > rlen:
                assert rlen == 1, ('unexpected replacement of '
                                   '%d changes with %d changes' % (
                                       rlen, clen))
                # made more changesets than we're replacing
                # TODO synthesize patch names for created patches
                replacemap[replaced_[0]] = created_[-1]
                ui.debug('histedit: created many, assuming %s replaced by '
                         '%s' % (hexshort(replaced_[0]),
                                 hexshort(created_[-1])))
            elif rlen > clen:
                if not created_:
                    # This must be a drop. Try and put our metadata on
                    # the parent change.
                    assert rlen == 1
                    r = replaced_[0]
                    ui.debug('histedit: %s seems replaced with nothing, '
                             'finding a parent\n' % (hexshort(r)))
                    pctx = repo[r].parents()[0]
                    if pctx.node() in replacemap:
                        ui.debug('histedit: parent is already replaced\n')
                        replacemap[r] = replacemap[pctx.node()]
                    else:
                        replacemap[r] = pctx.node()
                    ui.debug('histedit: %s best replaced by %s\n' % (
                        hexshort(r), hexshort(replacemap[r])))
                else:
                    # A fold: many originals collapsed into one new node.
                    assert len(created_) == 1
                    for r in replaced_:
                        ui.debug('histedit: %s replaced by %s\n' % (
                            hexshort(r), hexshort(created_[0])))
                        replacemap[r] = created_[0]
            else:
                assert False, ('Unhandled case in replacement mapping! '
                               'replacing %d changes with %d changes' % (
                                   rlen, clen))
        created.extend(created_)
        replaced.extend(replaced_)
        tmpnodes.extend(tmpnodes_)

    hg.update(repo, parentctx.node())
    if not keep:
        if replacemap:
            # Migrate bookmarks onto the final successor of each
            # replaced node before the originals are stripped.
            ui.note('histedit: Should update metadata for the following '
                    'changes:\n')
            for old, new in replacemap.iteritems():
                if old in tmpnodes or old in created:
                    # can't have any metadata we'd want to update
                    continue
                # Follow replacement chains to the terminal successor.
                while new in replacemap:
                    new = replacemap[new]
                ui.note('histedit: %s to %s\n' % (hexshort(old),
                                                  hexshort(new)))
                octx = repo[old]
                if bookmarks is not None:
                    marks = octx.bookmarks()
                    if marks:
                        ui.note('histedit: moving bookmarks %s\n' %
                                ', '.join(marks))
                        for mark in marks:
                            repo._bookmarks[mark] = new
                        bookmarks.write(repo)
        # TODO update mq state
        ui.debug('should strip replaced nodes %s\n' %
                 ', '.join([node.hex(n)[:12] for n in replaced]))
        for n in sorted(replaced, key=lambda x: repo[x].rev()):
            try:
                repair.strip(ui, repo, n)
            except error.LookupError:
                pass

    ui.debug('should strip temp nodes %s\n' %
             ', '.join([node.hex(n)[:12] for n in tmpnodes]))
    for n in reversed(tmpnodes):
        try:
            repair.strip(ui, repo, n)
        except error.LookupError:
            pass
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    # Rollback data refers to pre-strip history; drop it.
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve

    Resumes after conflict resolution: commits the resolved merge as a
    fresh (internal/secret-phase) node, restores the original working
    copy parent, and strips the temporary shelve nodes (unless the repo
    supports the internal phase, which hides them instead).
    Raises error.Abort if unresolved conflicts remain.
    """
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    with repo.lock():
        checkparents(repo, state)
        ms = merge.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))

        shelvectx = repo[state.parents[1]]
        pendingctx = state.pendingctx

        with repo.dirstate.parentchange():
            repo.setparents(state.pendingctx.node(), nodemod.nullid)
            repo.dirstate.write(repo.currenttransaction())

        # Commit the restored changes at the most hidden phase
        # available: internal if supported, else secret.
        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {('phases', 'new-commit'): targetphase}
        with repo.ui.configoverride(overrides, 'unshelve'):
            with repo.dirstate.parentchange():
                # Temporarily reparent onto the original first parent so
                # the commit records the intended ancestry.
                repo.setparents(state.parents[0], nodemod.nullid)
                newnode = repo.commit(text=shelvectx.description(),
                                      extra=shelvectx.extra(),
                                      user=shelvectx.user(),
                                      date=shelvectx.date())

        if newnode is None:
            # If it ended up being a no-op commit, then the normal
            # merge state clean-up path doesn't happen, so do it
            # here. Fix issue5494
            merge.mergestate.clean(repo)
            shelvectx = state.pendingctx
            msg = _('note: unshelved changes already existed '
                    'in the working copy\n')
            ui.status(msg)
        else:
            # only strip the shelvectx if we produced one
            state.nodestoremove.append(newnode)
            shelvectx = repo[newnode]

        hg.updaterepo(repo, pendingctx.node(), overwrite=False)

        # An interrupted rebase (used internally by unshelve) may have
        # left its state file renamed aside; restore and clear it.
        if repo.vfs.exists('unshelverebasestate'):
            repo.vfs.rename('unshelverebasestate', 'rebasestate')
            rebase.clearstatus(repo)

        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        if not phases.supportinternal(repo):
            # No internal phase: the temporary nodes must be physically
            # stripped rather than hidden.
            repair.strip(ui, repo, state.nodestoremove, backup=False,
                         topic='shelve')
        _restoreactivebookmark(repo, state.activebookmark)
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)
def _narrow(
    ui,
    repo,
    remote,
    commoninc,
    oldincludes,
    oldexcludes,
    newincludes,
    newexcludes,
    force,
):
    """Shrink the repository's narrowspec from old{in,ex}cludes to
    new{in,ex}cludes.

    Refuses (without ``force``) if local-only commits touch files that
    would become untracked; otherwise strips those commits, deletes the
    revlogs for newly-excluded paths, and rewrites the narrowspec.
    """
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote,
                                            commoninc=commoninc)
    ui.status(_(b'looking for local changes to affected paths\n'))
    localnodes = []
    for n in itertools.chain(outgoing.missing, outgoing.excluded):
        # A commit matters only if it touches a file that the old spec
        # tracked but the new spec will not.
        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
            localnodes.append(n)
    revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, b'visible')
    visibletostrip = list(
        repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
    )
    if visibletostrip:
        ui.status(
            _(
                b'The following changeset(s) or their ancestors have '
                b'local changes not on the remote:\n'
            )
        )
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status(b'%s\n' % node.short(n))
        else:
            # Truncate the listing unless --verbose.
            for n in visibletostrip[:maxnodes]:
                ui.status(b'%s\n' % node.short(n))
            ui.status(
                _(b'...and %d more, use --verbose to list all\n')
                % (len(visibletostrip) - maxnodes)
            )
        if not force:
            raise error.Abort(
                _(b'local changes found'),
                hint=_(b'use --force-delete-local-changes to ignore'),
            )

    with ui.uninterruptible():
        if revstostrip:
            tostrip = [unfi.changelog.node(r) for r in revstostrip]
            if repo[b'.'].node() in tostrip:
                # stripping working copy, so move to a different commit first
                urev = max(
                    repo.revs(
                        b'(::%n) - %ln + null',
                        repo[b'.'].node(),
                        visibletostrip,
                    )
                )
                hg.clean(repo, urev)
            # Obsmarker creation is disabled: these commits are being
            # removed because of narrowing, not obsoleted.
            overrides = {(b'devel', b'strip-obsmarkers'): False}
            with ui.configoverride(overrides, b'narrow'):
                repair.strip(ui, unfi, tostrip, topic=b'narrow')

        todelete = []
        for f, f2, size in repo.store.datafiles():
            if f.startswith(b'data/'):
                # 'data/<path>.i' -> '<path>'; delete if newly excluded.
                file = f[5:-2]
                if not newmatch(file):
                    todelete.append(f)
            elif f.startswith(b'meta/'):
                # Tree-manifest revlog: 'meta/<dir>/00manifest.i'.
                dir = f[5:-13]
                dirs = sorted(util.dirs({dir})) + [dir]
                include = True
                for d in dirs:
                    visit = newmatch.visitdir(d)
                    if not visit:
                        include = False
                        break
                    if visit == b'all':
                        break
                if not include:
                    todelete.append(f)

        repo.destroying()

        with repo.transaction(b'narrowing'):
            # Update narrowspec before removing revlogs, so repo won't be
            # corrupt in case of crash
            repo.setnarrowpats(newincludes, newexcludes)

            for f in todelete:
                ui.status(_(b'deleting %s\n') % f)
                util.unlinkpath(repo.svfs.join(f))
                repo.store.markremoved(f)

            narrowspec.updateworkingcopy(repo, assumeclean=True)
            narrowspec.copytoworkingcopy(repo)

        repo.destroyed()
class WorkSpace(object):
    """A Mercurial repository viewed as a 'workspace' with a parent workspace.

    Wraps a repo object and caches outgoing-changeset and active-list
    computations keyed by parent path.  Legacy Python 2 code (uses ``cmp``,
    ``except E, e``, ``cStringIO``).
    """

    def __init__(self, repository):
        """Bind to REPOSITORY and resolve its default parent path (if any)."""
        self.repo = repository
        self.ui = self.repo.ui
        self.name = self.repo.root

        # 'default' expands to itself when no default path is configured;
        # treat that as "no parent workspace".
        parent = self.repo.ui.expandpath('default')
        if parent == 'default':
            parent = None
        self.parentrepo = parent

        # Caches keyed by parent path; see findoutgoing() and active().
        self.activecache = {}
        self.outgoingcache = {}

    def parent(self, spec=None):
        '''Return canonical workspace parent, either SPEC if passed,
        or default parent otherwise'''
        return spec or self.parentrepo

    def _localtip(self, bases, heads):
        '''Return a tuple (changectx, workingctx) representing the most
        representative head to act as the local tip.

        If the working directory is modified, the changectx is its
        tipmost local parent (or tipmost parent, if neither is
        local), and the workingctx is non-null.

        If the working directory is clean, the workingctx is null.
        The changectx is the tip-most local head on the current branch.

        If this can't be determined for some reason (e.g., the parent
        repo is inacessible), changectx is the tip-most head on the
        current branch.

        If the workingctx is non-null it is the actual local tip (and would
        be the local tip in any generated ActiveList, for instance),
        the better parent revision is returned also to aid callers needing
        a real changeset to act as a surrogate for an uncommitted change.'''

        def tipmost_of(nodes):
            # Highest revision number wins (Python 2 cmp-based sort).
            return sorted(nodes, cmp=lambda x, y: cmp(x.rev(), y.rev()))[-1]

        #
        # We need a full set of outgoing nodes such that we can limit
        # local branch heads to those which are outgoing
        #
        outnodes = self.repo.changelog.nodesbetween(bases, heads)[0]
        wctx = self.repo.workingctx()

        #
        # A modified working context is seen as a proto-branch, where
        # the 'heads' from our view are the parent revisions of that
        # context.
        # (and the working head is it)
        #
        if (wctx.files() or len(wctx.parents()) > 1 or
            wctx.branch() != wctx.parents()[0].branch()):
            heads = wctx.parents()
        else:
            heads = [self.repo.changectx(n) for n in heads]
            # Clean working directory: signal that with a null workingctx.
            wctx = None

        localchoices = [n for n in heads if n.node() in outnodes]
        return (tipmost_of(localchoices or heads), wctx)

    def _parenttip(self, localtip, parent=None):
        '''Find the closest approximation of the parents tip, as best
        as we can.

        In parent-less workspaces returns our tip (given the best we can
        do is deal with uncommitted changes)'''

        def tipmost_shared(head, outnodes):
            '''Return the tipmost node on the same branch as head that is not
            in outnodes.

            We walk from head to the bottom of the workspace (revision 0)
            collecting nodes not in outnodes during the add phase and return
            the first node we see in the iter phase that was previously
            collected.

            See the docstring of mercurial.cmdutil.walkchangerevs() for the
            phased approach to the iterator returned.  The important part to
            note is that the 'add' phase gathers nodes, which the 'iter'
            phase then iterates through.'''

            get = util.cachefunc(lambda r: self.repo.changectx(r).changeset())
            changeiter = cmdutil.walkchangerevs(self.repo.ui, self.repo, [],
                                                get, {'rev': ['%s:0' % head],
                                                      'follow': True})[0]
            seen = []
            for st, rev, fns in changeiter:
                n = self.repo.changelog.node(rev)
                if st == 'add':
                    if n not in outnodes:
                        seen.append(n)
                elif st == 'iter':
                    if n in seen:
                        return rev
            return None

        tipctx, wctx = localtip
        parent = self.parent(parent)
        outgoing = None

        if parent:
            outgoing = self.findoutgoing(parent)

        # With uncommitted changes, each working-dir parent is a candidate
        # branch; otherwise only the local tip itself.
        if wctx:
            possible_branches = wctx.parents()
        else:
            possible_branches = [tipctx]

        nodes = self.repo.changelog.nodesbetween(outgoing)[0]
        ptips = map(lambda x: tipmost_shared(x.rev(), nodes), possible_branches)
        # NOTE(review): if tipmost_shared returns None for every candidate,
        # sorted(ptips)[-1] is None and changectx(None) is passed through —
        # presumably resolves to the working parent; confirm before relying.
        return self.repo.changectx(sorted(ptips)[-1])

    def status(self, base=None, head=None):
        '''Translate from the hg 6-tuple status format to a hash keyed
        on change-type'''
        states = ['modified', 'added', 'removed', 'deleted', 'unknown',
                  'ignored']
        chngs = self.repo.status(base, head)
        return dict(zip(states, chngs))

    #
    # Cache findoutgoing results
    #
    def findoutgoing(self, parent):
        """Return (cached) nodes outgoing to PARENT; [] if it is unreachable."""
        ret = []
        if parent in self.outgoingcache:
            ret = self.outgoingcache[parent]
        else:
            # Buffer ui output so repository() / findoutgoing chatter is
            # suppressed; only our own warnings escape via ui.warn.
            self.ui.pushbuffer()
            try:
                pws = hg.repository(self.ui, parent)
                ret = self.repo.findoutgoing(pws)
            except repo.RepoError:
                # Best-effort: an inaccessible parent degrades the active
                # list rather than aborting.
                self.ui.warn(
                    "Warning: Parent workspace %s is not accessible\n"
                    % parent)
                self.ui.warn("active list will be incomplete\n\n")
            self.outgoingcache[parent] = ret
            self.ui.popbuffer()
        return ret

    def modified(self):
        '''Return a list of files modified in the workspace'''
        wctx = self.repo.workingctx()
        return sorted(wctx.files() + wctx.deleted()) or None

    def merged(self):
        '''Return boolean indicating whether the workspace has an uncommitted
        merge'''
        wctx = self.repo.workingctx()
        return len(wctx.parents()) > 1

    def branched(self):
        '''Return boolean indicating whether the workspace has an
        uncommitted named branch'''
        wctx = self.repo.workingctx()
        return wctx.branch() != wctx.parents()[0].branch()

    def active(self, parent=None):
        '''Return an ActiveList describing changes between workspace
        and parent workspace (including uncommitted changes).

        If workspace has no parent ActiveList will still describe any
        uncommitted changes'''

        parent = self.parent(parent)
        if parent in self.activecache:
            return self.activecache[parent]

        if parent:
            outgoing = self.findoutgoing(parent)
        else:
            outgoing = []       # No parent, no outgoing nodes

        branchheads = self.repo.heads(start=self.repo.dirstate.parents()[0])
        ourhead, workinghead = self._localtip(outgoing, branchheads)

        if len(branchheads) > 1:
            self.ui.warn('The current branch has more than one head, '
                         'using %s\n' % ourhead.rev())

        # workinghead is non-null only when the working dir is modified;
        # in that case it is appended as the (uncommitted) tip context.
        if workinghead:
            parents = workinghead.parents()
            ctxs = [self.repo.changectx(n) for n in
                    self.repo.changelog.nodesbetween(outgoing,
                                                     [h.node() for h in
                                                      parents])[0]]
            ctxs.append(workinghead)
        else:
            ctxs = [self.repo.changectx(n) for n in
                    self.repo.changelog.nodesbetween(outgoing,
                                                     [ourhead.node()])[0]]

        act = ActiveList(self, self._parenttip((ourhead, workinghead), parent),
                         ctxs)

        self.activecache[parent] = act
        return act

    def pdiff(self, pats, opts, parent=None):
        'Return diffs relative to PARENT, as best as we can make out'

        parent = self.parent(parent)
        act = self.active(parent)

        #
        # act.localtip maybe nil, in the case of uncommitted local
        # changes.
        #
        if not act.revs:
            return

        names, match = cmdutil.matchpats(self.repo, pats, opts)[:2]
        opts = patch.diffopts(self.ui, opts)

        ret = cStringIO.StringIO()
        patch.diff(self.repo, act.parenttip.node(), act.localtip.node(),
                   names, fp=ret, opts=opts, match=match)
        return ret.getvalue()

    #
    # Theory:
    #
    # We wish to go from a single series of consecutive changesets
    # (possibly including merges with the parent) to a single
    # changeset faithfully representing contents and copy history.
    #
    # We achieve this in a somewhat confusing fashion.
    #
    # - Sanity check the workspace
    # - Update the workspace to tip
    # - Enter into the dirstate the sum total of file contents in the
    #   to-be-squished changesets
    # - Commit this in-progress change (which has no changes at all,
    #   in reality) On top of the effective parent tip.
    # - Strip the child-local branch(es) (see ActiveList.bases())
    #
    def squishdeltas(self, active, message, user=None):
        '''Create a single conglomerate changeset, with log message MESSAGE
        containing the changes from ACTIVE.  USER, if set, is used
        as the author name.

        The old changes are removed.'''

        def strip_tags(nodes):
            '''Remove any tags referring to the specified nodes.'''

            if os.path.exists(self.repo.join('localtags')):
                fh = self.repo.opener('localtags').readlines()
                tags = [t for t in fh if t.split(' ')[0] not in nodes]
                fh = self.repo.opener('localtags', 'w', atomictemp=True)
                fh.writelines(tags)
                fh.rename()

            if os.path.exists(self.repo.wjoin('.hgtags')):
                fh = self.repo.wopener('.hgtags', 'rb').readlines()
                tags = [t for t in fh if t.split(' ')[0] not in nodes]
                fh = self.repo.wopener('.hgtags', 'wb', atomictemp=True)
                fh.writelines(tags)
                fh.rename()

        # NOTE(review): these locks are acquired but never explicitly
        # released in this method — presumably relying on release at object
        # destruction (old Mercurial lock behavior); confirm.
        wlock = self.repo.wlock()
        lock = self.repo.lock()

        #
        # The files involved need to be present in the workspace and
        # not otherwise molested, rather than the workspace not being
        # modified we also need to prevent files being deleted (but
        # left versioned) too.
        #
        # The easiest way to achieve this is to update the working
        # copy to tip.
        #
        self.clean()

        try:
            strip_tags([node.hex(ctx.node()) for ctx in active.revs])
        except EnvironmentError, e:
            raise util.Abort('Could not recommit tags: %s\n' % e)

        #
        # For copied files, we need to enter the copy into the
        # dirstate before we force the commit such that the
        # file logs of both branches (old and new) contain
        # representation of the copy.
        #
        parentman = active.parenttip.manifest()
        for entry in active:
            if not entry.is_renamed() and not entry.is_copied():
                continue

            assert entry.parentname in parentman, \
                ("parentname '%s' (of '%s') not in parent" %
                 (entry.parentname, entry.name))

            #
            # If the source file exists, and used to be versioned
            # this will cause this to become a true copy
            # (re-introducing the source file)
            #
            # We bandaid this, by removing the source file in this
            # case.
            # If we're here, the user has already agreed to this
            # from above.
            #
            if (entry.is_renamed() and
                os.path.exists(self.repo.wjoin(entry.parentname))):
                os.unlink(self.repo.wjoin(entry.parentname))

            self.repo.copy(entry.parentname, entry.name)

        if active.files():
            extra = {'branch': active.localtip.branch()}
            self.repo.commit(files=active.files(), text=message,
                             user=user, p1=active.parenttip.node(), p2=None,
                             extra=extra)
            wsstate = "recommitted changeset"
            self.clean()
        else:
            #
            # If all we're doing is stripping the old nodes, we want to
            # update the working copy such that we're not at a revision
            # that's about to go away.
            #
            wsstate = "tip changeset"
            self.clean(rev=active.parenttip.node())

        # Silence all the strip and update fun
        self.ui.pushbuffer()

        #
        # We must strip away the old representation of the child
        # branch(es).  This may involve stripping a theoretically
        # large number of branches in certain cases
        #
        bases = active.bases()

        try:
            try:
                for basenode in bases:
                    repair.strip(self.ui, self.repo, basenode, backup=False)
            except:
                #
                # If this fails, it may leave us in a surprising place in
                # the history.
                #
                # We want to warn the user that something went wrong,
                # and what will happen next, re-raise the exception, and
                # bring the working copy back into a consistent state
                # (which the finally block will do)
                #
                self.ui.warn("stripping failed, your workspace will have "
                             "superfluous heads.\n"
                             "your workspace has been updated to the "
                             "%s.\n" % wsstate)
                raise               # Re-raise the exception
        finally:
            #
            # We need to remove Hg's undo information (used for rollback),
            # since it refers to data that will probably not exist after
            # the strip.
            #
            self.clean()
            self.repo.dirstate.write()  # Flush the dirstate
            self.repo.invalidate()      # Invalidate caches

            if os.path.exists(self.repo.sjoin('undo')):
                try:
                    os.unlink(self.repo.sjoin('undo'))
                except EnvironmentError, e:
                    raise util.Abort('failed to remove undo data: %s\n' % e)

            self.ui.popbuffer()
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by
    rebasing, but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the latest (tipmost) head of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> rebased rev, -1 while not yet rebased,
    # nullmerge for revisions dropped from the rebase set.
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        # NOTE(review): repo.lock() is taken before repo.wlock(); Mercurial's
        # documented order is wlock first — confirm against the hg version
        # this was written for.
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if contf or abortf:
            # --continue/--abort are mutually exclusive with each other and
            # with any option that would redefine the rebase set.
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))

            # Resume from the on-disk rebase state file.
            (originalwd, target, state, skipped, collapsef, keepf,
                                keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            # --keepbranches is implemented via an implicit extrafn, so the
            # two options cannot be combined.
            if extrafn:
                raise util.Abort(_('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            # state[rev] == -1 means this revision has not been rebased yet
            # (a resumed rebase skips already-processed entries).
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # Persist state before each step so --continue/--abort work
                # after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                                                    keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                                        targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    # stats[3] is the count of unresolved merge conflicts.
                    if stats and stats[3] > 0:
                        raise util.Abort(_('unresolved conflicts (see hg '
                                    'resolve, then hg rebase --continue)'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # Single collapsed commit containing every rebased changeset;
            # the combined message is offered for interactive editing.
            p1, p2 = defineparents(repo, min(state), target,
                                                        state, targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn)

        if 'qtip' in repo.tags():
            # MQ patches referencing rebased nodes need their metadata fixed.
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        # The strip invalidates rollback data; discard it.
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history onto another. This can be useful for linearizing local
    changes relative to a master development tree.

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> rebased rev; -1 marks not-yet-rebased.
    state = {}
    skipped = set()

    lock = wlock = None
    try:
        # NOTE(review): lock is taken before wlock here; Mercurial's
        # documented order is wlock first — confirm for this hg version.
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)

        if contf or abortf:
            # --continue/--abort exclude each other and any option that
            # would redefine the rebase set.
            if contf and abortf:
                raise error.ParseError('rebase',
                                  _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase',
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            # Resume from the on-disk rebase state file.
            (originalwd, target, state, collapsef, keepf,
                                keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase',
                                       _('cannot specify both a '
                                         'revision and a base'))
            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, collapsef)
            if result:
                originalwd, target, state, external = result
            else:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return

        if keepbranchesf:
            # --keepbranches is implemented as an implicit extrafn, so the
            # two cannot be combined.
            if extrafn:
                raise error.ParseError(
                    'rebase',
                    _('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        targetancestors = list(repo.changelog.ancestors(target))
        targetancestors.append(target)

        for rev in sorted(state):
            if state[rev] == -1:
                # Persist state before each step so --continue/--abort
                # work after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                                                    keepbranchesf, external)
                rebasenode(repo, rev, target, state, skipped, targetancestors,
                                                        collapsef, extrafn)
        ui.note(_('rebase merging completed\n'))

        if collapsef:
            # NOTE(review): 'rev' here is the loop variable left over from
            # the for-loop above; if state were empty this would NameError —
            # presumably unreachable because empty state returns earlier.
            p1, p2 = defineparents(repo, min(state), target,
                                                        state, targetancestors)
            concludenode(repo, rev, p1, external, state, collapsef,
                         last=True, skipped=skipped, extrafn=extrafn)

        if 'qtip' in repo.tags():
            # Fix MQ patch metadata that referenced the rebased nodes.
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            if set(repo.changelog.descendants(min(state))) - set(state):
                ui.warn(_("warning: new changesets detected on source branch, "
                                                        "not stripping\n"))
            else:
                repair.strip(ui, repo, repo[min(state)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        # The strip invalidates rollback data; discard it.
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file modifications
    are to be performed (a common pattern). The function should return an
    *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is appropriate
    for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible.

    Before any changes are made, we verify the state of the repo is sufficient
    for transformation to occur and abort otherwise.

    Returns a dict mapping old node -> new node for every rewritten changeset.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort('must pass oldnodes in changelog order')

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort('cannot call replacechangesets when a transaction '
                         'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if there
    # are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # obsolete.isenabled was added in a later Mercurial; fall back to the
    # module-level _enabled flag on older versions.
    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise util.Abort('createfn must return a context.memctx')

            if oldctx == newctx:
                raise util.Abort('createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort('we do not allow replacements to modify files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort('unknown parent of child commit: %s' %
                                 oldctx.hex(),
                                 hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            # Commit exactly once. (A previous version of this code committed
            # the same memctx a second time under a phases.new-commit config
            # override; the explicit adjustphase() below already retains the
            # child's phase, so the duplicate commit was redundant work.)
            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        for mark, newnode in bmchanges:
            repo._bookmarks[mark] = newnode

        if bmchanges:
            repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                markers.append((repo[oldrev], (repo[newrev],)))
            obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort('active bookmark changed; '
                             'this should not occur!',
                             hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip the old changesets.
        # (Must happen outside the transaction, hence after tr.close().)
        if not obsenabled:
            stripnodes = [repo[rev].node() for rev in revmap.keys()]
            repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history onto another. This can be useful for linearizing local
    changes relative to a master development tree.

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> rebased rev; -1 marks not-yet-rebased,
    # nullmerge marks revisions dropped from the rebase set.
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        # NOTE(review): lock is taken before wlock here; Mercurial's
        # documented order is wlock first — confirm for this hg version.
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)

        if contf or abortf:
            # --continue/--abort exclude each other and any option that
            # would redefine the rebase set.
            if contf and abortf:
                raise error.ParseError('rebase',
                                       _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase',
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise error.ParseError(
                    'rebase', _('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            # Resume from the on-disk rebase state file.
            (originalwd, target, state, collapsef, keepf,
                                keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase', _('cannot specify both a '
                                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise error.ParseError(
                      'rebase', _('detach requires a revision to be specified'))
                if basef:
                    raise error.ParseError('rebase',
                                        _('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            # --keepbranches is implemented as an implicit extrafn, so the
            # two cannot be combined.
            if extrafn:
                raise error.ParseError(
                    'rebase', _('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        for rev in sorted(state):
            if state[rev] == -1:
                ui.debug("rebasing %d:%s\n" % (rev, repo[rev]))
                # Persist state before each step so --continue/--abort
                # work after an interruption.
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    # stats[3] counts unresolved merge conflicts.
                    if stats and stats[3] > 0:
                        raise util.Abort(_('fix unresolved conflicts with hg '
                                    'resolve then run hg rebase --continue'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    # Record the original changeset hash in the new commit.
                    extra = {'rebase_source': repo[rev].hex()}
                    if extrafn:
                        extrafn(repo[rev], extra)
                    newrev = concludenode(repo, rev, p1, p2, extra=extra)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.note(_('rebase merging completed\n'))

        if collapsef:
            # Single collapsed commit; the combined message is offered for
            # interactive editing.
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            # NOTE(review): extra=extrafn passes the callback function where
            # the non-collapse path above passes a dict (extra=extra) —
            # looks like it should be extrafn=extrafn; confirm against
            # concludenode's signature.
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg, extra=extrafn)

        if 'qtip' in repo.tags():
            # Fix MQ patch metadata that referenced the rebased nodes.
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    repair.strip(ui, repo, repo[min(rebased)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        # The strip invalidates rollback data; discard it.
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)