Example #1
def clearrebased(ui, repo, state, skipped, collapsedas=None):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node."""
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        markers = []
        for rev, newrev in sorted(state.items()):
            if newrev >= 0:
                if rev in skipped:
                    succs = ()
                elif collapsedas is not None:
                    succs = (repo[collapsedas],)
                else:
                    succs = (repo[newrev],)
                markers.append((repo[rev], succs))
        if markers:
            obsolete.createmarkers(repo, markers)
    else:
        rebased = [rev for rev in state if state[rev] > nullmerge]
        if rebased:
            stripped = []
            for root in repo.set('roots(%ld)', rebased):
                if set(repo.changelog.descendants([root.rev()])) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    stripped.append(root.node())
            if stripped:
                # backup the old csets by default
                repair.strip(ui, repo, stripped, "all")
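
A minimal sketch of a hypothetical call site, based only on how `state` and
`skipped` are used above: `state` maps each old revision to the revision it
became (negative sentinel values such as `nullmerge` mean "not rebased"), and
revisions in `skipped` are pruned with no successor.

# Hypothetical example values, not taken from the rebase extension itself:
state = {12: 15, 13: 16, 14: -2}   # rev 14 kept a sentinel value and is ignored
skipped = {13}                     # rev 13 produced no changes, so it is pruned
clearrebased(ui, repo, state, skipped, collapsedas=None)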
Example #2
def _pull(orig, ui, repo, *args, **opts):
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return orig(ui, repo, *args, **opts)
    maxrevbeforepull = len(repo.changelog)
    r = orig(ui, repo, *args, **opts)
    maxrevafterpull = len(repo.changelog)

    # Collect the diff numbers of the landed diffs
    landeddiffs = {}
    for rev in range(maxrevbeforepull, maxrevafterpull):
        n = repo[rev]
        if n.phase() == phases.public:
            diff = getdiff(n)
            if diff is not None:
                landeddiffs[diff] = n

    if not landeddiffs:
        return r

    # Try to find matches among the drafts
    tocreate = []
    unfiltered = repo.unfiltered()
    for rev in unfiltered.revs("draft() - obsolete()"):
        n = unfiltered[rev]
        diff = getdiff(n)
        if diff in landeddiffs and landeddiffs[diff].rev() != n.rev():
            tocreate.append((n, (landeddiffs[diff],)))

    if not tocreate:
        return r

    with unfiltered.lock(), unfiltered.transaction('pullcreatemarkers'):
        obsolete.createmarkers(unfiltered, tocreate)

    return r
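
`getdiff` is defined elsewhere in this extension; a plausible sketch (an
assumption, not the extension's actual helper) would pull a review number out
of the commit description so landed public commits can be matched against
local drafts:

import re

def getdiff(ctx):
    """Return the review (diff) number referenced by the changeset, or None."""
    m = re.search(r'Differential Revision:\s*\S*D(\d+)', ctx.description())
    return int(m.group(1)) if m else None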
Example #3
def _deleteunreachable(repo, ctx):
    """Deletes all ancestor and descendant commits of the given revision that
    aren't reachable from another bookmark.
    """
    keepheads = "bookmark() + ."
    try:
        extensions.find('remotenames')
        keepheads += " + remotenames()"
    except KeyError:
        pass
    hiderevs = repo.revs('::%s - ::(%r)', ctx.rev(), keepheads)
    if hiderevs:
        lock = None
        try:
            lock = repo.lock()
            if _isobsstoreenabled(repo):
                markers = []
                for rev in hiderevs:
                    markers.append((repo[rev], ()))
                obsolete.createmarkers(repo, markers)
                repo.ui.status(_("%d changesets pruned\n") % len(hiderevs))
            else:
                repair.strip(repo.ui, repo,
                             [repo.changelog.node(r) for r in hiderevs])
        finally:
            lockmod.release(lock)
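
`_isobsstoreenabled` is not shown here; a reasonable sketch, consistent with
the checks used in the other examples, is:

def _isobsstoreenabled(repo):
    """Assumed helper: True when obsolescence markers may be created."""
    return obsolete.isenabled(repo, obsolete.createmarkersopt)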
Example #5
def _buildobsolete(replacements, oldrepo, newrepo):
    'adds obsolete markers in replacements if enabled in newrepo'
    if obsolete.isenabled(newrepo, obsolete.createmarkersopt):
        markers = [(oldrepo[oldrev], (newrepo[newrev],))
                   for oldrev, newrev in replacements.items()
                   if newrev != oldrev]

        obsolete.createmarkers(newrepo, markers)
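
A short usage sketch with hypothetical revision numbers: identity entries are
filtered out by the `newrev != oldrev` guard, so unchanged revisions never get
a marker.

# oldrepo and newrepo may be the same repository object after a rewrite.
replacements = {10: 14, 11: 15, 12: 12}   # rev 12 was left untouched
_buildobsolete(replacements, repo, repo)  # markers for 10 -> 14 and 11 -> 15 only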
Example #7
def _buildobsolete(replacements, oldrepo, newrepo, date):
    '''return obsmarkers, add them locally (server-side) if obsstore enabled'''
    markers = [(oldrepo[oldrev], (newrepo[newrev],),
                {'operation': 'push', 'user': newrepo[newrev].user()})
               for oldrev, newrev in replacements.items()
               if newrev != oldrev]
    if obsolete.isenabled(newrepo, obsolete.createmarkersopt):
        obsolete.createmarkers(newrepo, markers, date=date)
    return markers
Example #8
 def removenodes(self, ui, repo):
     """Cleanup temporary nodes from the repo"""
     if self.obsshelve:
         unfi = repo.unfiltered()
         relations = [(unfi[n or '.'], ()) for n in self.nodestoremove]
         obsolete.createmarkers(repo, relations)
     else:
         repair.strip(ui, repo, self.nodestoremove, backup=False,
                      topic='shelve')
Example #9
def unamend(ui, repo, **opts):
    """undo the amend operation on a current changeset

    This command will roll back to the previous version of a changeset,
    leaving the working directory in the state it was in before running
    `hg amend` (e.g. files modified as part of the amend will show up as
    modified in `hg status`)"""
    try:
        extensions.find('inhibit')
    except KeyError:
        hint = _("please add inhibit to the list of enabled extensions")
        e = _("unamend requires inhibit extension to be enabled")
        raise error.Abort(e, hint=hint)

    unfi = repo.unfiltered()

    # identify the commit from which to unamend
    curctx = repo['.']

    # identify the commit to which to unamend
    markers = list(predecessormarkers(curctx))
    if len(markers) != 1:
        e = _("changeset must have one predecessor, found %i predecessors")
        raise error.Abort(e % len(markers))

    prednode = markers[0].prednode()
    predctx = unfi[prednode]

    if curctx.children():
        raise error.Abort(_("cannot unamend in the middle of a stack"))

    with repo.wlock(), repo.lock():
        ctxbookmarks = curctx.bookmarks()
        changedfiles = []
        wctx = repo[None]
        wm = wctx.manifest()
        cm = predctx.manifest()
        dirstate = repo.dirstate
        diff = cm.diff(wm)
        changedfiles.extend(diff.iterkeys())

        tr = repo.transaction('unamend')
        with dirstate.parentchange():
            dirstate.rebuild(prednode, cm, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in diff.iteritems():
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)
        changes = []
        for book in ctxbookmarks:
            changes.append((book, prednode))
        repo._bookmarks.applychanges(repo, tr, changes)
        obsolete.createmarkers(repo, [(curctx, (predctx,))])
        tr.close()
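
`predecessormarkers` comes from the surrounding extension; a plausible sketch
(an assumption; older Mercurial releases name the obsstore index `precursors`
and the marker accessor `precnode`) is:

from mercurial import obsutil

def predecessormarkers(ctx):
    """Yield obsolescence markers whose successor is ctx."""
    for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()):
        yield obsutil.marker(ctx.repo(), data)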
Example #10
def hide(ui, repo, *revs, **opts):
    """hide changesets and their descendants

    Hidden changesets are still accessible by their hashes which can be found
    in ``hg journal``.

    If a parent of the working directory is hidden, then the working directory
    will automatically be updated to the most recent available ancestor of the
    hidden parent.

    If there is a bookmark pointing to the commit it will be removed.
    """
    revs = list(revs) + opts.pop('rev', [])
    revs = set(scmutil.revrange(repo, revs))
    hidectxs = list(repo.set("(%ld)::", revs))

    if not hidectxs:
        raise error.Abort(_('nothing to hide'))

    with repo.wlock(), repo.lock(), repo.transaction('hide') as tr:
        # revs to be hidden
        for ctx in hidectxs:
            if not ctx.mutable():
                raise error.Abort(
                    _('cannot hide immutable changeset: %s') % ctx,
                    hint="see 'hg help phases' for details")

        wdp = repo['.']
        newnode = wdp

        while newnode in hidectxs:
            newnode = newnode.parents()[0]

        if newnode.node() != wdp.node():
            cmdutil.bailifchanged(repo, merge=False)
            hg.update(repo, newnode, False)
            ui.status(_('working directory now at %s\n')
                      % ui.label(str(newnode), 'node'))

        # create markers
        obsolete.createmarkers(repo, [(r, []) for r in hidectxs],
                               operation='hide')
        ui.status(_('%i changesets hidden\n') % len(hidectxs))

        # remove bookmarks pointing to hidden changesets
        hnodes = [r.node() for r in hidectxs]
        bmchanges = []
        for book, node in bookmarksmod.listbinbookmarks(repo):
            if node in hnodes:
                bmchanges.append((book, None))
        repo._bookmarks.applychanges(repo, tr, bmchanges)

        if len(bmchanges) > 0:
            ui.status(_('%i bookmarks removed\n') % len(bmchanges))
Example #11
def _obsoleteredundantnodes(repo, tr, pctx, shelvectx, tmpwctx):
    # order is important in the list of [shelvectx, tmpwctx] below
    # some nodes may already be obsolete
    unfi = repo.unfiltered()
    nodestoobsolete = filter(lambda x: x != pctx, [shelvectx, tmpwctx])
    seen = set()
    relations = []
    for nto in nodestoobsolete:
        if nto in seen:
            continue
        seen.add(nto)
        relations.append((unfi[nto.rev()], ()))
    obsolete.createmarkers(unfi, relations)
Example #12
def replacechangesets(repo, oldnodes, createfn, backuptopic='replacing'):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file modifications
    are to be performed (a common pattern). The function should return an
    *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is appropriate
    for the repo configuration.

    This function handles locking the repository and performing as many actions
    in a transaction as possible.

    Before any changes are made, we verify the state of the repo is sufficient
    for transformation to occur and abort otherwise.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort('replacechangesets expects 20 byte nodes')

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort('must pass oldnodes in changelog order')

    # We may perform stripping, and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, 'currenttransaction'):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort('cannot call replacechangesets when a transaction '
                         'is active')

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered('visible').revs('%ld::', oldrevs))

    # If we'll need to update the working directory, don't do anything if there
    # are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    obsenabled = False
    if hasattr(obsolete, 'isenabled'):
        obsenabled = obsolete.isenabled(repo, 'createmarkers')
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('replacechangesets')

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))

            if not isinstance(newctx, context.memctx):
                raise util.Abort('createfn must return a context.memctx')

            if oldctx == newctx:
                raise util.Abort('createfn must create a new changeset')

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort('we do not allow replacements to modify files')

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort('unknown parent of child commit: %s' %
                                 oldctx.hex(),
                                 hint='please report this as a bug')

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(repo, parents, oldctx.description(),
                                  oldctx.files(), preservefilectx(oldctx),
                                  user=oldctx.user(), date=oldctx.date(),
                                  extra=oldctx.extra())
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

            ph = repo.ui.config('phases', 'new-commit')
            try:
                repo.ui.setconfig('phases', 'new-commit', oldctx.phase(),
                                  'rewriting')
                newnode = mctx.commit()
                revmap[rev] = repo[newnode].rev()
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        for mark, newnode in bmchanges:
            repo._bookmarks[mark] = newnode

        if bmchanges:
            repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, 'mq'):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                markers.append((repo[oldrev], (repo[newrev],)))
            obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo['.'].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort('active bookmark changed; '
                             'this should not occur!',
                             hint='please file a bug')

        tr.close()

        # Unless obsolescence is enabled, strip the old changesets.
        if not obsenabled:
            stripnodes = [repo[rev].node() for rev in revmap.keys()]
            repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)

    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
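
To make the `createfn` contract from the docstring concrete, here is a minimal
hedged sketch of a callback that only rewrites the commit message; `newparents`
is the helper already used above, everything else is an assumption:

def reworddescription(repo, ctx, revmap, copyfilectxfn):
    """Return an *uncommitted* memctx identical to ctx except for its message."""
    parents = newparents(repo, ctx, revmap)
    return context.memctx(repo, parents,
                          ctx.description() + '\n\n(reworded)',
                          ctx.files(), copyfilectxfn,
                          user=ctx.user(), date=ctx.date(),
                          extra=ctx.extra())

# nodemap = replacechangesets(repo, oldnodes, reworddescription)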
Example #13
def hidecommits(repo, curctx, predctxs):
    obsolete.createmarkers(repo, [(curctx, predctxs)], operation='undo')
Example #14
def replacechangesets(repo, oldnodes, createfn, backuptopic="replacing"):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file modifications
    are to be performed (a common pattern). The function should return an
    *uncommitted* memctx holding the new changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is appropriate
    for the repo configuration.

    This function handles locking the repository and performing as many actions
    in a transaction as possible.

    Before any changes are made, we verify the state of the repo is sufficient
    for transformation to occur and abort otherwise.
    """
    if not oldnodes:
        return {}

    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort("replacechangesets expects 20 byte nodes")

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort("must pass oldnodes in changelog order")

    # We may perform stripping, and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, "currenttransaction"):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort(
            "cannot call replacechangesets when a transaction " "is active"
        )

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered("visible").revs("%ld::", oldrevs))

    # If we'll need to update the working directory, don't do anything if there
    # are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    obsenabled = False
    if hasattr(obsolete, "isenabled"):
        obsenabled = obsolete.isenabled(repo, "createmarkers")
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("replacechangesets")

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap), preservefilectx(oldctx))

            if not isinstance(newctx, context.memctx):
                raise util.Abort("createfn must return a context.memctx")

            if oldctx == newctx:
                raise util.Abort("createfn must create a new changeset")

            newnode = newctx.commit()
            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort("we do not allow replacements to modify files")

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be as
            # dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort(
                    "unknown parent of child commit: %s" % oldctx.hex(),
                    hint="please report this as a bug",
                )

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(
                repo,
                parents,
                oldctx.description(),
                oldctx.files(),
                preservefilectx(oldctx),
                user=oldctx.user(),
                date=oldctx.date(),
                extra=oldctx.extra(),
            )
            status = oldctx.p1().status(oldctx)
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

            ph = repo.ui.config("phases", "new-commit")
            try:
                repo.ui.setconfig("phases", "new-commit", oldctx.phase(), "rewriting")
                newnode = mctx.commit()
                revmap[rev] = repo[newnode].rev()
            finally:
                repo.ui.setconfig("phases", "new-commit", ph)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, "applychanges"):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode

                repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, "mq"):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo["."].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort(
                "active bookmark changed; " "this should not occur!",
                hint="please file a bug",
            )

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)

    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
Example #15
def _histedit(ui, repo, state, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules, editplan)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if util.any((outg, revs, freeargs)):
            raise util.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))


    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        state.read()
        if not rules:
            comment = editcomment % (state.parentctx, node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))

        # Recover our old commits if necessary
        if not state.topmost in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            changegroup.addchangegroup(repo, gen, 'histedit',
                                       'bundle:' + backupfile)
            os.remove(backupfile)

        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
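
For reference, a hedged sketch of what `state.rules` holds while the loop above
runs: a list of (action, changeset-hash) pairs produced by `verifyrules`, for
example:

# Illustration only; the real hashes come from the edited plan.
# state.rules = [('pick', '0a9cbfd0be49'), ('fold', '3b1fdc1ba420'),
#                ('drop', '77dedd63c5c2')]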
Example #16
def debugdrawdag(ui, repo, **opts):
    """read an ASCII graph from stdin and create changesets

    The ASCII graph is like what :hg:`log -G` outputs, with each `o` replaced
    by the name of the node. The command will create dummy changesets and local
    tags with those names to make the dummy changesets easier to refer to.

    If the name of a node is the single character 'o', it will be replaced by
    the word to its right. This makes it easier to reuse
    :hg:`log -G -T '{desc}'` output.

    For root (no parents) nodes, a revset can be used to query the existing
    repo. Note that the revset cannot contain characters that could be read as
    part of the graph edges, like `|/+-\`.
    """
    text = ui.fin.read()

    # parse the graph and make sure len(parents) <= 2 for each node
    edges = _parseasciigraph(text)
    for k, v in edges.items():
        if len(v) > 2:
            raise error.Abort(
                _('%s: too many parents: %s') % (k, b' '.join(v)))

    # parse comments to get extra file content instructions
    files = collections.defaultdict(dict)  # {(name, path): content}
    comments = list(_getcomments(text))
    filere = re.compile(br'^(\w+)/([\w/]+)\s*=\s*(.*)$', re.M)
    for name, path, content in filere.findall(b'\n'.join(comments)):
        content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
        files[name][path] = content

    committed = {None: node.nullid}  # {name: node}

    # for leaf nodes, try to find existing nodes in repo
    for name, parents in edges.items():
        if len(parents) == 0:
            try:
                committed[name] = scmutil.revsingle(repo, name)
            except error.RepoLookupError:
                pass

    # commit in topological order
    for name, parents in _walkgraph(edges):
        if name in committed:
            continue
        pctxs = [repo[committed[n]] for n in parents]
        pctxs.sort(key=lambda c: c.node())
        added = {}
        if len(parents) > 1:
            # If it's a merge, take the files and contents from the parents
            for f in pctxs[1].manifest():
                if f not in pctxs[0].manifest():
                    added[f] = pctxs[1][f].data()
        else:
            # If it's not a merge, add a single file
            added[name] = name
        # add extra file contents in comments
        for path, content in files.get(name, {}).items():
            added[path] = content
        ctx = simplecommitctx(repo, name, pctxs, added)
        n = ctx.commit()
        committed[name] = n
        tagsmod.tag(repo, [name],
                    n,
                    message=None,
                    user=None,
                    date=None,
                    local=True)

    # handle special comments
    with repo.wlock(), repo.lock(), repo.transaction(b'drawdag'):
        getctx = lambda x: repo.unfiltered()[committed[x.strip()]]
        for comment in comments:
            rels = []  # obsolete relationships
            args = comment.split(b':', 1)
            if len(args) <= 1:
                continue

            cmd = args[0].strip()
            arg = args[1].strip()

            if cmd in (b'replace', b'rebase', b'amend'):
                nodes = [getctx(m) for m in arg.split(b'->')]
                for i in range(len(nodes) - 1):
                    rels.append((nodes[i], (nodes[i + 1], )))
            elif cmd in (b'split', ):
                pre, succs = arg.split(b'->')
                succs = succs.split(b',')
                rels.append((getctx(pre), (getctx(s) for s in succs)))
            elif cmd in (b'prune', ):
                for n in arg.split(b','):
                    rels.append((getctx(n), ()))
            if rels:
                obsolete.createmarkers(repo, rels, date=(0, 0), operation=cmd)
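
A minimal input sketch for this command (assumed, pieced together from the
parsing code above): the ASCII body draws the DAG and the trailing comments
request obsolescence markers.

sample = b"""
C D
|/
B
|
A
# amend: B -> D
# prune: C
"""
# Fed to `hg debugdrawdag`, this commits A, B, C and D, then records an
# 'amend' marker obsoleting B with successor D and a 'prune' marker for C.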
Example #17
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    cmdutil.bailifchanged(repo)
    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    try:
        hasobsolete = (obsolete._enabled or
                       obsolete.isenabled(repo, obsolete.createmarkersopt))
    except:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push

        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo,
                                    dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently hit the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])

                    tip_ctx = c

                    # Remember what files have been modified since the
                    # whole push started.
                    for file in c.files():
                        modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' % (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()


        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        with repo.wlock():
            with repo.lock():
                if hasobsolete:
                    for marker in obsmarkers:
                        obsolete.createmarkers(repo, marker)
                        beforepush = marker[0][0]
                        afterpush = marker[0][1][0]
                        ui.note('marking %s as obsoleted by %s\n' %
                                (beforepush.hex(), afterpush.hex()))
                else:
                    # strip the original changesets since the push was
                    # successful and changeset obsolescence is unavailable
                    util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                with repo.wlock():
                    with repo.lock():
                        if hasobsolete:
                            relations = (
                                (repo[n], ()) for n in temporary_commits)
                            obsolete.createmarkers(repo, relations)
                        else:
                            util.strip(
                                ui, repo, temporary_commits, backup=None)

        finally:
            util.swap_out_encoding(old_encoding)
    return 1 # so we get a sane exit status, see hg's commands.push
Example #18
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller changesets

    By default, split the current revision by prompting for all its hunks to be
    redistributed into new changesets.

    Use --rev to split a given changeset instead.
    """
    tr = wlock = lock = None
    newcommits = []

    revarg = (list(revs) + opts.get('rev')) or ['.']
    if len(revarg) != 1:
        msg = _("more than one revset is given")
        hnt = _("use either `hg split <rs>` or `hg split --rev <rs>`, not both")
        raise error.Abort(msg, hint=hnt)

    rev = scmutil.revsingle(repo, revarg[0])
    if opts.get('no_rebase'):
        torebase = ()
    else:
        torebase = repo.revs('descendants(%d) - (%d)', rev, rev)
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        cmdutil.bailifchanged(repo)
        if torebase:
            cmdutil.checkunfinished(repo)
        tr = repo.transaction('split')
        ctx = repo[rev]
        r = ctx.rev()
        disallowunstable = not obsolete.isenabled(repo,
                                                  obsolete.allowunstableopt)
        if disallowunstable:
            # XXX We should check head revs
            if repo.revs("(%d::) - %d", rev, rev):
                raise error.Abort(_("cannot split commit: %s not a head") % ctx)

        if len(ctx.parents()) > 1:
            raise error.Abort(_("cannot split merge commits"))
        prev = ctx.p1()
        bmupdate = common.bookmarksupdater(repo, ctx.node(), tr)
        bookactive = repo._activebookmark
        if bookactive is not None:
            repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
        bookmarks.deactivate(repo)
        hg.update(repo, prev)

        commands.revert(ui, repo, rev=r, all=True)

        def haschanges():
            modified, added, removed, deleted = repo.status()[:4]
            return modified or added or removed or deleted
        msg = ("HG: This is the original pre-split commit message. "
               "Edit it as appropriate.\n\n")
        msg += ctx.description()
        opts['message'] = msg
        opts['edit'] = True
        while haschanges():
            pats = ()
            cmdutil.dorecord(ui, repo, commands.commit, 'commit', False,
                             cmdutil.recordfilter, *pats, **opts)
            # TODO: Does not seem like the best way to do this
            # We should make dorecord return the newly created commit
            newcommits.append(repo['.'])
            if haschanges():
                if ui.prompt('Done splitting? [yN]', default='n') == 'y':
                    commands.commit(ui, repo, **opts)
                    newcommits.append(repo['.'])
                    break
            else:
                ui.status(_("no more changes to split\n"))

        if newcommits:
            tip = repo[newcommits[-1]]
            bmupdate(tip.node())
            if bookactive is not None:
                bookmarks.activate(repo, bookactive)
            obsolete.createmarkers(repo, [(repo[r], newcommits)],
                                   operation='split')

            if torebase:
                top = repo.revs('allsuccessors(%d)', rev).last()
                common.restackonce(ui, repo, top)
        tr.close()
    finally:
        lockmod.release(tr, lock, wlock)
Example #19
 def _obsoleteoldcommits(self):
     relations = [(self.repo[k], v and (self.repo[v],) or ())
                  for k, v in self.replacemap.iteritems()]
     if relations:
         obsolete.createmarkers(self.repo, relations)
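
A note on the assumed shape of `self.replacemap`: it maps each original node to
its rewritten node, or to a falsy value when the commit was dropped, which the
`v and (self.repo[v],) or ()` expression turns into a prune marker with no
successors.

# Hypothetical contents (old node -> new node or None):
# self.replacemap = {oldnode1: newnode1, oldnode2: None}
# oldnode1 gets a marker with one successor; oldnode2 gets a prune marker.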
Example #20
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command edits changesets between ANCESTOR and the parent of
    the working directory.

    With --outgoing, this edits changesets not found in the
    destination repository. If the URL of the destination is omitted,
    the 'default-push' (or 'default') path will be used.

    For safety, this command also aborts if there are ambiguous
    outgoing revisions that may confuse users: for example, when
    multiple branches contain outgoing revisions.

    Use "min(outgoing() and ::.)" or similar revset specification
    instead of --outgoing to specify edit target revision exactly in
    such ambiguous situation. See :hg:`help revsets` for detail about
    selecting revisions.
    """
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, "mq", None)
    if mq and mq.applied:
        raise util.Abort(_("source has mq patches applied"))

    # basic argument incompatibility processing
    outg = opts.get("outgoing")
    cont = opts.get("continue")
    abort = opts.get("abort")
    force = opts.get("force")
    rules = opts.get("commands", "")
    revs = opts.get("rev", [])
    goal = "new"  # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_("--force only allowed with --outgoing"))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --continue"))
        goal = "continue"
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_("no arguments allowed with --abort"))
        goal = "abort"
    else:
        if os.path.exists(os.path.join(repo.path, "histedit-state")):
            raise util.Abort(_("history edit already in progress, try " "--continue or --abort"))
        if outg:
            if revs:
                raise util.Abort(_("no revisions allowed with --outgoing"))
            if len(freeargs) > 1:
                raise util.Abort(_("only one repo argument allowed with --outgoing"))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(_("histedit requires exactly one ancestor revision"))

    if goal == "continue":
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        parentctx = repo[parentctxnode]
        parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
        replacements.extend(repl)
    elif goal == "abort":
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
        ui.debug("restore wc to old parent %s\n" % node.short(topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, "created", tmpnodes)
        cleanupnode(ui, repo, "temp", leafs)
        os.unlink(os.path.join(repo.path, "histedit-state"))
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            root = revs[0]
            root = scmutil.revsingle(repo, root).node()

        keep = opts.get("keep", False)
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_("%s is not an ancestor of working directory") % node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = "\n".join([makedesc(c) for c in ctxs])
            rules += "\n\n"
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join("histedit-last-edit.txt"), "w")
            f.write(rules)
            f.close()
        else:
            if rules == "-":
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines()) if l and not l[0] == "#"]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        keep = opts.get("keep", False)
        replacements = []

    while rules:
        writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
        action, ha = rules.pop(0)
        ui.debug("histedit: processing %s %s\n" % (action, ha))
        actfunc = actiontable[action]
        parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
        replacements.extend(replacement_)

    hg.update(repo, parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug("histedit: %s is dropped\n" % node.short(prec))
            else:
                ui.debug("histedit: %s is replaced by %s\n" % (node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = "histedit:                            %s"
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, topmost, ntm)
            # TODO update mq state
        if obsolete._enabled:
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec], tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, "replaced", mapping)

    cleanupnode(ui, repo, "temp", tmpnodes)
    os.unlink(os.path.join(repo.path, "histedit-state"))
    if os.path.exists(repo.sjoin("undo")):
        os.unlink(repo.sjoin("undo"))
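For reference, the `mapping` returned by `processreplacement()` and consumed by the marker loop above associates each original (precursor) node with a tuple of successor nodes; an empty tuple means the changeset was dropped. A purely illustrative sketch of that shape, with made-up node values:

# Purely illustrative: the shape of `mapping` used by the marker loop.
example_mapping = {
    b'\x11' * 20: (b'\x22' * 20,),               # edited: one successor
    b'\x33' * 20: (b'\x44' * 20, b'\x55' * 20),  # split: several successors
    b'\x66' * 20: (),                            # dropped: no successors
}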
Example #21
0
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.
    """
    assert not revs, 'designated revisions for push remain unimplemented.'
    cmdutil.bailifchanged(repo)
    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    try:
        hasobsolete = obsolete._enabled
    except AttributeError:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push

        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo,
                                    dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently trigger the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])

                    tip_ctx = c

                    # Remember what files have been modified since the
                    # whole push started.
                    for file in c.files():
                        modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' % (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()


        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        if hasobsolete:
            for marker in obsmarkers:
                obsolete.createmarkers(repo, marker)
                beforepush = marker[0][0]
                afterpush = marker[0][1][0]
                ui.note('marking %s as obsoleted by %s\n' %
                        (beforepush.hex(), afterpush.hex()))
        else:
            # strip the original changesets since the push was
            # successful and changeset obsolescence is unavailable
            util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                if hasobsolete:
                    relations = ((repo[n], ()) for n in temporary_commits)
                    obsolete.createmarkers(repo, relations)
                else:
                    util.strip(ui, repo, temporary_commits, backup=None)

        finally:
            util.swap_out_encoding(old_encoding)
    return 1 # so we get a sane exit status, see hg's commands.push
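This example probes the module-level `obsolete._enabled` flag that older Mercurial releases exposed, while other examples in this collection call `obsolete.isenabled(repo, obsolete.createmarkersopt)` instead. A hedged compatibility sketch that tolerates either API (the `markersenabled` name is hypothetical):

from mercurial import obsolete

def markersenabled(repo):
    # Newer Mercurial: per-repository feature check.
    if getattr(obsolete, 'isenabled', None) is not None:
        return obsolete.isenabled(repo, obsolete.createmarkersopt)
    # Older Mercurial: module-level flag, absent in newer releases.
    return getattr(obsolete, '_enabled', False)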
Example #22
0
def prune(ui, repo, *revs, **opts):
    """hide changesets by marking them obsolete

    Pruned changesets are obsolete with no successors. If they also have no
    descendants, they are hidden (invisible to all commands).

    Non-obsolete descendants of pruned changesets become "unstable". Use
    :hg:`evolve` to handle this situation.

    When you prune the parent of your working copy, Mercurial updates the
    working copy to a non-obsolete parent.

    You can use ``--succ`` to tell Mercurial that a newer version (successor)
    of the pruned changeset exists. Mercurial records successor revisions in
    obsolescence markers.

    You can use the ``--biject`` option to specify a 1-1 mapping (bijection)
    between the revisions to be pruned (precursors) and their successor
    changesets. This
    option may be removed in a future release (with the functionality provided
    automatically).

    If you specify multiple revisions in ``--succ``, you are recording a
    "split" and must acknowledge it by passing ``--split``. Similarly, when you
    prune multiple changesets with a single successor, you must pass the
    ``--fold`` option.
    """
    if opts.get('keep', False):
        advice = "'hg uncommit' provides a better UI for undoing commits " \
                 "while keeping the changes\n"
    else:
        advice = "'hg hide' provides a better UI for hiding commits\n"
    ui.warn(_("advice: %s") % advice)

    revs = scmutil.revrange(repo, list(revs) + opts.get('rev', []))
    succs = opts.get('succ', [])
    bookmarks = set(opts.get('bookmark', ()))
    metadata = _getmetadata(**opts)
    biject = opts.get('biject')
    fold = opts.get('fold')
    split = opts.get('split')

    options = [o for o in ('biject', 'fold', 'split') if opts.get(o)]
    if 1 < len(options):
        raise error.Abort(_("can only specify one of %s") % ', '.join(options))

    if bookmarks:
        repomarks, revs = _reachablefrombookmark(repo, revs, bookmarks)
        if not revs:
            # no revisions to prune - delete bookmark immediately
            _deletebookmark(repo, repomarks, bookmarks)

    if not revs:
        raise error.Abort(_('nothing to prune'))

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('prune')
        # defines pruned changesets
        precs = []
        revs.sort()
        for p in revs:
            cp = repo[p]
            if not cp.mutable():
                # note: createmarkers() would have raised something anyway
                raise error.Abort('cannot prune immutable changeset: %s' % cp,
                                  hint="see 'hg help phases' for details")
            precs.append(cp)
        if not precs:
            raise error.Abort('nothing to prune')

        # defines successors changesets
        sucs = scmutil.revrange(repo, succs)
        sucs.sort()
        sucs = tuple(repo[n] for n in sucs)
        if not biject and len(sucs) > 1 and len(precs) > 1:
            msg = "Can't use multiple successors for multiple precursors"
            hint = _("use --biject to mark a series as a replacement"
                     " for another")
            raise error.Abort(msg, hint=hint)
        elif biject and len(sucs) != len(precs):
            msg = "Can't use %d successors for %d precursors" \
                % (len(sucs), len(precs))
            raise error.Abort(msg)
        elif (len(precs) == 1 and len(sucs) > 1) and not split:
            msg = "please add --split if you want to do a split"
            raise error.Abort(msg)
        elif len(sucs) == 1 and len(precs) > 1 and not fold:
            msg = "please add --fold if you want to do a fold"
            raise error.Abort(msg)
        elif biject:
            relations = [(p, (s,)) for p, s in zip(precs, sucs)]
        else:
            relations = [(p, sucs) for p in precs]

        wdp = repo['.']

        if len(sucs) == 1 and len(precs) == 1 and wdp in precs:
            # '.' killed, so update to the successor
            newnode = sucs[0]
        else:
            # update to an unkilled parent
            newnode = wdp

            while newnode in precs or newnode.obsolete():
                newnode = newnode.parents()[0]

        if newnode.node() != wdp.node():
            if opts.get('keep', False):
                # This is largely the same as the implementation in
                # strip.stripcmd(). We might want to refactor this somewhere
                # common at some point.

                # only reset the dirstate for files that would actually change
                # between the working context and uctx
                descendantrevs = repo.revs("%d::." % newnode.rev())
                changedfiles = []
                for rev in descendantrevs:
                    # blindly reset the files, regardless of what actually
                    # changed
                    changedfiles.extend(repo[rev].files())

                # reset files that only changed in the dirstate too
                dirstate = repo.dirstate
                dirchanges = [f for f in dirstate if dirstate[f] != 'n']
                changedfiles.extend(dirchanges)
                repo.dirstate.rebuild(newnode.node(), newnode.manifest(),
                                      changedfiles)
                dirstate.write(tr)
            else:
                bookactive = repo._activebookmark
                # Active bookmark that we don't want to delete (with -B option)
                # we deactivate and move it before the update and reactivate it
                # after
                movebookmark = bookactive and not bookmarks
                if movebookmark:
                    bookmarksmod.deactivate(repo)
                    changes = [(bookactive, newnode.node())]
                    repo._bookmarks.applychanges(repo, tr, changes)
                commands.update(ui, repo, newnode.rev())
                ui.status(_('working directory now at %s\n')
                          % ui.label(str(newnode), 'evolve.node'))
                if movebookmark:
                    bookmarksmod.activate(repo, bookactive)

        # update bookmarks
        if bookmarks:
            _deletebookmark(repo, repomarks, bookmarks)

        # create markers
        obsolete.createmarkers(repo, relations, metadata=metadata,
                               operation='prune')

        # informs that changeset have been pruned
        ui.status(_('%i changesets pruned\n') % len(precs))

        for ctx in repo.unfiltered().set('bookmark() and %ld', precs):
            # used to be:
            #
            #   ldest = list(repo.set('max((::%d) - obsolete())', ctx))
            #   if ldest:
            #      c = ldest[0]
            #
            # but then revset took a lazy arrow in the knee and became much
            # slower. The new form makes as much sense and is much faster.
            for dest in ctx.ancestors():
                if not dest.obsolete():
                    updatebookmarks = common.bookmarksupdater(
                        repo, ctx.node(), tr)
                    updatebookmarks(dest.node())
                    break

        tr.close()
    finally:
        lockmod.release(tr, lock, wlock)
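As the docstring and the branching above show, prune builds its (precursor, successors) relations differently depending on the mode. A hedged stand-alone sketch of just that decision (the `prunerelations` name is hypothetical; `precs` and `sucs` are expected to be lists of changectx objects):

def prunerelations(precs, sucs, biject=False):
    # --biject: pair precursors and successors one-to-one.
    if biject:
        return [(p, (s,)) for p, s in zip(precs, sucs)]
    # Plain prune (empty sucs) or fold: every precursor shares the same
    # successor tuple; an empty tuple records the changeset as pruned.
    return [(p, tuple(sucs)) for p in precs]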
Example #23
0
def _histedit(ui, repo, state, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise error.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    elif editplan:
        if any((outg, revs, freeargs)):
            raise error.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
        goal = 'edit-plan'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise error.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise error.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise error.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # experimental config: histedit.defaultrev
                histeditdefault = ui.config('histedit', 'defaultrev')
                if histeditdefault:
                    revs.append(histeditdefault)
            if len(revs) != 1:
                raise error.Abort(
                    _('histedit requires exactly one ancestor revision'))


    replacements = []
    state.keep = opts.get('keep', False)
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    # rebuild state
    if goal == 'continue':
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'edit-plan':
        state.read()
        if not rules:
            comment = editcomment % (node.short(state.parentctxnode),
                                     node.short(state.topmost))
            rules = ruleeditor(repo, ui, state.rules, comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, [repo[c] for [_a, c] in state.rules])
        state.rules = rules
        state.write()
        return
    elif goal == 'abort':
        try:
            state.read()
            tmpnodes, leafs = newnodestoabort(state)
            ui.debug('restore wc to old parent %s\n'
                    % node.short(state.topmost))

            # Recover our old commits if necessary
            if state.topmost not in repo and state.backupfile:
                backupfile = repo.join(state.backupfile)
                f = hg.openpath(ui, backupfile)
                gen = exchange.readbundle(ui, f, backupfile)
                tr = repo.transaction('histedit.abort')
                try:
                    if not isinstance(gen, bundle2.unbundle20):
                        gen.apply(repo, 'histedit', 'bundle:' + backupfile)
                    if isinstance(gen, bundle2.unbundle20):
                        bundle2.applybundle(repo, gen, tr,
                                            source='histedit',
                                            url='bundle:' + backupfile)
                    tr.close()
                finally:
                    tr.release()

                os.remove(backupfile)

            # check whether we should update away
            if repo.unfiltered().revs('parents() and (%n  or %ln::)',
                                    state.parentctxnode, leafs | tmpnodes):
                hg.clean(repo, state.topmost)
            cleanupnode(ui, repo, 'created', tmpnodes)
            cleanupnode(ui, repo, 'temp', leafs)
        except Exception:
            if state.inprogress():
                ui.warn(_('warning: encountered an exception during histedit '
                    '--abort; the repository may not have been completely '
                    'cleaned up\n'))
            raise
        finally:
            state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise error.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, state.keep)
        if not revs:
            raise error.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            comment = editcomment % (node.short(root), node.short(topmost))
            rules = ruleeditor(repo, ui, [['pick', c] for c in ctxs], comment)
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctxnode = repo[root].parents()[0].node()

        state.parentctxnode = parentctxnode
        state.rules = rules
        state.topmost = topmost
        state.replacements = replacements

        # Create a backup so we can always abort completely.
        backupfile = None
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                        'histedit')
        state.backupfile = backupfile

    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    rules = state.rules[:]
    for idx, ((action, ha), (nextact, unused)) in enumerate(
            zip(rules, rules[1:] + [(None, None)])):
        if action == 'fold' and nextact == 'fold':
            state.rules[idx] = '_multifold', ha

    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha[:12]))
        actobj = actiontable[action].fromrule(state, ha)
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
    state.write()

    hg.update(repo, state.parentctxnode)

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if supportsmarkers:
        # Only create markers if the temp nodes weren't already removed.
        obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes)
                                       if t in repo))
    else:
        cleanupnode(ui, repo, 'temp', tmpnodes)

    if not state.keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if supportsmarkers:
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
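The preprocessing step in this example rewrites every entry of a run of consecutive 'fold' rules except the last one to '_multifold', so the user only sees a single editor for the whole fold. A small self-contained illustration of that rewrite, using made-up hash prefixes:

rules = [('pick', 'aaa111'), ('fold', 'bbb222'),
         ('fold', 'ccc333'), ('fold', 'ddd444')]
for idx, ((action, ha), (nextact, _unused)) in enumerate(
        zip(rules, rules[1:] + [(None, None)])):
    if action == 'fold' and nextact == 'fold':
        rules[idx] = ('_multifold', ha)
# rules is now:
#   [('pick', 'aaa111'), ('_multifold', 'bbb222'),
#    ('_multifold', 'ccc333'), ('fold', 'ddd444')]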
Example #24
0
def _histedit(ui, repo, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))


    if goal == 'continue':
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        parentctx = repo[parentctxnode]
        parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
        replacements.extend(repl)
    elif goal == 'abort':
        (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
        mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
        ui.debug('restore wc to old parent %s\n' % node.short(topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([parentctxnode]):
            if n in parentnodes:
                hg.clean(repo, topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        os.unlink(os.path.join(repo.path, 'histedit-state'))
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rootrevs = list(repo.set('roots(%lr)', revs))
            if len(rootrevs) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rootrevs[0].node()

        keep = opts.get('keep', False)
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += '\n\n'
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l[0] == '#']
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]
        keep = opts.get('keep', False)
        replacements = []


    while rules:
        writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
        action, ha = rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha))
        actfunc = actiontable[action]
        parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
        replacements.extend(replacement_)

    hg.update(repo, parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, topmost, ntm)
            # TODO update mq state
        if obsolete._enabled:
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    os.unlink(os.path.join(repo.path, 'histedit-state'))
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
Example #25
0
def _obsoleteoldcommits(self):
    relations = [(self.repo[k], v and (self.repo[v],) or ())
                 for k, v in self.replacemap.iteritems()]
    if relations:
        obsolete.createmarkers(self.repo, relations)
Example #26
0
def _finishshelve(ui, repo, tr, node, activebookmark):
    if activebookmark:
        bookmarks.activate(repo, activebookmark)
    obsolete.createmarkers(repo, [(repo.unfiltered()[node], ())])
    tr.close()
    tr.release()
Example #27
0
def _histedit(ui, repo, state, *freeargs, **opts):
    # TODO only abort if we try and histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise util.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    cont = opts.get('continue')
    abort = opts.get('abort')
    force = opts.get('force')
    rules = opts.get('commands', '')
    revs = opts.get('rev', [])
    goal = 'new' # This invocation goal, in new, continue, abort
    if force and not outg:
        raise util.Abort(_('--force only allowed with --outgoing'))
    if cont:
        if util.any((outg, abort, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --continue'))
        goal = 'continue'
    elif abort:
        if util.any((outg, revs, freeargs, rules)):
            raise util.Abort(_('no arguments allowed with --abort'))
        goal = 'abort'
    else:
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise util.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise util.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise util.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) != 1:
                raise util.Abort(
                    _('histedit requires exactly one ancestor revision'))


    replacements = []
    keep = opts.get('keep', False)

    # rebuild state
    if goal == 'continue':
        state = histeditstate(repo)
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == 'abort':
        state = histeditstate(repo)
        state.read()
        mapping, tmpnodes, leafs, _ntm = processreplacement(state)
        ui.debug('restore wc to old parent %s\n' % node.short(state.topmost))
        # check whether we should update away
        parentnodes = [c.node() for c in repo[None].parents()]
        for n in leafs | set([state.parentctx.node()]):
            if n in parentnodes:
                hg.clean(repo, state.topmost)
                break
        else:
            pass
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
        state.clear()
        return
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        topmost, empty = repo.dirstate.parents()
        if outg:
            if freeargs:
                remote = freeargs[0]
            else:
                remote = None
            root = findoutgoing(ui, repo, remote, force, opts)
        else:
            rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
            if len(rr) != 1:
                raise util.Abort(_('The specified revisions must have '
                    'exactly one common root'))
            root = rr[0].node()

        revs = between(repo, root, topmost, keep)
        if not revs:
            raise util.Abort(_('%s is not an ancestor of working directory') %
                             node.short(root))

        ctxs = [repo[r] for r in revs]
        if not rules:
            rules = '\n'.join([makedesc(c) for c in ctxs])
            rules += '\n\n'
            rules += editcomment % (node.short(root), node.short(topmost))
            rules = ui.edit(rules, ui.username())
            # Save edit rules in .hg/histedit-last-edit.txt in case
            # the user needs to ask for help after something
            # surprising happens.
            f = open(repo.join('histedit-last-edit.txt'), 'w')
            f.write(rules)
            f.close()
        else:
            if rules == '-':
                f = sys.stdin
            else:
                f = open(rules)
            rules = f.read()
            f.close()
        rules = [l for l in (r.strip() for r in rules.splitlines())
                 if l and not l.startswith('#')]
        rules = verifyrules(rules, repo, ctxs)

        parentctx = repo[root].parents()[0]

        state.parentctx = parentctx
        state.rules = rules
        state.keep = keep
        state.topmost = topmost
        state.replacements = replacements

    while state.rules:
        state.write()
        action, ha = state.rules.pop(0)
        ui.debug('histedit: processing %s %s\n' % (action, ha))
        actfunc = actiontable[action]
        state.parentctx, replacement_ = actfunc(ui, state, ha, opts)
        state.replacements.extend(replacement_)

    hg.update(repo, state.parentctx.node())

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit:                            %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    if not keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            markers = []
            # sort by revision number because it sounds "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    cleanupnode(ui, repo, 'temp', tmpnodes)
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))