Example #1
def _hidenodes(repo, nodes):
    """Hide the given nodes, creating obsmarkers and/or removing them from
    the visible set depending on which mechanisms are enabled."""
    unfi = repo
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        markers = [(unfi[n], ()) for n in nodes]
        obsolete.createmarkers(repo, markers)
    if visibility.tracking(repo):
        visibility.remove(repo, nodes)
Example #2
def _revive(repo, rev):
    """Brings the given rev back into the repository. Finding it in backup
    bundles if necessary.
    """
    unfi = repo
    try:
        ctx = unfi[rev]
    except error.RepoLookupError:
        # It could either be a revset or a stripped commit.
        pass
    else:
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            if ctx.obsolete():
                torevive = unfi.set("::%d & obsolete()", ctx.rev())
                obsolete.revive(torevive, operation="reset")
        visibility.add(repo, [ctx.node()])

    try:
        revs = scmutil.revrange(repo, [rev])
        if len(revs) > 1:
            raise error.Abort(_("exactly one revision must be specified"))
        if len(revs) == 1:
            return repo[revs.first()]
    except error.RepoLookupError:
        revs = []

    return _pullbundle(repo, rev)
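
For illustration, a minimal pure-Python sketch of the lookup-then-fallback shape used by `_revive` (all names here are hypothetical; no Mercurial APIs are involved): try the cheap local lookup first, and only fall back to the expensive recovery path when it fails.

def revive(local_index, fetch_from_backup, rev):
    # Cheap path first: the revision may already be known locally.
    try:
        return local_index[rev]
    except KeyError:
        pass  # could be a revset or a stripped commit; fall through
    # Expensive path: recover the revision from a backup source.
    return fetch_from_backup(rev)

local = {"abc123": "ctx-abc123"}
assert revive(local, lambda r: "pulled-" + r, "abc123") == "ctx-abc123"
assert revive(local, lambda r: "pulled-" + r, "def456") == "pulled-def456"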
Example #3
def smarthide(repo, revhide, revshow, local=False):
    """hides changecontexts and reveals some commits

    tries to connect related hides and shows with obs marker
    when reasonable and correct

    use local to not hide revhides without corresponding revshows
    """
    hidectxs = repo.set(revhide)
    showctxs = repo.set(revshow)
    markers = []
    nodes = []
    unfi = repo.unfiltered()
    for ctx in hidectxs:
        related = set()
        if mutation.enabled(unfi):
            related.update(mutation.allpredecessors(unfi, [ctx.node()]))
            related.update(mutation.allsuccessors(unfi, [ctx.node()]))
        else:
            related.update(obsutil.allpredecessors(unfi.obsstore,
                                                   [ctx.node()]))
            related.update(obsutil.allsuccessors(unfi.obsstore, [ctx.node()]))
        related.intersection_update(x.node() for x in showctxs)
        destinations = [repo[x] for x in related]

        # Two primary objectives:
        # 1. correct divergence/nondivergence
        # 2. correct visibility of changesets for the user
        # Secondary objective:
        # 3. useful ui message in hg sl: "Undone to"
        # Design choices:
        # 1-to-1 correspondence is easy.
        # 1-to-many correspondence is hard: it is either divergence
        # (A to B, A to C) or a split (A to B,C), and because of undo
        # we cannot tell which without complex logic.
        # Solution: provide a helpful ui message for the common and
        # easy case (1-to-1); use the simplest correct solution for
        # the complex edge cases.

        if len(destinations) == 1:
            markers.append((ctx, destinations))
            nodes.append(ctx.node())
        elif len(destinations) > 1:  # split
            markers.append((ctx, []))
            nodes.append(ctx.node())
        elif len(destinations) == 0:
            if not local:
                markers.append((ctx, []))
                nodes.append(ctx.node())

    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        obsolete.createmarkers(repo, markers, operation="undo")
    visibility.remove(repo, nodes)
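
The three-way classification in `smarthide` is plain list logic and can be sketched standalone. A hedged toy version (hypothetical names, plain Python): exactly one destination records the successor, several destinations are treated as an ambiguous divergence-or-split and simply pruned, and zero destinations are pruned only outside local mode.

def classify(destinations, local=False):
    if len(destinations) == 1:
        return ("marker", destinations)       # 1-to-1: record the successor
    if len(destinations) > 1:
        return ("marker", [])                 # divergence or split: just prune
    return None if local else ("marker", [])  # no destination: prune unless local

assert classify(["B"]) == ("marker", ["B"])
assert classify(["B", "C"]) == ("marker", [])
assert classify([], local=True) is None
assert classify([]) == ("marker", [])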
Example #4
def addpendingobsmarkers(repo, markers):
    """Appends the given markers to the pending obsmarkers file, taking the
    obsmarkers lock to serialize writers."""
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return

    with lockmod.lock(repo.svfs, _obsmarkerslockname, timeout=_obsmarkerslocktimeout):
        with repo.svfs.open(_obsmarkerspending, "ab") as f:
            offset = f.tell()
            # offset == 0: new file - add the version header
            data = b"".join(
                obsolete.encodemarkers(markers, offset == 0, obsolete._fm1version)
            )
            f.write(data)
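
The `offset == 0` check above is a common append-with-header pattern. A self-contained sketch with plain files instead of the repo vfs (HEADER and the file name are stand-ins, not the real obsmarker format):

import os
import tempfile

HEADER = b"\x01"  # stand-in for the format version header bytes

def append_records(path, payload):
    with open(path, "ab") as f:
        if f.tell() == 0:  # offset == 0: new file - add the version header
            f.write(HEADER)
        f.write(payload)

path = os.path.join(tempfile.mkdtemp(), "pending.bin")
append_records(path, b"marker1")
append_records(path, b"marker2")
with open(path, "rb") as f:
    assert f.read() == HEADER + b"marker1" + b"marker2"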
Example #5
def getsyncingobsmarkers(repo):
    """Transfers any pending obsmarkers, and returns all syncing obsmarkers.

    The caller must hold the backup lock.
    """
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return []

    # Move any new obsmarkers from the pending file to the syncing file
    with lockmod.lock(repo.svfs, _obsmarkerslockname, timeout=_obsmarkerslocktimeout):
        if repo.svfs.exists(_obsmarkerspending):
            with repo.svfs.open(_obsmarkerspending) as f:
                _version, markers = obsolete._readmarkers(f.read())
            with repo.sharedvfs.open(_obsmarkerssyncing, "ab") as f:
                offset = f.tell()
                # offset == 0: new file - add the version header
                data = b"".join(
                    obsolete.encodemarkers(markers, offset == 0, obsolete._fm1version)
                )
                f.write(data)
            repo.svfs.unlink(_obsmarkerspending)

    # Load the syncing obsmarkers
    markers = []
    if repo.sharedvfs.exists(_obsmarkerssyncing):
        with repo.sharedvfs.open(_obsmarkerssyncing) as f:
            _version, markers = obsolete._readmarkers(f.read())

    # developer config: commitcloud.maxsendmarkers
    # set to -1 to disable this completely
    maxsendmarkers = repo.ui.configint("commitcloud", "maxsendmarkers", 500)
    if maxsendmarkers >= 0 and len(markers) > maxsendmarkers:
        # Sending too many markers is unlikely to work.  Just send the most
        # recent ones.  Slice from an explicit start index: markers[-0:]
        # would return the whole list when the cap is zero.
        markers = markers[len(markers) - maxsendmarkers:]
    return markers
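
The `maxsendmarkers` cap is easy to check in isolation. A small sketch (hypothetical names) that also sidesteps the `markers[-0:]` pitfall noted above:

def cap_most_recent(markers, maxsend):
    if maxsend >= 0 and len(markers) > maxsend:
        # markers[-maxsend:] would return the whole list when maxsend
        # is zero, so slice from an explicit start index instead.
        return markers[len(markers) - maxsend:]
    return markers

assert cap_most_recent([1, 2, 3, 4], 2) == [3, 4]
assert cap_most_recent([1, 2, 3, 4], 0) == []
assert cap_most_recent([1, 2, 3, 4], -1) == [1, 2, 3, 4]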
Example #6
def _mergeobsmarkers(repo, tr, obsmarkers):
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        # These markers came from the cloud; flag the transaction so they are
        # not written back out to the pending (to-be-uploaded) file.
        tr._commitcloudskippendingobsmarkers = True
        repo.obsstore.add(tr, obsmarkers)
Example #7
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        # Use a separate name for the snapshot transaction so the outer
        # ``tr`` (still used below) is not shadowed once this block exits.
        with repo.lock(), repo.transaction("sync-snapshots") as snapshottr:
            repo.snapshotlist.update(
                snapshottr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
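
The visible-heads reconciliation above is pure set arithmetic, so it can be checked standalone. A minimal sketch with toy data, using `dict.fromkeys` as a stand-in for `util.removeduplicates`: a head survives if it is newly introduced on either side, or if it was previously visible and is still visible both locally and in the cloud.

def reconcile(oldvisible, cloudheads, localheads):
    # Merge, oldest first, keeping the first occurrence of each head.
    merged = list(dict.fromkeys(oldvisible + cloudheads + localheads))
    # A previously-visible head is dropped if either side no longer has it.
    toremove = {h for h in oldvisible
                if h not in set(localheads) or h not in set(cloudheads)}
    return [h for h in merged if h not in toremove]

# "a" was hidden locally, "c" was hidden in the cloud, "d"/"e" are new.
assert reconcile(["a", "b", "c"], ["b", "d"], ["b", "e"]) == ["b", "d", "e"]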
Example #8
def unamend(ui, repo, **opts):
    """undo the last amend operation on the current commit

    Reverse the effects of an :hg:`amend` operation. Hides the current commit
    and checks out the previous version of the commit. :hg:`unamend` does not
    revert the state of the working copy, so changes that were added to the
    commit in the last amend operation become pending changes in the working
    copy.

    :hg:`unamend` cannot be run on amended commits that have children. In
    other words, you cannot unamend an amended commit in the middle of a
    stack.

    .. note::

        Running :hg:`unamend` is similar to running :hg:`undo --keep`
        immediately after :hg:`amend`. However, unlike :hg:`undo`, which can
        only undo an amend if it was the last operation you performed,
        :hg:`unamend` can unamend any draft amended commit in the graph that
        does not have children.

    .. container:: verbose

      Although :hg:`unamend` is typically used to reverse the effects of
      :hg:`amend`, it actually rolls back the current commit to its previous
      version, regardless of whether the changes resulted from an :hg:`amend`
      operation or from another operation, such as :hg:`rebase`.
    """
    unfi = repo

    # identify the commit from which to unamend
    curctx = repo["."]

    # identify the commit to which to unamend
    if mutation.enabled(repo):
        prednodes = curctx.mutationpredecessors() or []
    else:
        prednodes = [marker.prednode() for marker in predecessormarkers(curctx)]

    if len(prednodes) != 1:
        e = _("changeset must have one predecessor, found %i predecessors")
        raise error.Abort(e % len(prednodes))
    prednode = prednodes[0]

    if prednode not in unfi:
        # Trigger autopull.
        autopull.trypull(unfi, [nodemod.hex(prednode)])

    predctx = unfi[prednode]

    if curctx.children():
        raise error.Abort(_("cannot unamend in the middle of a stack"))

    with repo.wlock(), repo.lock():
        ctxbookmarks = curctx.bookmarks()
        changedfiles = []
        wctx = repo[None]
        wm = wctx.manifest()
        cm = predctx.manifest()
        dirstate = repo.dirstate
        diff = cm.diff(wm)
        changedfiles.extend(pycompat.iterkeys(diff))

        tr = repo.transaction("unamend")
        with dirstate.parentchange():
            dirstate.rebuild(prednode, cm, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in pycompat.iteritems(diff):
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)
        changes = []
        for book in ctxbookmarks:
            changes.append((book, prednode))
        repo._bookmarks.applychanges(repo, tr, changes)
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            obsolete.createmarkers(repo, [(curctx, (predctx,))])
        visibility.remove(repo, [curctx.node()])
        visibility.add(repo, [predctx.node()])
        tr.close()
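
The dirstate rebuild above keys off the manifest diff format, where each side of an entry is a (node, flags) pair and a None node means the file is absent on that side. A toy sketch of that interpretation (hypothetical data, no Mercurial imports):

def classify_diff(diff):
    added, removed = [], []
    for filename, ((oldnode, _oldfl), (newnode, _newfl)) in diff.items():
        if oldnode is None:   # absent in the predecessor: show as added
            added.append(filename)
        if newnode is None:   # absent in the working copy: show as removed
            removed.append(filename)
    return added, removed

diff = {
    "new.txt": ((None, ""), ("n1", "")),
    "gone.txt": (("n2", ""), (None, "")),
}
assert classify_diff(diff) == (["new.txt"], ["gone.txt"])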
Example #9
def hide(ui, repo, *revs, **opts):
    """hide commits and their descendants

    Mark the specified commits as hidden. Hidden commits are not included in
    the output of most Mercurial commands, including :hg:`log` and
    :hg:`smartlog`. Any descendants of the specified commits will also be
    hidden.

    Hidden commits are not deleted. They will remain in the repo indefinitely
    and are still accessible by their hashes. However, :hg:`hide` will delete
    any bookmarks pointing to hidden commits.

    Use the :hg:`unhide` command to make hidden commits visible again. See
    :hg:`help unhide` for more information.

    To view hidden commits, run :hg:`journal`.

    When you hide the current commit, the most recent visible ancestor is
    checked out.

    To hide obsolete stacks (stacks that have a newer version), run
    :hg:`hide --cleanup`. This command is equivalent to:

    :hg:`hide 'obsolete() - ancestors(draft() & not obsolete())'`

    --cleanup skips obsolete commits with non-obsolete descendants.
    """
    if opts.get("cleanup") and len(opts.get("rev") + list(revs)) != 0:
        raise error.Abort(_("--rev and --cleanup are incompatible"))
    elif opts.get("cleanup"):
        # hides all the draft, obsolete commits that
        # don't have non-obsolete descendants
        revs = ["obsolete() - (draft() & ::(draft() & not obsolete()))"]
    else:
        revs = list(revs) + opts.pop("rev", [])

    with repo.wlock(), repo.lock(), repo.transaction("hide") as tr:
        revs = repo.revs("(%ld)::", scmutil.revrange(repo, revs))

        bookmarks = set(opts.get("bookmark", ()))
        if bookmarks:
            revs += bookmarksmod.reachablerevs(repo, bookmarks)
            if not revs:
                # No revs are reachable exclusively from these bookmarks, just
                # delete the bookmarks.
                if not ui.quiet:
                    for bookmark in sorted(bookmarks):
                        ui.status(
                            _("removing bookmark '%s' (was at: %s)\n")
                            % (bookmark, short(repo._bookmarks[bookmark]))
                        )
                bookmarksmod.delete(repo, tr, bookmarks)
                ui.status(
                    _n(
                        "%i bookmark removed\n",
                        "%i bookmarks removed\n",
                        len(bookmarks),
                    )
                    % len(bookmarks)
                )
                return 0

        if not revs:
            raise error.Abort(_("nothing to hide"))

        hidectxs = [repo[r] for r in revs]

        # revs to be hidden
        for ctx in hidectxs:
            if not ctx.mutable():
                raise error.Abort(
                    _("cannot hide immutable changeset: %s") % ctx,
                    hint="see 'hg help phases' for details",
                )
            if not ui.quiet:
                ui.status(
                    _('hiding commit %s "%s"\n')
                    % (ctx, ctx.description().split("\n")[0][:50])
                )

        wdp = repo["."]
        newnode = wdp

        while newnode in hidectxs:
            newnode = newnode.parents()[0]

        if newnode.node() != wdp.node():
            cmdutil.bailifchanged(repo, merge=False)
            hg.update(repo, newnode, False)
            ui.status(
                _("working directory now at %s\n") % ui.label(str(newnode), "node")
            )

        # create markers
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            obsolete.createmarkers(repo, [(r, []) for r in hidectxs], operation="hide")
        visibility.remove(repo, [c.node() for c in hidectxs])
        ui.status(
            _n("%i changeset hidden\n", "%i changesets hidden\n", len(hidectxs))
            % len(hidectxs)
        )

        # remove bookmarks pointing to hidden changesets
        hnodes = [r.node() for r in hidectxs]
        deletebookmarks = set(bookmarks)
        for bookmark, node in sorted(bookmarksmod.listbinbookmarks(repo)):
            if node in hnodes:
                deletebookmarks.add(bookmark)
        if deletebookmarks:
            for bookmark in sorted(deletebookmarks):
                if not ui.quiet:
                    ui.status(
                        _('removing bookmark "%s" (was at: %s)\n')
                        % (bookmark, short(repo._bookmarks[bookmark]))
                    )
            bookmarksmod.delete(repo, tr, deletebookmarks)
            ui.status(
                _n(
                    "%i bookmark removed\n",
                    "%i bookmarks removed\n",
                    len(deletebookmarks),
                )
                % len(deletebookmarks)
            )
        hintutil.trigger("undo")
Example #10
def _dounhide(repo, revs):
    """Make the given revs visible again, reviving obsolete ancestors when
    obsmarkers are enabled."""
    unfi = repo
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ctxs = unfi.set("not public() & ::(%ld) & obsolete()", revs)
        obsolete.revive(ctxs, operation="unhide")
    visibility.add(repo, [unfi[r].node() for r in revs])
Example #11
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller changesets

    Prompt for hunks to be selected until exhausted. Each selection of hunks
    will form a separate changeset, in order from parent to child: the first
    selection will form the first changeset, the second selection will form
    the second changeset, and so on.

    Operates on the current revision by default. Use --rev to split a given
    changeset instead.
    """
    newcommits = []

    revarg = (list(revs) + opts.get("rev")) or ["."]
    if len(revarg) != 1:
        msg = _("more than one revset is given")
        hnt = _(
            "use either `hg split <rs>` or `hg split --rev <rs>`, not both")
        raise error.Abort(msg, hint=hnt)

    rev = scmutil.revsingle(repo, revarg[0])
    if opts.get("no_rebase"):
        torebase = ()
    else:
        torebase = list(
            map(hex, repo.nodes("descendants(%d) - (%d)", rev, rev)))

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)
        if torebase:
            cmdutil.checkunfinished(repo)
        ctx = repo[rev]
        r = ctx.hex()
        allowunstable = visibility.tracking(repo) or obsolete.isenabled(
            repo, obsolete.allowunstableopt)
        if not allowunstable:
            # XXX We should check head revs
            if repo.revs("(%d::) - %d", rev, rev):
                raise error.Abort(
                    _("cannot split commit: %s not a head") % ctx)

        if len(ctx.parents()) > 1:
            raise error.Abort(_("cannot split merge commits"))
        prev = ctx.p1()
        bmupdate = common.bookmarksupdater(repo, ctx.node())
        bookactive = repo._activebookmark
        if bookactive is not None:
            repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
        bookmarks.deactivate(repo)
        hg.update(repo, prev)

        commands.revert(ui, repo, rev=r, all=True)

        def haschanges():
            modified, added, removed, deleted = repo.status()[:4]
            return modified or added or removed or deleted

        # We need to detect the case where the user selects all remaining
        # changes, as that will end the split.  That's the commit we want to
        # mark as the result of the split.  To do this, wrap the recordfilter
        # function and compare the output to see if it contains all the
        # originalchunks.
        shouldrecordmutation = [False]

        def mutinfo(extra):
            if shouldrecordmutation[0]:
                return mutation.record(
                    repo,
                    extra,
                    [ctx.node()],
                    "split",
                    splitting=[c.node() for c in newcommits],
                )

        def recordfilter(ui, originalchunks, operation=None):
            chunks, newopts = cmdutil.recordfilter(ui, originalchunks,
                                                   operation)
            if cmdutil.comparechunks(chunks, originalchunks):
                shouldrecordmutation[0] = True
            return chunks, newopts

        msg = ("HG: This is the original pre-split commit message. "
               "Edit it as appropriate.\n\n")
        msg += ctx.description()
        opts["message"] = msg
        opts["edit"] = True
        opts["_commitmutinfofunc"] = mutinfo
        try:
            while haschanges():
                pats = ()
                with repo.transaction("split"):
                    cmdutil.dorecord(ui, repo, commands.commit, "commit",
                                     False, recordfilter, *pats, **opts)
                # TODO: This does not seem like the best way to do this.
                # We should make dorecord return the newly created commit
                newcommits.append(repo["."])
                if haschanges():
                    if ui.prompt("Done splitting? [yN]", default="n") == "y":
                        shouldrecordmutation[0] = True
                        with repo.transaction("split"):
                            commands.commit(ui, repo, **opts)
                        newcommits.append(repo["."])
                        break
                else:
                    ui.status(_("no more change to split\n"))
        except Exception:
            # Rollback everything
            hg.updaterepo(repo, r, True)  # overwrite=True
            if newcommits:
                visibility.remove(repo, [c.node() for c in newcommits])

            if bookactive is not None:
                bookmarks.activate(repo, bookactive)
            raise

        if newcommits:
            phabdiffs = {}
            for c in newcommits:
                phabdiff = diffprops.parserevfromcommitmsg(
                    repo[c].description())
                if phabdiff:
                    phabdiffs.setdefault(phabdiff, []).append(c)
            if any(len(commits) > 1 for commits in phabdiffs.values()):
                hintutil.trigger("split-phabricator",
                                 ui.config("split", "phabricatoradvice"))

            tip = repo[newcommits[-1]]
            with repo.transaction("post-split"):
                bmupdate(tip.node())
                if bookactive is not None:
                    bookmarks.activate(repo, bookactive)
                if obsolete.isenabled(repo, obsolete.createmarkersopt):
                    obsolete.createmarkers(repo, [(repo[r], newcommits)],
                                           operation="split")
            if torebase:
                rebaseopts = {"dest": "_destrestack(SRC)", "rev": torebase}
                rebase.rebase(ui, repo, **rebaseopts)
            unfi = repo
            with repo.transaction("post-split-hide"):
                visibility.remove(repo, [unfi[r].node()])
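
The `recordfilter` wrapper above detects the selection that ends the split by comparing the picked chunks with the originals (the real code uses `cmdutil.comparechunks`; the sketch below substitutes a sorted-equality check and hypothetical names):

def make_recordfilter(state, realfilter):
    def recordfilter(chunks):
        picked = realfilter(chunks)
        if sorted(picked) == sorted(chunks):  # everything was selected
            state["selected_all"] = True      # this commit ends the split
        return picked
    return recordfilter

state = {"selected_all": False}
partial = make_recordfilter(state, lambda chunks: chunks[:1])
partial(["hunk1", "hunk2"])
assert not state["selected_all"]
full = make_recordfilter(state, lambda chunks: list(chunks))
full(["hunk1", "hunk2"])
assert state["selected_all"]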
Example #12
def _dopull(orig, ui, repo, source="default", **opts):
    # Copied from the `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get("branch"))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    pullbookmarks = opts.get("bookmark") or []
    for rev in opts.get("rev", []):
        if repo._scratchbranchmatcher.match(rev):
            # rev is a scratch bookmark, treat it as a bookmark
            pullbookmarks.append(rev)
        elif rev not in unfi:
            unknownnodes.append(rev)
    if pullbookmarks:
        realbookmarks = []
        revs = opts.get("rev") or []
        for bookmark in pullbookmarks:
            if repo._scratchbranchmatcher.match(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = "REVTOFETCH"
            else:
                realbookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                "bookmarks", patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort(_("remote bookmark %s not found!") %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts["bookmark"] = realbookmarks
        opts["rev"] = [rev for rev in revs if rev not in scratchbookmarks]

    # Pulling revisions that were filtered results in an error.
    # Let's revive them.
    unfi = repo.unfiltered()
    torevive = []
    for rev in opts.get("rev", []):
        try:
            repo[rev]
        except error.FilteredRepoLookupError:
            torevive.append(rev)
        except error.RepoLookupError:
            pass
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        obsolete.revive([unfi[r] for r in torevive])
    visibility.add(repo, [unfi[r].node() for r in torevive])

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, "findcommonincoming",
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore them after.
        remotescratchbookmarks = bookmarks.readremotebookmarks(
            ui, repo, source)
        result = orig(ui, repo, source, **opts)
        # TODO(stash): a race condition is possible if scratch
        # bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        with repo.wlock(), repo.lock(), repo.transaction("pull"):
            if bookmarks.remotebookmarksenabled(ui):
                remotescratchbookmarks.update(scratchbookmarks)
                bookmarks.saveremotebookmarks(repo, remotescratchbookmarks,
                                              source)
            else:
                bookmarks.savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, "findcommonincoming")
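
The scratch-bookmark handling starts by partitioning the requested revs: anything matching the scratch pattern is resolved as a remote bookmark, while the rest stay ordinary revisions. A simplified, self-contained sketch (matcher and names hypothetical):

def partition(revs, is_scratch):
    scratchbookmarks, realrevs = [], []
    for rev in revs:
        (scratchbookmarks if is_scratch(rev) else realrevs).append(rev)
    return scratchbookmarks, realrevs

bm, rr = partition(["scratch/foo", "abc123"],
                   lambda r: r.startswith("scratch/"))
assert bm == ["scratch/foo"] and rr == ["abc123"]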
Example #13
def revealcommits(repo, rev):
    """Make the commits matched by the given revset visible again."""
    ctxs = list(repo.set(rev))
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        obsolete.revive(ctxs)
    visibility.add(repo, [ctx.node() for ctx in ctxs])
Example #14
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    # TODO(alexeyqu): pull snapshots separately
    newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui,
                            _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        # Use a separate name for the snapshot transaction so the outer
        # ``tr`` (still used below) is not shadowed once this block exits.
        with repo.lock(), repo.transaction("sync-snapshots") as snapshottr:
            repo.snapshotlist.update(snapshottr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
Example #15
def clearsyncingobsmarkers(repo):
    """Clears all syncing obsmarkers.  The caller must hold the backup lock."""
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return

    repo.sharedvfs.tryunlink(_obsmarkerssyncing)