Example #1
def smartlogrevset(repo, subset, x):
    """``smartlog([heads], [master])``
    Changesets relevant to you.

    'heads' overrides what feature branches to include.
    (default: 'interestingbookmarks() + heads(draft()) + .')

    'master' is the head of the public branch.
    (default: 'interestingmaster()')
    """
    args = revset.getargsdict(x, "smartlogrevset", "heads master")
    if "master" in args:
        masterset = revset.getset(repo, subset, args["master"])
    else:
        masterset = repo.revs("interestingmaster()")

    if "heads" in args:
        heads = set(revset.getset(repo, subset, args["heads"]))
    else:
        heads = set(repo.revs("interestingbookmarks() + heads(draft()) + ."))

    # Remove "null" commit. "::x" does not support it.
    masterset -= smartset.baseset([nodemod.nullrev])
    if nodemod.nullrev in heads:
        heads.remove(nodemod.nullrev)
    # Explicitly disable revnum deprecation warnings.
    with repo.ui.configoverride({("devel", "legacy.revnum:real"): ""}):
        # Select ancestors that are draft.
        drafts = repo.revs("draft() & ::%ld", heads)
        # Include parents of drafts, and public heads.
        revs = repo.revs(
            "parents(%ld) + %ld + %ld + %ld", drafts, drafts, heads, masterset
        )

    # Include the ancestor of above commits to make the graph connected.
    #
    # When calculating ancestors, filter commits using 'public()' to reduce the
    # number of commits to calculate. This is sound because the above logic
    # includes p1 of draft commits, and assumes master is public. Practically,
    # this optimization can make a 3x difference.
    revs = smartset.baseset(repo.revs("ancestor(%ld & public()) + %ld", revs, revs))

    # Collapse long obsoleted stack - only keep their heads and roots.
    # This is not yet compatible with automation (namely, nuclide-core).
    if repo.ui.configbool("smartlog", "collapse-obsolete") and not repo.ui.plain():
        obsrevs = smartset.baseset(repo.revs("%ld & obsolete()", revs))
        hiderevs = smartset.baseset(
            repo.revs("%ld - (heads(%ld) + roots(%ld))", obsrevs, obsrevs, obsrevs)
        )
        revs = repo.revs("%ld - %ld", revs, hiderevs)

    return subset & revs
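
Revset predicates like this one are normally exposed to 'hg log -r' through Mercurial's registrar machinery. A minimal registration sketch, assuming the stock module path (edenscm/fb-hgext trees lay this out differently):

from mercurial import registrar  # assumed import path

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate("smartlog([heads], [master])")
def smartlogrevset(repo, subset, x):
    ...  # body as in the example above

# Once registered, the predicate is available on the command line:
#   hg log --graph -r 'smartlog()'
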
Example #2
def lastsubmitted(repo, subset, x):
    revs = revset.getset(repo, revset.fullreposet(repo), x)
    phabrevs = set()
    for rev in revs:
        phabrev = diffprops.parserevfromcommitmsg(repo[rev].description())
        if phabrev is None:
            mess = _(
                "local changeset is not associated with a differential revision"
            )
            raise error.Abort(mess)
        phabrevs.add(phabrev)

    resultrevs = set()
    for phabrev in phabrevs:
        diffrev = _differentialhash(repo.ui, repo, phabrev)
        if (diffrev is None or not isinstance(diffrev, dict)
                or "hash" not in diffrev):
            mess = _("unable to determine previous changeset hash")
            raise error.Abort(mess)

        lasthash = str(diffrev["hash"])
        _maybepull(repo, lasthash)
        resultrevs.add(repo[lasthash].rev())

    return subset & smartset.baseset(sorted(resultrevs), repo=repo)
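
The key step here is diffprops.parserevfromcommitmsg, which extracts the Phabricator revision from the commit description. A hedged sketch of what such a parser might look like (a hypothetical reimplementation, not the actual diffprops API; the trailer format is an assumption):

import re

# Matches a "Differential Revision: https://.../D12345" trailer and
# captures the numeric revision id (an assumed trailer format).
_DIFFREV_RE = re.compile(r"^Differential Revision:.*?/D(\d+)", re.MULTILINE)

def parserevfromcommitmsg_sketch(description):
    m = _DIFFREV_RE.search(description)
    return m.group(1) if m else None  # e.g. "12345" for ".../D12345"
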
Example #3
def getstablerev(repo, subset, x):
    """Returns the "stable" revision.

    The script to run is set via config::

      [stablerev]
      script = scripts/get_stable_rev.py

    The revset takes an optional "target" argument that is passed to the
    script (as `--target $TARGET`). This argument can be made `optional`,
    `required`, or `forbidden`::

      [stablerev]
      targetarg = forbidden

    The revset can automatically pull if the returned commit doesn't exist
    locally::

      [stablerev]
      pullonmissing = False
    """
    ui = repo.ui
    target = None
    args = getargsdict(x, "getstablerev", "target")
    if "target" in args:
        target = getstring(args["target"],
                           _("target argument must be a string"))

    _validatetarget(ui, target)
    revspec = _executeandparse(ui, repo, target)
    trypull = ui.configbool("stablerev", "pullonmissing", False)
    commitctx = _lookup(ui, repo, revspec, trypull=trypull)

    return subset & baseset([commitctx.rev()])
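
_validatetarget is not shown in this example; based on the docstring, a plausible sketch of the policy it enforces would be (helper name and messages are assumptions):

from mercurial import error
from mercurial.i18n import _

def _validatetarget_sketch(ui, target):
    # enforce the stablerev.targetarg policy described in the docstring
    mode = ui.config("stablerev", "targetarg", "forbidden")
    if mode == "required" and target is None:
        raise error.Abort(_("this revset requires a target argument"))
    if mode == "forbidden" and target is not None:
        raise error.Abort(_("this revset takes no target argument"))
    # mode == "optional": nothing to check
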
Example #4
def gitnode(repo, subset, x):
    """``gitnode(id)``
    Return the hg revision corresponding to a given git rev."""
    l = revset.getargs(x, 1, 1, _("id requires one argument"))
    n = revset.getstring(l[0], _("id requires a string"))

    reponame = repo.ui.config("fbscmquery", "reponame")
    if not reponame:
        # We don't know who we are, so we can't ask for a translation
        return subset.filter(lambda r: False)
    backingrepos = repo.ui.configlist("fbscmquery", "backingrepos", default=[reponame])

    lasterror = None
    hghash = None
    for backingrepo in backingrepos:
        try:
            client = graphql.Client(repo=repo)
            hghash = client.getmirroredrev(backingrepo, "git", reponame, "hg", n)
            if hghash != "":
                break
        except Exception as ex:
            lasterror = ex

    if not hghash:
        if lasterror:
            repo.ui.warn(
                "Could not translate revision {0}: {1}\n".format(n, lasterror)
            )
        else:
            repo.ui.warn(("Could not translate revision {0}\n".format(n)))
        return subset.filter(lambda r: False)

    rn = repo[node.bin(hghash)].rev()
    return subset & smartset.baseset([rn])
Example #5
def _oldworkingcopyparent(repo, subset, x):
    """``oldworkingcopyparent([index])``
    previous working copy parent

    'index' is how many undoable commands you want to look back.  See 'hg undo'.
    """
    args = revset.getargsdict(x, "oldworkingcopyrevset", "reverseindex")
    reverseindex = revsetlang.getinteger(
        args.get("reverseindex"), _("index must be a positive integer"), 1)
    revs = _getoldworkingcopyparent(repo, reverseindex)
    return subset & smartset.baseset(revs)
Example #6
def backedup(repo, subset, x):
    """draft changesets that have been backed up to Commit Cloud"""
    path = ccutil.getnullableremotepath(repo.ui)
    if not path:
        return smartset.baseset(repo=repo)
    heads = backupstate.BackupState(repo, path).heads
    cl = repo.changelog
    if cl.algorithmbackend == "segments":
        backedup = repo.dageval(lambda: draft() & ancestors(heads))
        return subset & cl.torevset(backedup)
    backedup = repo.revs("not public() and ::%ln", heads)
    return smartset.filteredset(subset & repo.revs("draft()"), lambda r: r in backedup)
Example #7
def _cachedgetoldworkingcopyparent(repo, wkpnode):
    if not util.safehasattr(repo, "_undooldworkingparentcache"):
        repo._undooldworkingparentcache = {}
    cache = repo._undooldworkingparentcache
    key = wkpnode
    if key not in cache:
        oldworkingparent = _readnode(repo, "workingparent.i", wkpnode)
        oldworkingparent = filter(None, oldworkingparent.split("\n"))
        oldwkprevstring = revsetlang.formatspec("%ls", oldworkingparent)
        urepo = repo.unfiltered()
        cache[key] = smartset.baseset(urepo.revs(oldwkprevstring))
    return cache[key]
Example #8
def _destrestack(repo, subset, x):
    """restack destination for given single source revision"""
    unfi = repo.unfiltered()
    obsoleted = unfi.revs("obsolete()")
    getparents = unfi.changelog.parentrevs
    getphase = unfi._phasecache.phase
    nodemap = unfi.changelog.nodemap

    src = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - Do not return a destination
    if src is None or src in obsoleted:
        return smartset.baseset()

    # Find the obsoleted "base" by checking source's parent recursively
    base = src
    while base not in obsoleted:
        base = getparents(base)[0]
        # When encountering a public revision which cannot be obsoleted, stop
        # the search early and return no destination. Do the same for nullrev.
        if getphase(repo, base) == phases.public or base == nullrev:
            return smartset.baseset()

    # Find successors for given base
    # NOTE: Ideally we can use obsutil.successorssets to detect divergence
    # case. However it does not support cycles (unamend) well. So we use
    # allsuccessors and pick non-obsoleted successors manually as a workaround.
    basenode = repo[base].node()
    if mutation.enabled(repo):
        succnodes = mutation.allsuccessors(repo, [basenode])
    else:
        succnodes = obsutil.allsuccessors(repo.obsstore, [basenode])
    succnodes = [
        n for n in succnodes
        if (n != basenode and n in nodemap and nodemap[n] not in obsoleted)
    ]

    # In case of a split, only keep its heads
    succrevs = list(unfi.revs("heads(%ln)", succnodes))

    if len(succrevs) == 0:
        # Prune - Find the first non-obsoleted ancestor
        while base in obsoleted:
            base = getparents(base)[0]
            if base == nullrev:
                # Root node is pruned. The new base (destination) is the
                # virtual nullrev.
                return smartset.baseset([nullrev])
        return smartset.baseset([base])
    elif len(succrevs) == 1:
        # Unique visible successor case - A valid destination
        return smartset.baseset([succrevs[0]])
    else:
        # Multiple visible successors - Choose the one with a greater revision
        # number. This is to be compatible with restack old behavior. We might
        # want to revisit it when we introduce the divergence concept to users.
        return smartset.baseset([max(succrevs)])
Example #9
def _olddraft(repo, subset, x):
    """``olddraft([index])``
    previous draft commits

    'index' is how many undoable commands you want to look back.
    An undoable command is one that changed draft heads, bookmarks,
    and/or the working copy parent.  Note that olddraft uses an absolute
    index, so olddraft(1) represents the state after an 'hg undo -a', not
    an 'hg undo'.  Note: this revset may include hidden commits.
    """
    args = revset.getargsdict(x, "olddraftrevset", "reverseindex")
    reverseindex = revsetlang.getinteger(args.get("reverseindex"),
                                         _("index must be a positive integer"),
                                         1)
    revs = _getolddrafts(repo, reverseindex)
    return subset & smartset.baseset(revs)
Example #10
def upstream_revs(filt, repo, subset, x):
    upstream_tips = set()
    for remotename in repo._remotenames.keys():
        rname = "remote" + remotename
        try:
            ns = repo.names[rname]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            if filt(splitremotename(name)[0]):
                upstream_tips.update(ns.nodes(repo, name))

    if not upstream_tips:
        return smartset.baseset([], repo=repo)

    tipancestors = repo.revs("::%ln", upstream_tips)
    return smartset.filteredset(subset, lambda n: n in tipancestors)
Example #11
def notbackedup(repo, subset, x):
    """changesets that have not yet been backed up to Commit Cloud"""
    path = ccutil.getnullableremotepath(repo.ui)
    if not path:
        # arguably this should return draft(). However, since there is no
        # remote, and no way to do backup, returning an empty set avoids
        # upsetting users with "not backed up" warnings.
        return smartset.baseset(repo=repo)
    heads = backupstate.BackupState(repo, path).heads
    cl = repo.changelog
    if cl.algorithmbackend == "segments":
        notbackedup = repo.dageval(lambda: draft() - ancestors(heads))
        return subset & cl.torevset(notbackedup)
    backedup = repo.revs("not public() and ::%ln", heads)
    return smartset.filteredset(
        subset & repo.revs("not public() - hidden()"), lambda r: r not in backedup
    )
Example #12
def interestingheads(repo, subset, x):
    """Set of interesting bookmarks (local and remote)"""
    rev = repo.changelog.rev
    heads = set()
    books = bookmarks.bmstore(repo)
    ignore = re.compile(repo.ui.config("smartlog", "ignorebookmarks", "!"))
    for b in books:
        if not ignore.match(b):
            heads.add(rev(books[b]))

    # add 'interesting' remote bookmarks as well
    if util.safehasattr(repo, "names") and "remotebookmarks" in repo.names:
        ns = repo.names["remotebookmarks"]
        for name in _reposnames(repo.ui):
            nodes = ns.namemap(repo, name)
            if nodes:
                heads.add(rev(nodes[0]))

    return subset & smartset.baseset(heads, repo=repo)
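
The local-bookmark filter is driven by the smartlog.ignorebookmarks regular expression. An illustrative configuration (the pattern itself is an assumption):

# Illustrative hgrc entry; the default pattern "!" only matches
# bookmarks whose names start with a literal "!", i.e. effectively none.
#
#   [smartlog]
#   ignorebookmarks = ^(archive/|backup/)
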
Example #13
def cloudremote(repo, subset, x):
    """pull missing known changesets from the remote store

    Currently only for obsoleted commits, can be extended for any commit.
    """

    args = revset.getargs(x, 1, 50,
                          _("cloudremote takes from 1 up to 50 hex revs"))
    # each argument is a parse-tree node; n[1] is its string payload
    args = [n[1] for n in args]

    try:
        hexnodespulled = missingcloudrevspull(
            repo, [nodemod.bin(nodehex) for nodehex in args])
        return subset & repo.unfiltered().revs("%ls", hexnodespulled)
    except Exception as e:
        repo.ui.status(
            _("unable to pull all changesets from the remote store\n%s\n") % e,
            component="commitcloud",
        )
    return smartset.baseset([])
Example #14
def _localbranch(repo, subset, x):
    """``_localbranch(changectx)``
    localbranch changesets

    Returns all commits within the same localbranch as the changeset(s). A local
    branch is all draft changesets that are connected, uninterrupted by public
    changesets.  Any draft commit within a branch, or a public commit at the
    base of the branch, can be used to identify localbranches.
    """
    # executed on a filtered repo
    args = revset.getargsdict(x, "branchrevset", "changectx")
    revstring = revsetlang.getstring(
        args.get("changectx"), _("localbranch argument must be a changectx"))
    revs = repo.revs(revstring)
    # we assume that there is only a single rev
    if repo[revs.first()].phase() == phases.public:
        querystring = revsetlang.formatspec("(children(%d) & draft())::",
                                            revs.first())
    else:
        querystring = revsetlang.formatspec("((::%ld) & draft())::", revs)
    return subset & smartset.baseset(repo.revs(querystring))
Example #15
def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if shallowrepo.requirement not in repo.requirements:
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root,
                    repo.getcwd(), [pat],
                    default="relpath",
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])
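
As the docstring notes, filelog() trades accuracy for speed. A usage sketch contrasting it with file() (the path is illustrative):

# Usage sketch:
#   hg log -r 'filelog("mercurial/commands.py")'  # fast, may miss revs
#   hg log -r 'file("mercurial/commands.py")'     # slower, exact
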
Example #16
def _cachedgetolddrafts(repo, nodedict):
    if not util.safehasattr(repo, "_undoolddraftcache"):
        repo._undoolddraftcache = {}
    cache = repo._undoolddraftcache
    if repo.ui.configbool("experimental", "narrow-heads"):
        headnode = key = nodedict["visibleheads"]
        if key not in cache:
            oldheads = _readnode(repo, "visibleheads.i", headnode).split("\n")
            cache[key] = repo.revs("(not public()) & ::%ls", oldheads)
    else:
        draftnode = nodedict["draftheads"]
        obsnode = nodedict["draftobsolete"]
        key = draftnode + obsnode
        if key not in cache:
            olddraftheads = _readnode(repo, "draftheads.i", draftnode)
            oldheadslist = olddraftheads.split("\n")
            oldobs = _readnode(repo, "draftobsolete.i", obsnode)
            oldobslist = filter(None, oldobs.split("\n"))
            oldlogrevstring = revsetlang.formatspec(
                "(draft() & ancestors(%ls)) - %ls", oldheadslist, oldobslist)
            urepo = repo.unfiltered()
            cache[key] = smartset.baseset(urepo.revs(oldlogrevstring))
    return cache[key]
Example #17
def getdag(ui, repo, revs, master):

    knownrevs = set(revs)
    gpcache = {}
    results = []
    reserved = []

    # we store parents together with the parent type information
    # but sometimes we need just a list of parents
    # [(a,b), (c,d), (e,f)] => [b, d, f]
    def unzip(parents):
        if parents:
            return list(list(zip(*parents))[1])
        else:
            return list()

    # For each rev we need to show, compute its parents in the dag.
    # If we have to reach for a grandparent, insert a fake node so we
    # can show '...' in the graph.
    # Use 'reversed' to start at the lowest commit so fake nodes are
    # placed at their lowest possible positions.
    for rev in reversed(revs):
        ctx = repo[rev]
        # Parents in the dag
        parents = sorted(
            set(
                [
                    (graphmod.PARENT, p.rev())
                    for p in ctx.parents()
                    if p.rev() in knownrevs
                ]
            )
        )
        # Parents not in the dag
        mpars = [
            p.rev()
            for p in ctx.parents()
            if p.rev() != nodemod.nullrev and p.rev() not in unzip(parents)
        ]

        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                gp = gpcache[mpar] = dagop.reachableroots(
                    repo, smartset.baseset(revs), [mpar]
                )
            if not gp:
                parents.append((graphmod.MISSINGPARENT, mpar))
            else:
                gp = [g for g in gp if g not in unzip(parents)]
                for g in gp:
                    parents.append((graphmod.GRANDPARENT, g))

        results.append((ctx.rev(), "C", ctx, parents))

    # Compute parent rev->parents mapping
    lookup = {}
    for r in results:
        lookup[r[0]] = unzip(r[3])

    def parentfunc(node):
        return lookup.get(node, [])

    # Compute the revs on the master line. We use this for sorting later.
    masters = set()
    queue = [master]
    while queue:
        m = queue.pop()
        if m not in masters:
            masters.add(m)
            queue.extend(lookup.get(m, []))

    # Topologically sort the rev numbers. Note: unlike the vanilla
    # topological sorting, we move master to the top.
    order = sortnodes([r[0] for r in results], parentfunc, masters)
    order = {rev: idx for idx, rev in enumerate(order)}

    # Sort the actual results based on their position in the 'order'
    try:
        results.sort(key=lambda x: order[x[0]], reverse=True)
    except ValueError:  # happens when 'order' is empty
        ui.warn(_("smartlog encountered an error\n"), notice=_("note"))
        ui.warn(_("(so the sorting might be wrong)\n\n"))
        results.reverse()

    # indent the top non-public stack
    if ui.configbool("smartlog", "indentnonpublic", False):
        rev, ch, ctx, parents = results[0]
        if ctx.phase() != phases.public:
            # find a public parent and add a fake node, so the non-public nodes
            # will be shown in the non-first column
            prev = None
            for i in range(1, len(results)):
                pctx = results[i][2]
                if pctx.phase() == phases.public:
                    prev = results[i][0]
                    break
            if prev is not None:
                reserved.append(prev)

    return results, reserved
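
sortnodes itself is not shown in this example. A minimal sketch of a compatible helper, assuming its contract is to return the revs parents-first while preferring revs on the master line whenever several revs are ready:

def sortnodes_sketch(nodes, parentfunc, masters):
    # Kahn-style topological sort: parents before children. When more
    # than one rev is ready, prefer revs in 'masters', then lower revs,
    # so the ordering is deterministic.
    nodes = set(nodes)
    children = {n: [] for n in nodes}
    indegree = {n: 0 for n in nodes}
    for n in nodes:
        for p in parentfunc(n):
            if p in nodes:
                children[p].append(n)
                indegree[n] += 1
    ready = sorted(
        (n for n in nodes if indegree[n] == 0),
        key=lambda n: (n not in masters, n),
    )
    result = []
    while ready:
        n = ready.pop(0)
        result.append(n)
        for c in children[n]:
            indegree[c] -= 1
            if indegree[c] == 0:
                ready.append(c)
        ready.sort(key=lambda n: (n not in masters, n))
    return result
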
Example #18
def _backup(
    repo,
    backupstate,
    remotepath,
    getconnection,
    revs=None,
):
    """backs up the given revisions to commit cloud

    Returns (backedup, failed), where "backedup" is a revset of the commits that
    were backed up, and "failed" is a revset of the commits that could not be
    backed up.
    """
    unfi = repo

    if revs is None:
        # No revs specified.  Back up all visible commits that are not already
        # backed up.
        revset = "heads(not public() - hidden() - (not public() & ::%ln))"
        heads = unfi.revs(revset, backupstate.heads)
    else:
        # Some revs were specified.  Back up all of those commits that are not
        # already backed up.
        heads = unfi.revs(
            "heads((not public() & ::%ld) - (not public() & ::%ln))",
            revs,
            backupstate.heads,
        )

    if not heads:
        return smartset.baseset(repo=repo), smartset.baseset(repo=repo)

    # Check if any of the heads are already available on the server.
    headnodes = list(unfi.nodes("%ld", heads))
    remoteheadnodes = {
        head
        for head, backedup in zip(
            headnodes,
            dependencies.infinitepush.isbackedupnodes(
                getconnection, [nodemod.hex(n) for n in headnodes]),
        ) if backedup
    }
    if remoteheadnodes:
        backupstate.update(remoteheadnodes)

    heads = unfi.revs("%ld - %ln", heads, remoteheadnodes)

    if not heads:
        return smartset.baseset(repo=repo), smartset.baseset(repo=repo)

    # Filter out any commits that have been marked as bad.
    badnodes = repo.ui.configlist("infinitepushbackup", "dontbackupnodes", [])
    if badnodes:
        badnodes = [node for node in badnodes if node in unfi]
        # The nodes we can't back up are the bad nodes and their descendants,
        # minus any commits that we know are already backed up anyway.
        badnodes = list(
            unfi.nodes(
                "(not public() & ::%ld) & (%ls::) - (not public() & ::%ln)",
                heads,
                badnodes,
                backupstate.heads,
            ))
        if badnodes:
            repo.ui.warn(
                _("not backing up commits marked as bad: %s\n") %
                ", ".join([nodemod.hex(node) for node in badnodes]))
            heads = unfi.revs("heads((not public() & ::%ld) - %ln)", heads,
                              badnodes)

    # Limit the number of heads we backup in a single operation.
    backuplimit = repo.ui.configint("infinitepushbackup", "maxheadstobackup")
    if backuplimit is not None and backuplimit >= 0:
        if len(heads) > backuplimit:
            repo.ui.status(
                _n(
                    "backing up only the most recent %d head\n",
                    "backing up only the most recent %d heads\n",
                    backuplimit,
                ) % backuplimit)
            heads = sorted(heads, reverse=True)[:backuplimit]

    # Back up the new heads.
    backingup = unfi.nodes("(not public() & ::%ld) - (not public() & ::%ln)",
                           heads, backupstate.heads)
    backuplock.progressbackingup(repo, list(backingup))
    with perftrace.trace("Push Backup Bundles"):
        newheads, failedheads = dependencies.infinitepush.pushbackupbundlestacks(
            repo.ui,
            unfi,
            getconnection,
            [nodemod.hex(n) for n in unfi.nodes("%ld", heads)],
        )

    # The commits that got backed up are all the ancestors of the new backup
    # heads, minus any commits that were already backed up at the start.
    backedup = unfi.revs("(not public() & ::%ls) - (not public() & ::%ln)",
                         newheads, backupstate.heads)
    # The commits that failed to get backed up are the ancestors of the failed
    # heads, except for commits that are also ancestors of a successfully backed
    # up head, or commits that were already known to be backed up.
    failed = unfi.revs(
        "(not public() & ::%ls) - (not public() & ::%ls) - (not public() & ::%ln)",
        failedheads,
        newheads,
        backupstate.heads,
    )

    backupstate.update(unfi.nodes("%ld", backedup))

    return backedup, failed