Example No. 1
def fastlogfollow(orig, repo, subset, x, name, followfirst=False):
    if followfirst:
        # fastlog does not support followfirst=True
        repo.ui.debug("fastlog: not used because 'followfirst' is set\n")
        return orig(repo, subset, x, name, followfirst)

    args = revset.getargsdict(x, name, "file startrev")
    if "file" not in args:
        # Not interesting for fastlog case.
        repo.ui.debug("fastlog: not used because 'file' is not provided\n")
        return orig(repo, subset, x, name, followfirst)

    if "startrev" in args:
        revs = revset.getset(repo, smartset.fullreposet(repo),
                             args["startrev"])
        it = iter(revs)
        try:
            startrev = next(it)
        except StopIteration:
            startrev = repo["."].rev()
        try:
            next(it)
            # fastlog does not support multiple startrevs
            repo.ui.debug(
                "fastlog: not used because multiple revs are provided\n")
            return orig(repo, subset, x, name, followfirst)
        except StopIteration:
            # supported by fastlog: startrev contains a single rev
            pass
    else:
        startrev = repo["."].rev()

    reponame = repo.ui.config("fbscmquery", "reponame")
    if not reponame or not repo.ui.configbool("fastlog", "enabled"):
        repo.ui.debug("fastlog: not used because fastlog is disabled\n")
        return orig(repo, subset, x, name, followfirst)

    path = revset.getstring(args["file"], _("%s expected a pattern") % name)
    if path.startswith("path:"):
        # strip "path:" prefix
        path = path[5:]

    if any(
            path.startswith("%s:" % prefix)
            for prefix in matchmod.allpatternkinds):
        # Patterns other than "path:" are not supported
        repo.ui.debug(
            "fastlog: not used because '%s:' patterns are not supported\n" %
            path.split(":", 1)[0])
        return orig(repo, subset, x, name, followfirst)

    files = [path]
    if not files or "." in files:
        # Walking the whole repo - bail on fastlog
        repo.ui.debug(
            "fastlog: not used because walking through the entire repo\n")
        return orig(repo, subset, x, name, followfirst)

    dirs = set()
    wvfs = repo.wvfs
    for path in files:
        if wvfs.isdir(path) and not wvfs.islink(path):
            dirs.update([path + "/"])
        else:
            if repo.ui.configbool("fastlog", "files"):
                dirs.update([path])
            else:
                # bail on symlinks, and also bail on files for now
                # with follow behavior, for files, we are supposed
                # to track copies / renames, but it isn't convenient
                # to do this through scmquery
                repo.ui.debug(
                    "fastlog: not used because %s is not a directory\n" % path)
                return orig(repo, subset, x, name, followfirst)

    rev = startrev

    parents = repo.changelog.parentrevs
    public = set()

    # Our criterion for invoking fastlog is finding a single
    # common public ancestor from the current head.  First we
    # have to walk back through drafts to find all interesting
    # public parents.  Typically this will just be one, but if
    # there are merged drafts, we may have multiple parents.
    if repo[rev].phase() == phases.public:
        public.add(rev)
    else:
        queue = deque()
        queue.append(rev)
        seen = set()
        while queue:
            cur = queue.popleft()
            if cur not in seen:
                seen.add(cur)
                if repo[cur].mutable():
                    for p in parents(cur):
                        if p != nullrev:
                            queue.append(p)
                else:
                    public.add(cur)

    def fastlog(repo, startrev, dirs, localmatch):
        filefunc = repo.changelog.readfiles
        for parent in lazyparents(startrev, public, parents):
            files = filefunc(parent)
            if dirmatches(files, dirs):
                yield parent
        repo.ui.debug("found common parent at %s\n" % repo[parent].hex())
        for rev in combinator(repo, parent, dirs, localmatch):
            yield rev

    def combinator(repo, rev, dirs, localmatch):
        """combinator(repo, rev, dirs, localmatch)
        Make parallel local and remote queries along ancestors of
        rev along path and combine results, eliminating duplicates,
        restricting results to those which match dirs
        """
        LOCAL = "L"
        REMOTE = "R"
        queue = util.queue(FASTLOG_QUEUE_SIZE + 100)
        hash = repo[rev].hex()

        local = LocalIteratorThread(queue, LOCAL, rev, dirs, localmatch, repo)
        remote = FastLogThread(queue, REMOTE, reponame, "hg", hash, dirs, repo)

        # Allow debugging either remote or local path
        debug = repo.ui.config("fastlog", "debug")
        if debug != "local":
            repo.ui.debug("starting fastlog at %s\n" % hash)
            remote.start()
        if debug != "remote":
            local.start()
        seen = set([rev])

        try:
            while True:
                try:
                    producer, success, msg = queue.get(True, 3600)
                except util.empty:
                    raise error.Abort("Timeout reading log data")
                if not success:
                    if producer == LOCAL:
                        raise error.Abort(msg)
                    elif msg:
                        repo.ui.log("hgfastlog", msg)
                        continue

                if msg is None:
                    # Empty message means no more results
                    return

                rev = msg
                if debug:
                    if producer == LOCAL:
                        repo.ui.debug("LOCAL:: %s\n" % msg)
                    elif producer == REMOTE:
                        repo.ui.debug("REMOTE:: %s\n" % msg)

                if rev not in seen:
                    seen.add(rev)
                    yield rev
        finally:
            local.stop()
            remote.stop()

    revgen = fastlog(repo, rev, dirs, dirmatches)
    fastlogset = smartset.generatorset(revgen, iterasc=False)
    # Optimization: typically for "reverse(:.) & follow(path)" used by
    # "hg log". The left side is more expensive, although it has smaller
    # "weight". Make sure fastlogset is on the left side to avoid slow
    # walking through ":.".
    if subset.isdescending():
        fastlogset.reverse()
        return fastlogset & subset
    return subset & fastlogset
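Note: the snippet relies on helpers defined elsewhere in the extension (lazyparents, dirmatches, LocalIteratorThread, FastLogThread). As a rough guide, dirmatches presumably checks a commit's changed files against the "/"-terminated prefixes built above; a minimal sketch under that assumption (hypothetical, not the extension's actual code):

def dirmatches(files, dirs):
    # Hypothetical sketch: report whether any changed file lies under one
    # of the "/"-terminated directory prefixes, or equals a bare file
    # entry (the fastlog.files case above).
    for f in files:
        for d in dirs:
            if d.endswith("/"):
                if f.startswith(d):
                    return True
            elif f == d:
                return True
    return False

# e.g. dirmatches(["fbcode/a.py", "README"], {"fbcode/"}) -> True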
Example No. 2
def redo(ui, repo, *args, **opts):
    """undo the last undo

    Reverse the effects of an :hg:`undo` operation.

    You can run :hg:`redo` multiple times to undo a series of :hg:`undo`
    commands. Alternatively, you can explicitly specify the number of
    :hg:`undo` commands to undo by providing a number as a positional argument.

    Specify --preview to see a graphical display that shows what your smartlog
    will look like after you run the command.

    For an interactive interface, run :hg:`undo --interactive`. This command
    enables you to visually step backwards and forwards in the undo history.
    Run :hg:`help undo` for more information.

    """
    shiftedindex = _computerelative(repo, 0)
    preview = opts.get("preview")

    branch = ""
    reverseindex = 0
    redocount = 0
    done = False
    while not done:
        # we step back through the linear undo log
        # redoes cancel out undoes; if we find one more undo than redo, we
        # should undo there, otherwise we keep looking
        # we are careful not to redo past absolute undoes (because we lose
        # undo/redo log info)
        # if we run into something that isn't an undo or a redo, we abort
        # (including gaps in the log)
        # we extract the --index arguments out of undoes to make sure we
        # update the undo/redo index correctly
        nodedict = _readindex(repo, reverseindex)
        commandstr = _readnode(repo, "command.i", nodedict["command"])
        commandlist = commandstr.split("\0")

        if "True" == nodedict["unfinished"]:
            # don't want to redo to an interupted state
            reverseindex += 1
        elif commandlist[0] == "undo":
            _args, undoopts = cliparser.parsecommand(
                commandlist,
                cmdtable["undo"][1] + commands.globalopts,
            )
            if redocount == 0:
                # want to go to state before the undo (not after)
                toshift = undoopts["step"]
                shiftedindex -= toshift
                reverseindex += 1
                branch = undoopts.get("branch")
                done = True
            else:
                if undoopts["absolute"]:
                    raise error.Abort(_("can't redo past absolute undo"))
                reverseindex += 1
                redocount -= 1
        elif commandlist[0] == "redo":
            redocount += 1
            reverseindex += 1
        else:
            raise error.Abort(_("nothing to redo"))

    if preview:
        _preview(ui, repo, reverseindex)
        return

    with repo.wlock(), repo.lock(), repo.transaction("redo"):
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        _tryundoto(ui, repo, reverseindex)
        # update undoredo by removing what the given undo added
        _logundoredoindex(repo, shiftedindex, branch)
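The scan above is easiest to follow on a toy log. A self-contained model of the redo-cancels-undo bookkeeping, assuming each entry is just a command name (the real index also records options and an "unfinished" flag):

def find_redo_target(commands):
    # commands[0] is the most recent operation; walk back until we have
    # seen one more "undo" than "redo", mirroring the loop above.
    redocount = 0
    for reverseindex, cmd in enumerate(commands, 1):
        if cmd == "undo":
            if redocount == 0:
                return reverseindex  # state before this undo
            redocount -= 1
        elif cmd == "redo":
            redocount += 1
        else:
            raise RuntimeError("nothing to redo")
    raise RuntimeError("nothing to redo")

# find_redo_target(["undo"]) == 1
# find_redo_target(["redo", "undo", "undo"]) == 3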
Example No. 3
def getdag(ui, repo, revs, master, template):

    knownrevs = set(revs)
    gpcache = {}
    results = []
    reserved = []

    # we store parents together with the parent type information
    # but sometimes we need just a list of parents
    # [(a,b), (c,d), (e,f)] => [b, d, f]
    def unzip(parents):
        if parents:
            return list(list(zip(*parents))[1])
        else:
            return list()

    simplifygrandparents = ui.configbool("log", "simplify-grandparents")
    cl = repo.changelog
    if cl.algorithmbackend != "segments":
        simplifygrandparents = False
    if simplifygrandparents:
        rootnodes = cl.tonodes(revs)

    revs = smartset.baseset(revs, repo=repo)
    revs.sort(reverse=True)
    ctxstream = revs.prefetchbytemplate(repo, template).iterctx()

    # For each rev we need to show, compute its parents in the dag.
    # If we have to reach for a grandparent, insert a fake node so we
    # can show '...' in the graph.
    # Use 'reversed' to start at the lowest commit so fake nodes are
    # placed at their lowest possible positions.
    for ctx in ctxstream:
        rev = ctx.rev()
        # Parents in the dag
        parents = sorted(
            set(
                [
                    (graphmod.PARENT, p.rev())
                    for p in ctx.parents()
                    if p.rev() in knownrevs
                ]
            )
        )
        # Parents not in the dag
        mpars = [
            p.rev()
            for p in ctx.parents()
            if p.rev() != nodemod.nullrev and p.rev() not in unzip(parents)
        ]

        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                if simplifygrandparents:
                    gp = gpcache[mpar] = cl.torevs(
                        cl.dageval(
                            lambda: headsancestors(
                                ancestors(cl.tonodes([mpar])) & rootnodes
                            )
                        )
                    )
                else:
                    gp = gpcache[mpar] = dagop.reachableroots(repo, revs, [mpar])
            if not gp:
                parents.append((graphmod.MISSINGPARENT, mpar))
            else:
                gp = [g for g in gp if g not in unzip(parents)]
                for g in gp:
                    parents.append((graphmod.GRANDPARENT, g))

        results.append((ctx.rev(), "C", ctx, parents))

    # Compute parent rev->parents mapping
    lookup = {}
    for r in results:
        lookup[r[0]] = unzip(r[3])

    def parentfunc(node):
        return lookup.get(node, [])

    # Compute the revs on the master line. We use this for sorting later.
    masters = set()
    queue = [master]
    while queue:
        m = queue.pop()
        if m not in masters:
            masters.add(m)
            queue.extend(lookup.get(m, []))

    # Topologically sort the noderev numbers. Note: unlike the vanilla
    # topological sorting, we move master to the top.
    order = sortnodes([r[0] for r in results], parentfunc, masters)
    order = dict((e[1], e[0]) for e in enumerate(order))

    # Sort the actual results based on their position in the 'order'
    try:
        results.sort(key=lambda x: order[x[0]], reverse=True)
    except ValueError:  # happens when 'order' is empty
        ui.warn(_("smartlog encountered an error\n"), notice=_("note"))
        ui.warn(_("(so the sorting might be wrong)\n\n"))
        results.reverse()

    # indent the top non-public stack
    if ui.configbool("smartlog", "indentnonpublic", False):
        rev, ch, ctx, parents = results[0]
        if ctx.phase() != phases.public:
            # find a public parent and add a fake node, so the non-public nodes
            # will be shown in the non-first column
            prev = None
            for i in range(1, len(results)):
                pctx = results[i][2]
                if pctx.phase() == phases.public:
                    prev = results[i][0]
                    break
            if prev:
                reserved.append(prev)

    return results, reserved
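sortnodes is defined elsewhere in the extension; one plausible shape for it is a Kahn-style topological sort that emits master-line revs last, so they land on top after the reverse=True sort above. A hypothetical sketch, not the extension's actual implementation:

def sortnodes(nodes, parentfunc, masters):
    # Hypothetical sketch: topological order with parents before
    # children, preferring non-master nodes so the master line floats
    # to the top once the caller reverses the order.
    nodes = set(nodes)
    childmap = {}
    indegree = dict((n, 0) for n in nodes)
    for n in nodes:
        for p in parentfunc(n):
            if p in nodes:
                childmap.setdefault(p, []).append(n)
                indegree[n] += 1
    ready = [n for n in nodes if indegree[n] == 0]
    result = []
    while ready:
        ready.sort(key=lambda n: (n in masters, n))
        n = ready.pop(0)
        result.append(n)
        for c in childmap.get(n, ()):
            indegree[c] -= 1
            if indegree[c] == 0:
                ready.append(c)
    return result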
Example No. 4
def hintundo():
    return _("you can undo this using the `hg undo` command")
Example No. 5
def hintundocorrupt(path=None):
    msg = _("undo history is corrupted\n")
    msg += _("(try deleting %s to recover)\n") % (path or ".hg/undolog")
    return msg
Example No. 6
def _tryundoto(ui, repo, reverseindex, keep=False, branch=None):
    try:
        _undoto(ui, repo, reverseindex, keep, branch)
    except error.RevlogError:
        ui.write_err(_("cannot undo: undo history is corrupted\n"))
Example No. 7
def _findnextdelta(repo, reverseindex, branch, direction):
    # finds the closest repo state making changes to branch in the given direction
    # input:
    #   repo: mercurial.localrepo
    #   reverseindex: positive int for index.i
    #   branch: string changectx (commit hash)
    #   direction: positive or negative int
    # output:
    #   int index with next branch delta
    #   this is the first repo state that makes a changectx, bookmark or working
    #   copy parent change that affects the given branch
    if direction == 0:  # guard against an infinite loop
        raise error.ProgrammingError
    # current state
    try:
        nodedict = _readindex(repo, reverseindex)
    except IndexError:
        raise error.Abort(_("index out of bounds"))
    alphaworkingcopyparent = _readnode(
        repo, "workingparent.i", nodedict["workingparent"]
    )
    alphabookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
    incrementalindex = reverseindex

    spec = revsetlang.formatspec("_localbranch(%s)", branch)
    hexnodes = tohexnode(repo, spec)

    done = False
    while not done:
        # move index
        incrementalindex += direction
        # check this index
        try:
            nodedict = _readindex(repo, incrementalindex)
        except IndexError:
            raise error.Abort(_("index out of bounds"))
        # skip interrupted commands
        if nodedict["unfinished"] == "True":
            break
        # check wkp, commits, bookmarks
        workingcopyparent = _readnode(
            repo, "workingparent.i", nodedict["workingparent"]
        )
        bookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
        # local changes in respect to visible changectxs
        # disjunctive union of present and old = changes
        # intersection of changes and local = localchanges
        localctxchanges = revsetlang.formatspec(
            "((olddraft(%d) + olddraft(%d)) -"
            "(olddraft(%d) and olddraft(%d)))"
            " and _localbranch(%s)",
            incrementalindex,
            reverseindex,
            incrementalindex,
            reverseindex,
            branch,
        )
        done = done or repo.revs(localctxchanges)
        if done:  # perf boost
            break
        # bookmark changes
        if alphabookstring != bookstring:
            diff = set(alphabookstring.split("\n")) ^ set(bookstring.split("\n"))
            for mark in diff:
                if mark:
                    kv = mark.rsplit(" ", 1)
                    # was or will the mark be in the localbranch
                    if kv[1] in hexnodes:
                        done = True
                        break

        # working copy parent changes
        # for workingcopyparent, only changes within the scope are interesting
        if alphaworkingcopyparent != workingcopyparent:
            done = done or (
                workingcopyparent in hexnodes and alphaworkingcopyparent in hexnodes
            )

    return incrementalindex
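The bookmark check above depends on the snapshot format being one "name hexnode" pair per line, so a symmetric set difference of the two snapshots yields every mark that moved, appeared, or disappeared. The same step on toy data:

alphabookstring = "main 1111\nfeature 2222"
bookstring = "main 1111\nfeature 3333\nwip 4444"

diff = set(alphabookstring.split("\n")) ^ set(bookstring.split("\n"))
for mark in sorted(diff):
    if mark:
        kv = mark.rsplit(" ", 1)  # (name, hexnode)
        print("bookmark %s differs at %s" % (kv[0], kv[1]))
# "feature" shows up twice (old and new node); "wip" once (added)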
Example No. 8
def hintcommitcloudswitch():
    return _(
        "if you would like to switch to the default workspace\n"
        "run `hg cloud join --switch -w default` inside the repo\n"
        "run `hg cloud list` to see all your workspaces and learn how to switch between them\n"
    )
Example No. 9
def unamend(ui, repo, **opts):
    """undo the last amend operation on the current commit

    Reverse the effects of an :hg:`amend` operation. Hides the current commit
    and checks out the previous version of the commit. :hg:`unamend` does not
    revert the state of the working copy, so changes that were added to the
    commit in the last amend operation become pending changes in the working
    copy.

    :hg:`unamend` cannot be run on amended commits that have children. In
    other words, you cannot unamend an amended commit in the middle of a
    stack.

    .. note::

        Running :hg:`unamend` is similar to running :hg:`undo --keep`
        immediately after :hg:`amend`. However, unlike :hg:`undo`, which can
        only undo an amend if it was the last operation you performed,
        :hg:`unamend` can unamend any draft amended commit in the graph that
        does not have children.

    .. container:: verbose

      Although :hg:`unamend` is typically used to reverse the effects of
      :hg:`amend`, it actually rolls back the current commit to its previous
      version, regardless of whether the changes resulted from an :hg:`amend`
      operation or from another operation, such as :hg:`rebase`.
    """
    unfi = repo.unfiltered()

    # identify the commit from which to unamend
    curctx = repo["."]

    # identify the commit to which to unamend
    if mutation.enabled(repo):
        prednodes = curctx.mutationpredecessors()
        if not prednodes:
            prednodes = []
    else:
        prednodes = [
            marker.prednode() for marker in predecessormarkers(curctx)
        ]

    if len(prednodes) != 1:
        e = _("changeset must have one predecessor, found %i predecessors")
        raise error.Abort(e % len(prednodes))
    prednode = prednodes[0]

    if prednode not in unfi:
        # Trigger autopull.
        autopull.trypull(unfi, [nodemod.hex(prednode)])

    predctx = unfi[prednode]

    if curctx.children():
        raise error.Abort(_("cannot unamend in the middle of a stack"))

    with repo.wlock(), repo.lock():
        ctxbookmarks = curctx.bookmarks()
        changedfiles = []
        wctx = repo[None]
        wm = wctx.manifest()
        cm = predctx.manifest()
        dirstate = repo.dirstate
        diff = cm.diff(wm)
        changedfiles.extend(pycompat.iterkeys(diff))

        tr = repo.transaction("unamend")
        with dirstate.parentchange():
            dirstate.rebuild(prednode, cm, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in pycompat.iteritems(diff):
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)
        changes = []
        for book in ctxbookmarks:
            changes.append((book, prednode))
        repo._bookmarks.applychanges(repo, tr, changes)
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            obsolete.createmarkers(repo, [(curctx, (predctx, ))])
        visibility.remove(repo, [curctx.node()])
        visibility.add(repo, [predctx.node()])
        tr.close()
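The dirstate fixup hinges on the shape of manifest diffs: cm.diff(wm) maps each differing path to ((oldnode, oldflags), (newnode, newflags)), where a None node on one side means the file is absent there. A small illustration of how the loop above reads that structure (toy values, assuming that shape):

diff = {
    "new.txt": ((None, ""), ("abc1", "")),     # absent in pred -> added
    "gone.txt": (("def2", ""), (None, "")),    # absent in wctx -> removed
    "mode.sh": (("aaa3", ""), ("aaa3", "x")),  # only flags changed
}
for filename, data in diff.items():
    if data[0][0] is None:
        print("dirstate.add(%r)" % filename)
    if data[1][0] is None:
        print("dirstate.remove(%r)" % filename)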
Example No. 10
def revsetdiff(repo, diffid):
    """Return a set of revisions corresponding to a given Differential ID """

    repo_callsign = repo.ui.config("phrevset", "callsign")
    if repo_callsign is None:
        msg = _("phrevset.callsign is not set - doing a linear search\n")
        hint = _("This will be slow if the diff was not committed recently\n")
        repo.ui.warn(msg)
        repo.ui.warn(hint)
        rev = localgetdiff(repo, diffid)
        if rev is None:
            raise error.Abort("Could not find diff D%s in changelog" % diffid)
        else:
            return [rev]

    revs, resp = search(repo, diffid)

    if revs is not None:
        # The log walk found the diff, nothing more to do
        return revs

    if resp is None:
        # The graphql query finished but didn't return anything
        return []

    vcs = resp["source_control_system"]

    repo.ui.debug("[diffrev] VCS is %s\n" % vcs)

    if vcs == "git":
        gitrev = parsedesc(repo, resp, ignoreparsefailure=False)
        repo.ui.debug("[diffrev] GIT rev is %s\n" % gitrev)

        peerpath = repo.ui.expandpath("default")
        remoterepo = hg.peer(repo, {}, peerpath)
        remoterev = remoterepo.lookup("_gitlookup_git_%s" % gitrev)

        repo.ui.debug("[diffrev] HG rev is %s\n" % remoterev.encode("hex"))
        if not remoterev:
            repo.ui.debug("[diffrev] Falling back to linear search\n")
            linear_search_result = localgetdiff(repo, diffid)
            if linear_search_result is None:
                # walked the entire repo and couldn't find the diff
                raise error.Abort("Could not find diff D%s in changelog" %
                                  diffid)

            return [linear_search_result]

        repo.ui.debug("[diffrev] HG rev is %s\n" % remoterev.encode("hex"))
        return [repo[remoterev].rev()]

    elif vcs == "hg":
        rev = parsedesc(repo, resp, ignoreparsefailure=True)
        if rev:
            # The response from phabricator contains a changeset ID.
            # Convert it back to a rev number.
            try:
                return [repo[rev].rev()]
            except error.RepoLookupError:
                # TODO: 's/svnrev/globalrev' after turning off Subversion
                # servers. We will know about this when we remove the `svnrev`
                # revset.
                #
                # Unfortunately the rev can also be a svnrev/globalrev :(.
                if rev.isdigit():
                    try:
                        return [r for r in repo.revs("svnrev(%s)" % rev)]
                    except error.RepoLookupError:
                        pass

                raise error.Abort("Landed commit for diff D%s not available "
                                  'in current repository: run "hg pull" '
                                  "to retrieve it" % diffid)

        # commit is still local, get its hash

        props = resp["phabricator_version_properties"]["edges"]
        commits = []
        for prop in props:
            if prop["node"]["property_name"] == "local:commits":
                commits = json.loads(prop["node"]["property_value"])

        revs = [c["commit"] for c in commits.values()]

        # verify all revisions exist in the current repo; if not, try to
        # find their counterpart by parsing the log
        results = set()
        for rev in revs:
            try:
                unfiltered = repo.unfiltered()
                node = unfiltered[rev]
            except error.RepoLookupError:
                raise error.Abort(
                    _("cannot find the latest version of D%s (%s) locally") %
                    (diffid, rev),
                    hint=_("try 'hg pull -r %s'") % rev,
                )
            successors = list(repo.revs("last(successors(%n))", node.node()))
            if len(successors) != 1:
                results.add(node.rev())
            else:
                results.add(successors[0])

        if not results:
            raise error.Abort("Could not find local commit for D%s" % diffid)

        return set(results)

    else:
        if not vcs:
            msg = (
                "D%s does not have an associated version control system\n"
                "You can view the diff at https:///our.internmc.facebook.com/intern/diff/D%s\n"
            )
            repo.ui.warn(msg % (diffid, diffid))

            return []
        else:
            raise error.Abort("Conduit returned unknown "
                              'sourceControlSystem "%s"' % vcs)
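localgetdiff is the linear fallback used twice above; presumably it scans commit messages for the conventional Phabricator "Differential Revision" trailer. A minimal sketch under that assumption (hypothetical; the real helper lives elsewhere in the phrevset extension):

import re

def localgetdiff(repo, diffid):
    # Hypothetical sketch: walk the changelog newest-first and return the
    # first rev whose description carries a trailer ending in D<diffid>.
    pattern = re.compile(
        r"^Differential Revision:.*D%s$" % re.escape(str(diffid)), re.MULTILINE
    )
    for rev in repo.revs("reverse(all())"):
        if pattern.search(repo[rev].description()):
            return rev
    return None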
Example No. 11
from edenscm.mercurial import cmdutil
from edenscm.mercurial.context import memctx
from edenscm.mercurial.i18n import _
from edenscm.mercurial.node import hex, nullid

from . import backup, backuplock
from .commands import command


@command(
    "debughiddencommit",
    [
        (
            "",
            "ignored-files",
            True,
            _("include ignored files"),
        ),
    ] + cmdutil.walkopts,
)
def debughiddencommit(ui, repo, *pats, **opts):
    """
    commit to commit cloud

    This command adds a commit to the commit cloud by committing
    locally, sending to commit cloud, then hiding it.

    Files in the working copy will not be changed.

    Commit hash is printed as a result of this command.
    """
    with backuplock.lock(repo), repo.wlock():
Example No. 12
    def _prompt(self):
        raise error.Abort(_("lfs.url needs to be configured"))
Example No. 13
    def checkblobs(self, pointers):
        for p in pointers:
            if not self.vfs.exists(p.oid()):
                raise LfsRemoteError(
                    _("LFS object %s is not uploaded to remote server") %
                    p.oid())
Example No. 14
    def _batch(self, pointers, localstore, action, objectnames=None):
        if action not in ["upload", "download"]:
            raise error.ProgrammingError("invalid Git-LFS action: %s" % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get("size", 0) for x in objects)
        perftrace.tracebytes("Size", total)
        sizes = {}
        for obj in objects:
            sizes[obj.get("oid")] = obj.get("size", 0)
        topic = {
            "upload": _("lfs uploading"),
            "download": _("lfs downloading")
        }[action]
        if self.ui.verbose and len(objects) > 1:
            self.ui.write(
                _("lfs: need to transfer %d objects (%s)\n") %
                (len(objects), util.bytecount(total)))

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get("size", 0)
                if self.ui.verbose:
                    if action == "download":
                        msg = _("lfs: downloading %s (%s)\n")
                    elif action == "upload":
                        msg = _("lfs: uploading %s (%s)\n")
                    self.ui.write(msg %
                                  (obj.get("oid"), util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        yield 0, obj.get("oid")
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get("oid")
                        break
                    except Exception as ex:
                        if retry > 0:
                            if self.ui.verbose:
                                self.ui.write(
                                    _("lfs: failed: %r (remaining retry %d)\n")
                                    % (ex, retry))
                            retry -= 1
                            continue
                        raise

        starttimestamp = util.timer()
        if action == "download":
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get("oid")),
                preferthreads=True,
                callsite="blobstore",
            )
        else:
            oids = transfer(objects)

        transferred = 0
        with progress.bar(self.ui,
                          topic,
                          _("bytes"),
                          total=total,
                          formatfunc=util.bytecount) as prog:
            for count, oid in oids:
                if count != 0:
                    transferred += sizes[oid]
                    if self.ui.verbose:
                        self.ui.write(_("lfs: processed: %s\n") % oid)
                if objectnames is not None:
                    prog.value = (transferred, objectnames.get(oid, ""))
                else:
                    prog.value = transferred

        currenttimestamp = util.timer()
        self._metrics["lfs_%s_size" % action] += total
        self._metrics["lfs_%s_time" % action] += (currenttimestamp - max(
            self._timestamp["latest_%s_timestamp" % action],
            starttimestamp)) * 1000
        self._timestamp["latest_%s_timestamp" % action] = currenttimestamp
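transfer() implements a small two-event protocol: it yields (0, oid) just before moving an object and (1, oid) once the move succeeds, so the consumer counts an object's bytes only on completion. The same accounting on a stub generator, independent of worker and the blob store:

def transfer(chunk):
    for oid in chunk:
        yield 0, oid  # about to transfer
        # ... the real code moves the blob here ...
        yield 1, oid  # transferred successfully

sizes = {"oid-a": 10, "oid-b": 32}
transferred = 0
for count, oid in transfer(["oid-a", "oid-b"]):
    if count != 0:
        transferred += sizes[oid]
print(transferred)  # 42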
Example No. 15
def addchangegroupfiles(orig, repo, source, revmap, trp, *args):
    if requirement not in repo.requirements:
        return orig(repo, source, revmap, trp, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    with progress.bar(repo.ui, _("files")) as prog:
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % f)
            prog.value += 1

            if not repo.shallowmatch(f):
                fl = repo.file(f)
                deltas = source.deltaiter()
                fl.addgroup(deltas, revmap, trp)
                continue

            chain = None
            while True:
                # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
                revisiondata = source.deltachunk(chain)
                if not revisiondata:
                    break

                chain = revisiondata[0]

                revisiondatas[(f, chain)] = revisiondata
                queue.append((f, chain))

                if f not in visited:
                    newfiles += 1
                    visited.add(f)

            if chain is None:
                raise error.Abort(_("received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Get rawtext by applying delta chains.
    @util.lrucachefunc
    def reconstruct(f, node):
        revisiondata = revisiondatas.get((f, node), None)
        if revisiondata is None:
            # Read from repo.
            return repo.file(f).revision(node, raw=False)
        else:
            # Apply delta-chain.
            # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
            deltabase, delta, flags = revisiondata[4:]
            if deltabase == nullid:
                base = ""
            else:
                if flags:
                    # LFS (flags != 0) should always use nullid as deltabase.
                    raise error.Abort("unexpected deltabase")
                base = reconstruct(f, deltabase)
            rawtext = mdiff.patch(base, delta)
            if isinstance(rawtext, pycompat.buffer):  # noqa
                rawtext = bytes(rawtext)
            return rawtext

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    maxskipcount = len(queue) + 1
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > maxskipcount:
            raise error.Abort(_("circular node dependency on ancestormap"))

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        # Deltas are always against flags=0 rawtext (see revdiff and its
        # callers), if deltabase is not nullid.
        if flags and deltabase != nullid:
            raise error.Abort("unexpected deltabase")

        rawtext = reconstruct(f, node)
        meta, text = shallowutil.parsemeta(rawtext, flags)
        if "copy" in meta:
            copyfrom = meta["copy"]
            copynode = bin(meta["copyrev"])
            if not available(f, node, copyfrom, copynode):
                continue

        if any(not available(f, node, f, p) for p in [p1, p2] if p != nullid):
            continue

        # Use addrawrevision so if it's already LFS, take it as-is, do not
        # re-calculate the LFS object.
        fl = repo.file(f)
        fl.addrawrevision(rawtext,
                          trp,
                          linknode,
                          p1,
                          p2,
                          node=node,
                          flags=flags)
        processed.add((f, node))
        skipcount = 0

    return len(revisiondatas), newfiles
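The interesting part of the loop above is the requeue discipline: available() pushes a revision back onto the front of the queue with its missing dependency ahead of it, while skipcount/maxskipcount bounds consecutive requeues so a dependency cycle aborts rather than spinning forever. A stripped-down model of the same pattern on plain data:

def process_in_dependency_order(items, deps):
    # items: names to process; deps: name -> prerequisite name (or None).
    queue = list(items)
    processed = set()
    skipcount, maxskipcount = 0, len(queue) + 1
    while queue:
        item = queue.pop(0)
        if item in processed:
            continue
        skipcount += 1
        if skipcount > maxskipcount:
            raise RuntimeError("circular dependency")
        dep = deps.get(item)
        if dep is not None and dep not in processed and dep in items:
            queue.insert(0, item)  # re-add self to the queue
            queue.insert(0, dep)   # dependency goes in front
            continue
        processed.add(item)
        skipcount = 0
    return processed

# process_in_dependency_order(["b", "a"], {"b": "a"}) handles a before b;
# process_in_dependency_order(["a", "b"], {"a": "b", "b": "a"}) raises.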
Example No. 16
File: hg.py Project: zerkella/eden
    def putcommit(self, files, copies, parents, commit, source, revmap, full,
                  cleanp2):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug("reusing %s from p2\n" % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            return context.memfilectx(self.repo, memctx, f, data, "l" in mode,
                                      "x" in mode, copies.get(f))

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
            parent = parents[0]

        # pad the parent list to at least two entries; nullid fills the gaps
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            try:
                oldrev = source.lookuprev(sha1)
                newrev = revmap.get(oldrev)
                if newrev is not None:
                    text = text.replace(sha1, newrev[:len(sha1)])
            except Exception:
                # Don't crash if we find a bad sha in the message
                continue

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config("convert", "hg.sourcename")
        if sourcename:
            extra["convert_source"] = sourcename

        for label in (
                "source",
                "transplant_source",
                "rebase_source",
                "intermediate-source",
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == "transplant_source":
                node = nodemod.hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == "transplant_source":
                    newrev = nodemod.bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra["branch"] = commit.branch
        if commit.rev and commit.saverev:
            extra["convert_revision"] = commit.rev

        unfi = self.repo
        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = unfi[p1]
            p2ctx = None
            if p2 != nodemod.nullid:
                p2ctx = unfi[p2]
            fileset = set(files)
            if full:
                fileset.update(unfi[p1])
                fileset.update(unfi[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                unfi,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            unfi.ui.setconfig("phases", "new-commit",
                              phases.phasenames[commit.phase], "convert")

            with unfi.transaction("convert") as tr:
                node = nodemod.hex(unfi.commitctx(ctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = unfi[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(unfi, tr, phases.draft,
                                           [ctx.node()])

            text = "(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            mfl = unfi.manifestlog
            mnode = unfi.changelog.read(nodemod.bin(p2))[0]
            closed = "close" in commit.extra
            if not closed and not mfl[m1node].read().diff(mfl[mnode].read()):
                self.ui.status(_("filtering out empty revision\n"))
                unfi.rollback(force=True)
                return parent
        return p2
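For a source commit with more than two parents, the while loop above linearizes the octopus merge: the first memctx merges the first two parents, and each remaining parent is folded in by an "(octopus merge fixup)" commit whose first parent is the node just created. A toy model of the parent pairing this produces:

def pairwise_merges(parents):
    # Model of the loop above: fold N parents into chained two-parent
    # commits and return the (p1, p2) pair committed at each step.
    parents = list(parents)
    while len(parents) < 2:
        parents.append("null")
    pairs = []
    p2 = parents.pop(0)
    step = 0
    while parents:
        p1, p2 = p2, parents.pop(0)
        step += 1
        pairs.append((p1, p2))
        p2 = "merge%d" % step  # stands in for the committed node
    return pairs

# pairwise_merges(["a", "b", "c"]) == [("a", "b"), ("merge1", "c")]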
Example No. 17
        return result


# Visualize

"""debug commands and instrumentation for the undo extension

Adds the `debugundohistory` and `debugundosmartlog` commands to visualize
operational history and to give a preview of how undo will behave.
"""


@command(
    "debugundohistory",
    [
        ("n", "index", 0, _("details about specific operation")),
        ("l", "list", False, _("list recent undo-able operation")),
    ],
)
def debugundohistory(ui, repo, *args, **opts):
    """Print operational history
    0 is the most recent operation
    """
    if repo is not None:
        if opts.get("list"):
            if args and args[0].isdigit():
                offset = int(args[0])
            else:
                offset = 0
            _debugundolist(ui, repo, offset)
        else:
Example No. 18
def _walk(self, match, event):
    """Replacement for filesystem._walk, hooking into Watchman.

    Whenever listignored is False and the Watchman client is available, use
    Watchman combined with saved state to possibly return only a subset of
    files."""

    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            raise fsmonitorfallback("no clock")
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = "c:0:0"
        notefiles = []

    ignore = self.dirstate._ignore

    # experimental config: experimental.fsmonitor.skipignore
    if not self._ui.configbool("experimental", "fsmonitor.skipignore"):
        if (ignorehash and _hashignore(ignore) != ignorehash
                and clock != "c:0:0"):
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                raise fsmonitorfallback("ignore rules changed")
            notefiles = []
            clock = "c:0:0"

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self.dirstate._map
    if util.safehasattr(dmap, "_map"):
        # for better performance, directly access the inner dirstate map if the
        # standard dirstate implementation is in use.
        dmap = dmap._map
    if "treestate" in self._repo.requirements:
        # treestate has a fast path to filter out ignored directories.
        ignorevisitdir = self.dirstate._ignore.visitdir

        def dirfilter(path):
            result = ignorevisitdir(path.rstrip("/"))
            return result == "all"

        nonnormalset = self.dirstate._map.nonnormalsetfiltered(dirfilter)
    else:
        nonnormalset = self.dirstate._map.nonnormalset

    event["old_clock"] = clock
    event["old_files"] = blackbox.shortlist(nonnormalset)

    copymap = self.dirstate._map.copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self.dirstate._join
    normcase = util.normcase
    fresh_instance = False

    exact = False
    if match.isexact():  # match.exact
        exact = True

    if not exact and self.dirstate._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self.dirstate._normalizefile
    else:
        normalize = None

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command(
            "query",
            {
                "fields": ["mode", "mtime", "size", "exists", "name"],
                "since": clock,
                "expression": [
                    "not",
                    ["anyof", ["dirname", ".hg"], ["name", ".hg", "wholename"]],
                ],
                "sync_timeout": int(state.timeout * 1000),
                "empty_on_fresh_instance": state.walk_on_invalidate,
            },
        )
    except Exception as ex:
        event["is_error"] = True
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        # XXX: Legacy scuba logging. Remove this once the source of truth
        # is moved to the Rust Event.
        self._ui.log("fsmonitor_status", fsmonitor_status="exception")
        if self._ui.configbool("fsmonitor", "fallback-on-watchman-exception"):
            raise fsmonitorfallback("exception during run")
        else:
            raise ex
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        event["new_clock"] = result["clock"]
        event["is_fresh"] = result["is_fresh_instance"]
        state.setlastclock(result["clock"])
        state.setlastisfresh(result["is_fresh_instance"])
        if result["is_fresh_instance"]:
            if not self._ui.plain() and self._ui.configbool(
                    "fsmonitor", "warn-fresh-instance"):
                oldpid = _watchmanpid(event["old_clock"])
                newpid = _watchmanpid(event["new_clock"])
                if oldpid is not None and newpid is not None:
                    self._ui.warn(
                        _("warning: watchman has recently restarted (old pid %s, new pid %s) - operation will be slower than usual\n"
                          ) % (oldpid, newpid))
                elif oldpid is None and newpid is not None:
                    self._ui.warn(
                        _("warning: watchman has recently started (pid %s) - operation will be slower than usual\n"
                          ) % (newpid, ))
                else:
                    self._ui.warn(
                        _("warning: watchman failed to catch up with file change events and requires a full scan - operation will be slower than usual\n"
                          ))

            if state.walk_on_invalidate:
                state.invalidate(reason="fresh_instance")
                raise fsmonitorfallback("fresh instance")
            fresh_instance = True
            # Ignore any prior notable files from the state info
            notefiles = []
        else:
            count = len(result["files"])
            state.setwatchmanchangedfilecount(count)
            event["new_files"] = blackbox.shortlist(
                (e["name"] for e in result["files"]), count)
        # XXX: Legacy scuba logging. Remove this once the source of truth
        # is moved to the Rust Event.
        if event["is_fresh"]:
            self._ui.log("fsmonitor_status", fsmonitor_status="fresh")
        else:
            self._ui.log("fsmonitor_status", fsmonitor_status="normal")

    results = {}

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = dict((normcase(k), k) for k in results)

    switch_slashes = pycompat.ossep == "\\"
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files.  We use this property to reconcile deletes
    # for name case changes.
    ignorelist = []
    ignorelistappend = ignorelist.append
    for entry in result["files"]:
        fname = entry["name"]
        if _fixencoding:
            fname = _watchmantofsencoding(fname)
        if switch_slashes:
            fname = fname.replace("\\", "/")
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry["mode"]
        fexists = entry["exists"]
        kind = getkind(fmode)

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted.  If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if (fname not in results and fname in dmap
                    and (matchalways or matchfn(fname))):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            else:
                ignored = ignore(fname)
                if ignored:
                    ignorelistappend(fname)
                if (matchalways or matchfn(fname)) and not ignored:
                    results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None
        elif fname in match.files():
            match.bad(fname, filesystem.badtype(kind))

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = set((normalize(f, True, True) for f in notefiles
                         if normcase(f) not in foldmap))
    visit = set((f for f in notefiles if (
        f not in results and matchfn(f) and (f in dmap or not ignore(f)))))

    if not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in nonnormalset
                         if f not in results and matchfn(f))
            visit.update(f for f in copymap if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(f for f in dmap if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in dmap if f not in results and matchfn(f))
            visit.update(f for f in copymap if f not in results and matchfn(f))

    # audit returns False for paths with one of its parent directories being a
    # symlink.
    audit = pathutil.pathauditor(self.dirstate._root, cached=True).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    droplist = []
    droplistappend = droplist.append
    for f in auditfail:
        # Paths that fail the audit should be treated as not existing in
        # the working copy.
        filestate = dmap.get(f, ("?", ))[0]
        if filestate in ("?", ):
            # do not exist in working parents, remove them from treestate and
            # avoid walking through them.
            droplistappend(f)
            results.pop(f, None)
        else:
            # tracked, mark as deleted
            results[f] = None

    nf = iter(auditpass)
    for st in util.statfiles([join(f) for f in auditpass]):
        f = next(nf)
        if (st and not ignore(f)) or f in dmap:
            results[f] = st
        elif not st:
            # '?' (untracked) file was deleted from the filesystem - remove it
            # from treestate.
            #
            # We can only update the dirstate (and treestate) while holding the
            # wlock. That happens inside poststatus.__call__ -> state.set. So
            # buffer what files to "drop" so state.set can clean them up.
            entry = dmap.get(f, None)
            if entry and entry[0] == "?":
                droplistappend(f)
    # The droplist and ignorelist need to match setlastclock()
    state.setdroplist(droplist)
    state.setignorelist(ignorelist)

    results.pop(".hg", None)
    return pycompat.iteritems(results)
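The query built above can be reproduced against a live Watchman with the standalone pywatchman client; a minimal sketch, assuming Watchman is already watching the repository root at a hypothetical path (the field list and .hg exclusion mirror the query in _walk):

import pywatchman

client = pywatchman.client(timeout=10.0)
result = client.query(
    "query",
    "/path/to/repo",  # hypothetical watched root
    {
        "fields": ["mode", "mtime", "size", "exists", "name"],
        "since": "c:0:0",  # NULL clock: report every file
        "expression": [
            "not",
            ["anyof", ["dirname", ".hg"], ["name", ".hg", "wholename"]],
        ],
    },
)
for entry in result["files"]:
    print(entry["name"], entry["exists"])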
Example No. 19
def _undoto(ui, repo, reverseindex, keep=False, branch=None):
    # undo to specific reverseindex
    # branch is a changectx hash (potentially short form)
    # which identifies its branch via localbranch revset

    if branch and repo.ui.configbool("experimental", "narrow-heads"):
        raise error.Abort(
            _("'undo --branch' is no longer supported in the current setup")
        )

    try:
        nodedict = _readindex(repo, reverseindex)
    except IndexError:
        raise error.Abort(_("index out of bounds"))

    # bookmarks
    bookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
    booklist = bookstring.split("\n")
    if branch:
        spec = revsetlang.formatspec("_localbranch(%s)", branch)
        branchcommits = tohexnode(repo, spec)
    else:
        branchcommits = False

    # copy implementation for bookmarks
    itercopy = []
    for mark in pycompat.iteritems(repo._bookmarks):
        itercopy.append(mark)
    bmremove = []
    for mark in itercopy:
        if not branchcommits or hex(mark[1]) in branchcommits:
            bmremove.append((mark[0], None))
    repo._bookmarks.applychanges(repo, repo.currenttransaction(), bmremove)
    bmchanges = []
    for mark in booklist:
        if mark:
            kv = mark.rsplit(" ", 1)
            if not branchcommits or kv[1] in branchcommits or (kv[0], None) in bmremove:
                bmchanges.append((kv[0], bin(kv[1])))
    repo._bookmarks.applychanges(repo, repo.currenttransaction(), bmchanges)

    # working copy parent
    workingcopyparent = _readnode(repo, "workingparent.i", nodedict["workingparent"])
    if not keep:
        if not branchcommits or workingcopyparent in branchcommits:
            # bailifchanged is run, so this should be safe
            hg.clean(repo, workingcopyparent, show_stats=False)
    elif not branchcommits or workingcopyparent in branchcommits:
        # keeps working copy files
        prednode = bin(workingcopyparent)
        predctx = repo[prednode]

        changedfiles = []
        wctx = repo[None]
        wctxmanifest = wctx.manifest()
        predctxmanifest = predctx.manifest()
        dirstate = repo.dirstate
        diff = predctxmanifest.diff(wctxmanifest)
        changedfiles.extend(pycompat.iterkeys(diff))

        with dirstate.parentchange():
            dirstate.rebuild(prednode, predctxmanifest, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in pycompat.iteritems(diff):
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)

    # visible changesets
    addedrevs = revsetlang.formatspec("olddraft(0) - olddraft(%d)", reverseindex)
    removedrevs = revsetlang.formatspec("olddraft(%d) - olddraft(0)", reverseindex)
    if not branch:
        if repo.ui.configbool("experimental", "narrow-heads"):
            # Assuming mutation and visibility are used. Restore visibility heads
            # directly.
            _restoreheads(repo, reverseindex)
        else:
            # Legacy path.
            smarthide(repo, addedrevs, removedrevs)
            revealcommits(repo, removedrevs)
    else:
        localadds = revsetlang.formatspec(
            "(olddraft(0) - olddraft(%d)) and" " _localbranch(%s)", reverseindex, branch
        )
        localremoves = revsetlang.formatspec(
            "(olddraft(%d) - olddraft(0)) and" " _localbranch(%s)", reverseindex, branch
        )
        smarthide(repo, localadds, removedrevs)
        smarthide(repo, addedrevs, localremoves, local=True)
        revealcommits(repo, localremoves)

    # informative output
    time = _readnode(repo, "date.i", nodedict["date"])
    time = util.datestr([float(x) for x in time.split(" ")])

    nodedict = _readindex(repo, reverseindex - 1)
    commandstr = _readnode(repo, "command.i", nodedict["command"])
    commandlist = commandstr.split("\0")[1:]
    commandstr = " ".join(commandlist)
    uimessage = _("undone to %s, before %s\n") % (time, commandstr)
    if reverseindex == 1 and any(cmd in ("commit", "amend") for cmd in commandlist):
        command = "commit"
        if any(cmd in ("amend", "--amend") for cmd in commandlist):
            command = "amend"
        oldcommithash = _readnode(repo, "workingparent.i", nodedict["workingparent"])
        shorthash = short(bin(oldcommithash))
        hintutil.trigger("undo-uncommit-unamend", command, shorthash)
    repo.ui.status(uimessage)
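
A note on the revset construction above: revsetlang.formatspec quotes each
argument before splicing it into the expression, so values cannot be misparsed
as revset syntax. A minimal sketch of that behavior (assuming the edenscm
package layout used by these snippets; the outputs in comments are
illustrative):

from edenscm.mercurial import revsetlang

# %d formats an integer, %s a quoted string
spec = revsetlang.formatspec("olddraft(0) - olddraft(%d)", 3)
# -> "olddraft(0) - olddraft(3)"
spec = revsetlang.formatspec("_localbranch(%s)", "my branch")
# -> "_localbranch('my branch')" -- the value is quoted, not parsed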
Example No. 20
    def _fspostpendingfixup(self, oldid, changed, startclock, match):
        """update dirstate for files that are actually clean"""
        try:
            repo = self.dirstate._repo

            istreestate = "treestate" in self.dirstate._repo.requirements

            # Only update fsmonitor state if the results aren't filtered
            isfullstatus = not match or match.always()

            # Updating the dirstate is optional so we don't wait on the
            # lock.
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock. This is a bit weird because we're inside the
            # dirstate that is no longer valid.

            # If watchman reports fresh instance, still take the lock,
            # since not updating watchman state leads to very painful
            # performance.
            freshinstance = False
            try:
                freshinstance = self._fs._fsmonitorstate._lastisfresh
            except Exception:
                pass
            if freshinstance:
                repo.ui.debug(
                    "poststatusfixup decides to wait for wlock since watchman reported fresh instance\n"
                )
            with repo.disableeventreporting(), repo.wlock(freshinstance):
                # The dirstate may have been reloaded after the wlock
                # was taken, so load it again.
                newdirstate = repo.dirstate
                if newdirstate.identity() == oldid:
                    # Invalidate fsmonitor.state if dirstate changes. This avoids the
                    # following issue:
                    # 1. pid 11 writes dirstate
                    # 2. pid 22 reads dirstate and inconsistent fsmonitor.state
                    # 3. pid 22 calculates a wrong state
                    # 4. pid 11 writes fsmonitor.state
                    # This race is prevented because, before step 1 can occur,
                    # 0. pid 11 invalidates fsmonitor.state
                    # is guaranteed to have happened.
                    #
                    # To avoid race conditions when reading without a lock, do things
                    # in this order:
                    # 1. Invalidate fsmonitor state
                    # 2. Write dirstate
                    # 3. Write fsmonitor state
                    if isfullstatus:
                        if not istreestate:
                            # Treestate is always in sync and doesn't need this
                            # validation.
                            self._fsmonitorstate.invalidate(
                                reason="dirstate_change")
                        else:
                            # Treestate records the fsmonitor state inside the
                            # dirstate, so we need to write it before we call
                            # newdirstate.write()
                            self._updatefsmonitorstate(changed, startclock)

                    self._marklookupsclean()

                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    #
                    # This is a no-op if dirstate is not dirty.
                    tr = repo.currenttransaction()
                    newdirstate.write(tr)

                    # In non-treestate mode write the fsmonitorstate after the
                    # dirstate to avoid the race condition mentioned in the
                    # comment above. In treestate mode this race condition
                    # doesn't exist, and the state is written earlier, so we can
                    # skip it here.
                    if isfullstatus and not istreestate:
                        self._updatefsmonitorstate(changed, startclock)
                else:
                    if freshinstance:
                        repo.ui.write_err(
                            _("warning: failed to update watchman state because dirstate has been changed by other processes\n"
                              ))
                        repo.ui.write_err(dirstatemod.slowstatuswarning)

                    # in this case, writing changes out breaks
                    # consistency, because .hg/dirstate was
                    # already changed simultaneously after last
                    # caching (see also issue5584 for detail)
                    repo.ui.debug(
                        "skip updating dirstate: identity mismatch\n")
        except error.LockError:
            if freshinstance:
                repo.ui.write_err(
                    _("warning: failed to update watchman state because wlock cannot be obtained\n"
                      ))
                repo.ui.write_err(dirstatemod.slowstatuswarning)
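
The wlock/identity dance in _fspostpendingfixup is an optimistic-concurrency
pattern: take the lock, re-read the state, and only write if nothing changed
since the first unlocked read. A simplified, hypothetical sketch of just that
pattern (the names below are illustrative, not the real dirstate API):

def write_if_unchanged(lock, read_state, old_identity, write_state):
    # Re-check identity under the lock so we never clobber a state
    # written by another process after our first, unlocked read.
    with lock:
        state = read_state()
        if state.identity() != old_identity:
            return False  # changed underneath us; skip to stay consistent
        write_state(state)
        return True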
Example No. 21
def _preview(ui, repo, reverseindex):
    # Print smartlog like preview of undo
    # Input:
    #   ui:
    #   repo: mercurial.localrepo
    # Output:
    #   returns 1 on index error, 0 otherwise

    # override "UNDOINDEX" as a variable usable in template
    if not _gapcheck(ui, repo, reverseindex):
        repo.ui.status(_("WARN: missing history between present and this" " state\n"))
    overrides = {("templates", "UNDOINDEX"): str(reverseindex)}

    opts = {}
    opts["template"] = "{undopreview}"

    try:
        nodedict = _readindex(repo, reverseindex)
        # compare against the current state (index 0), not against itself
        curdict = _readindex(repo, 0)
    except IndexError:
        return 1

    bookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
    oldmarks = bookstring.split("\n")
    oldpairs = set()
    for mark in oldmarks:
        kv = mark.rsplit(" ", 1)
        if len(kv) == 2:
            # keep (bookmark name, hash) together so hashes can be extracted
            oldpairs.add(tuple(kv))
    bookstring = _readnode(repo, "bookmarks.i", curdict["bookmarks"])
    curmarks = bookstring.split("\n")
    curpairs = set()
    for mark in curmarks:
        kv = mark.rsplit(" ", 1)
        if len(kv) == 2:
            curpairs.add(tuple(kv))

    diffpairs = oldpairs.symmetric_difference(curpairs)
    # extract hashes from diffpairs

    bookdiffs = [kv[1] for kv in diffpairs]

    revstring = revsetlang.formatspec(
        "ancestor(olddraft(0), olddraft(%s)) +"
        "(draft() & ::((olddraft(0) - olddraft(%s)) + "
        "(olddraft(%s) - olddraft(0)) + %ls + '.' + "
        "oldworkingcopyparent(%s)))",
        reverseindex,
        reverseindex,
        reverseindex,
        bookdiffs,
        reverseindex,
    )

    opts["rev"] = [revstring]
    try:
        with ui.configoverride(overrides):
            cmdutil.graphlog(ui, repo, None, opts)
        # informative output
        nodedict = _readindex(repo, reverseindex)
        time = _readnode(repo, "date.i", nodedict["date"])
        time = util.datestr([float(x) for x in time.split(" ")])
    except IndexError:
        # don't print anything
        return 1

    try:
        nodedict = _readindex(repo, reverseindex - 1)
        commandstr = _readnode(repo, "command.i", nodedict["command"])
        commandlist = commandstr.split("\0")[1:]
        commandstr = " ".join(commandlist)
        uimessage = _("undo to %s, before %s\n") % (time, commandstr)
        repo.ui.status(uimessage)
    except IndexError:
        repo.ui.status(_("most recent state: undoing here won't change" " anything\n"))
    return 0
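
For reference, symmetric_difference above returns the elements present in
exactly one of the two sets, which is what isolates the bookmarks that differ
between the two undo states:

old = {("book1", "abcd12")}
cur = {("book1", "ef3456")}
old.symmetric_difference(cur)
# -> {("book1", "abcd12"), ("book1", "ef3456")}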
Example No. 22
    def requestpacks(self, fileids, fetchdata, fetchhistory):
        if not self.remotecache.connected:
            self.connect()
        perftrace.traceflag("packs")
        cache = self.remotecache
        fileslog = self.repo.fileslog

        total = len(fileids)
        totalfetches = 0
        if fetchdata:
            totalfetches += total
        if fetchhistory:
            totalfetches += total
        with progress.bar(
            self.ui, _("fetching from memcache"), total=totalfetches
        ) as prog:
            # generate `get` keys and make data request
            getkeys = [file + "\0" + node for file, node in fileids]
            if fetchdata:
                cache.getdatapack(getkeys)
            if fetchhistory:
                cache.gethistorypack(getkeys)

            # receive both data and history
            misses = []
            try:
                allmisses = set()
                if fetchdata:
                    allmisses.update(cache.receive(prog))
                    fileslog.contentstore.markforrefresh()
                if fetchhistory:
                    allmisses.update(cache.receive(prog))
                    fileslog.metadatastore.markforrefresh()

                misses = [key.split("\0") for key in allmisses]
                perftrace.tracevalue("Memcache Misses", len(misses))
            except CacheConnectionError:
                misses = fileids
                self.ui.warn(
                    _(
                        "warning: cache connection closed early - "
                        + "falling back to server\n"
                    )
                )

            global fetchmisses
            missedfiles = len(misses)
            fetchmisses += missedfiles

            fromcache = total - missedfiles
            self.ui.log(
                "remotefilelog",
                "remote cache hit rate is %r of %r\n",
                fromcache,
                total,
                hit=fromcache,
                total=total,
            )

        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missedfiles > 0:
                self._fetchpackfiles(misses, fetchdata, fetchhistory)
        finally:
            os.umask(oldumask)
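
The umask save/restore around the fetch keeps newly written cache files
group-writable even if the fetch raises. The same idiom can be wrapped as a
context manager; a small sketch (the helper name is made up):

import os
from contextlib import contextmanager

@contextmanager
def umask(mask):
    # Apply a umask for the duration of a block, always restoring the
    # previous value, even when the block raises.
    old = os.umask(mask)
    try:
        yield
    finally:
        os.umask(old)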
Example No. 23
def hintuncommit(command, oldhash):
    return _(
        "undoing %ss discards their changes.\n"
        "to restore the changes to the working copy, run 'hg revert -r %s --all'\n"
        "in the future, you can use 'hg un%s' instead of 'hg undo' to keep changes"
    ) % (command, oldhash, command)
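
With command="amend" and a hypothetical hash "3a5b2c", the hint above renders
as:

    undoing amends discards their changes.
    to restore the changes to the working copy, run 'hg revert -r 3a5b2c --all'
    in the future, you can use 'hg unamend' instead of 'hg undo' to keep changes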
Example No. 24
    showopts = {
        "template":
        "Dropping changeset "
        '{shortest(node, 6)}{if(bookmarks, " ({bookmarks})")}'
        ": {desc|firstline}\n"
    }
    displayer = cmdutil.show_changeset(ui, repo, showopts)
    displayer.show(repo[revid])
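    # For a changeset with a bookmark, the template above prints, e.g.
    # (hash, bookmark, and description here are hypothetical):
    #   Dropping changeset 3a5b2c (mybook): fix parsing of empty input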


def extsetup(ui):
    global rebasemod
    rebasemod = _checkextension("rebase", ui)


@command("drop", [("r", "rev", [], _("revision to drop"))],
         _("hg drop [OPTION] [REV]"))
def drop(ui, repo, *revs, **opts):
    """drop changeset from stack
    """
    if not rebasemod:
        raise error.Abort(_("required extensions not detected"))

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    revs = scmutil.revrange(repo, list(revs) + opts.get("rev"))
    if not revs:
        raise error.Abort(_("no revision to drop was provided"))

    # currently drop supports dropping only one changeset at a time
Example No. 25
def undo(ui, repo, *args, **opts):
    """undo the last local command

    Reverse the effects of the last local command. A local command is one that
    changed the currently checked out commit, that modified the contents of
    local commits, or that changed local bookmarks. Examples of local commands
    include :hg:`checkout`, :hg:`commit`, :hg:`amend`, and :hg:`rebase`.

    You cannot use :hg:`undo` to undo uncommitted changes in the working copy,
    or changes to remote bookmarks.

    You can run :hg:`undo` multiple times to undo a series of local commands.
    Alternatively, you can explicitly specify the number of local commands to
    undo using --step. This number can also be specified as a positional
    argument.

    To undo the effects of :hg:`undo`, run :hg:`redo`. Run :hg:`help redo` for
    more information.

    Include --keep to preserve the state of the working copy. For example,
    specify --keep when running :hg:`undo` to reverse the effects of a
    :hg:`commit` or :hg:`amend` operation while still preserving changes
    in the working copy. These changes will appear as pending changes.

    Specify --preview to see a graphical display that shows what your smartlog
    will look like after you run the command. Specify --interactive for an
    interactive version of this preview in which you can step backwards and
    forwards in the undo history.

    .. note::

       :hg:`undo` cannot be used with non-local commands, or with commands
       that are read-only. :hg:`undo` will skip over these commands in the
       undo history.

       For hybrid commands that result in both local and remote changes,
       :hg:`undo` will undo the local changes, but not the remote changes.
       For example, `hg pull --rebase` might move remote/master and also
       rebase local commits. In this situation, :hg:`undo` will revert the
       rebase, but not the change to remote/master.

    .. container:: verbose

        Branch limits the scope of an undo to a group of local (draft)
        changectxs, identified by any one member of this group.
    """
    reverseindex = opts.get("step")
    relativeundo = not opts.get("absolute")
    keep = opts.get("keep")
    branch = opts.get("branch")
    preview = opts.get("preview")
    interactive = opts.get("interactive")
    if interactive and interactiveui is None:
        raise error.Abort(_("interactive ui is not supported on Windows"))
    if interactive:
        preview = True

    if branch and reverseindex != 1 and reverseindex != -1:
        raise error.Abort(_("--branch with --index not supported"))
    if relativeundo:
        try:
            reverseindex = _computerelative(
                repo, reverseindex, absolute=not relativeundo, branch=branch
            )
        except IndexError:
            raise error.Abort(
                _("cannot undo this far - undo extension was not" " enabled")
            )

    if branch and preview:
        raise error.Abort(_("--branch with --preview not supported"))

    if interactive:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        class undopreview(interactiveui.viewframe):
            def render(self):
                ui = self.ui
                ui.pushbuffer()
                return_code = _preview(ui, self.repo, self.index)
                if return_code == 1:
                    if self.index < 0:
                        self.index += 1
                        repo.ui.status(_("Already at newest repo state\a\n"))
                    elif self.index > 0:
                        self.index -= 1
                        repo.ui.status(_("Already at oldest repo state\a\n"))
                    _preview(ui, self.repo, self.index)
                text = ui.config(
                    "undo",
                    "interactivehelptext",
                    "legend: red - to hide; green - to revive\n",
                )
                repo.ui.status(text)
                repo.ui.status(
                    _("<-: newer  " "->: older  " "q: abort  " "enter: confirm\n")
                )
                return ui.popbuffer()

            def rightarrow(self):
                self.index += 1

            def leftarrow(self):
                self.index -= 1

            def enter(self):
                del opts["preview"]
                del opts["interactive"]
                opts["absolute"] = "absolute"
                opts["step"] = self.index
                undo(ui, repo, *args, **opts)
                return

        viewobj = undopreview(ui, repo, reverseindex)
        interactiveui.view(viewobj)
        return
    elif preview:
        _preview(ui, repo, reverseindex)
        return

    with repo.wlock(), repo.lock(), repo.transaction("undo"):
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        if not (opts.get("force") or _gapcheck(ui, repo, reverseindex)):
            raise error.Abort(_("attempted risky undo across" " missing history"))
        _tryundoto(ui, repo, reverseindex, keep=keep, branch=branch)

        # store undo data
        # for absolute undos, think of this as a reset
        # for relative undos, think of this as an update
        _logundoredoindex(repo, reverseindex, branch)
Example No. 26
def _checkextension(name, ui):
    try:
        return extensions.find(name)
    except KeyError:
        ui.warn(_("extension %s not found\n") % name)
        return None
Example No. 27
    """Date.  Returns one of two values depending on whether the date provided
    is in the past and recent or not."""
    date = templater.evalfuncarg(context, mapping, args[0])
    threshold = templater.evalinteger(context, mapping, args[1])
    now = time.time()
    then = date[0]
    if now - threshold <= then <= now:
        return templater.evalstring(context, mapping, args[2])
    else:
        return templater.evalstring(context, mapping, args[3])


@command(
    "smartlog|sl|slog|sm|sma|smar|smart|smartl|smartlo",
    [
        ("", "master", "", _("master bookmark"), _("BOOKMARK")),
        ("r", "rev", [], _("show the specified revisions or range"), _("REV")),
        ("", "all", False, _("don't hide old local changesets"), ""),
        ("", "commit-info", False, _("show changes in current changeset"), ""),
    ]
    + logopts,
    _("[OPTION]... [[-r] REV]"),
)
def smartlog(ui, repo, *pats, **opts):
    """show a graph of the commits that are relevant to you

    Includes:

    - Your local commits
    - The master bookmark for your repository
    - Any commits with local bookmarks
Example No. 28
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        # type: (Sequence[int], Mapping[bytes, int], bool, Any, MutableMapping[str, Any], Any) -> Iterable[bytes]
        """
        - `commonrevs` is the set of known commits on both sides
        - `clrevorder` is a mapping from cl node to rev number, used for
                       determining which commit is newer.
        - `mfs` is the potential manifest nodes to send,
                with maps to their linknodes
                { manifest root node -> link node }
        - `fnodes` is a mapping of { filepath -> { node -> clnode } }
                If fastpathlinkrev is false, we are responsible for populating
                fnodes.
        """
        # If we're not using the fastpath, then all the trees will be necessary
        # so we can inspect which files changed and need to be sent. So let's
        # bulk fetch the trees up front.
        repo = self._repo

        if self._cansendflat(mfs.keys()):
            # In this code path, generating the manifests populates fnodes for
            # us.
            chunks = super(shallowcg1packer,
                           self).generatemanifests(commonrevs, clrevorder,
                                                   fastpathlinkrev, mfs,
                                                   fnodes, source)
            for chunk in chunks:
                yield chunk
        else:
            # If not using the fast path, we need to discover what files to send
            if not fastpathlinkrev:
                # If we're sending files, we need to process the manifests
                filestosend = self.shouldaddfilegroups(source)
                if filestosend is not NoFiles:
                    mflog = repo.manifestlog
                    with progress.bar(repo.ui, _("manifests"),
                                      total=len(mfs)) as prog:
                        for mfnode, clnode in pycompat.iteritems(mfs):
                            prog.value += 1
                            if (filestosend == LocalFiles
                                    and repo[clnode].phase() == phases.public):
                                continue
                            try:
                                mfctx = mflog[mfnode]
                                p1node = mfctx.parents[0]
                                p1ctx = mflog[p1node]
                            except LookupError:
                                if not repo.svfs.treemanifestserver or treeonly(
                                        repo):
                                    raise
                                # If we can't find the flat version, look for trees
                                tmfl = mflog.treemanifestlog
                                mfctx = tmfl[mfnode]
                                p1node = tmfl[mfnode].parents[0]
                                p1ctx = tmfl[p1node]

                            diff = pycompat.iteritems(p1ctx.read().diff(
                                mfctx.read()))
                            for filename, ((anode, aflag), (bnode,
                                                            bflag)) in diff:
                                if bnode is not None:
                                    fclnodes = fnodes.setdefault(filename, {})
                                    fclnode = fclnodes.setdefault(
                                        bnode, clnode)
                                    if clrevorder[clnode] < clrevorder[fclnode]:
                                        fclnodes[bnode] = clnode

            yield self.close()
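
The setdefault bookkeeping in the diff loop above keeps, for each file node,
the changelog node with the smallest clrevorder value, i.e. the earliest
commit that introduced it. A toy, self-contained rerun of that logic:

# clrevorder maps changelog nodes to their order; lower means older
clrevorder = {"cl-old": 1, "cl-new": 2}
fnodes = {}
for filename, bnode, clnode in [("a.txt", "fn1", "cl-new"), ("a.txt", "fn1", "cl-old")]:
    fclnodes = fnodes.setdefault(filename, {})
    fclnode = fclnodes.setdefault(bnode, clnode)
    if clrevorder[clnode] < clrevorder[fclnode]:
        fclnodes[bnode] = clnode
assert fnodes == {"a.txt": {"fn1": "cl-old"}}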
Example No. 29
from edenscm.mercurial.node import hex

from . import commitcloud

DIFFERENTIAL_REGEX = re.compile(
    "Differential Revision: http.+?/"  # Line start, URL
    "D(?P<id>[0-9]+)"  # Differential ID, just numeric part
)
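
# The named group captures just the numeric part of the Differential ID,
# e.g. (the URL below is hypothetical):
#   m = DIFFERENTIAL_REGEX.search(
#       "Differential Revision: https://phab.example.com/D12345")
#   m.group("id")  # -> "12345"
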
cmdtable = {}
command = registrar.command(cmdtable)


@command(
    "debugcrdump",
    [
        ("r", "rev", [], _("revisions to dump")),
        # We use 1<<15 for "as much context as possible"
        ("U", "unified", 1 << 15, _("number of lines of context to show"),
         _("NUM")),
        ("l", "lfs", False, "Provide sha256 for lfs files instead of dumping"),
        ("", "obsolete", False,
         "add obsolete markers related to the given revisions"),
        ("", "nobinary", False, "do not dump binary files"),
    ],
    _("hg debugcrdump [OPTION]... [-r] [REV]"),
)
def crdump(ui, repo, *revs, **opts):
    """
    Dump the info about the revisions in a format that's friendly for sending the
    patches for code review.
Example No. 30
def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
    """pick fixup chunks from targetctx, apply them to stack.

    if targetctx is None, the working copy context will be used.
    if stack is None, the current draft stack will be used.
    return fixupstate.
    """
    if stack is None:
        limit = ui.configint("absorb", "maxstacksize", 50)
        stack = getdraftstack(repo["."], limit)
        if limit and len(stack) >= limit:
            ui.warn(
                _("absorb: only the recent %d changesets will "
                  "be analysed\n") % limit)
    if not stack:
        raise error.Abort(_("no changeset to change"))
    if targetctx is None:  # default to working copy
        targetctx = repo[None]
    if pats is None:
        pats = ()
    if opts is None:
        opts = {}
    state = fixupstate(stack, ui=ui, opts=opts)
    matcher = scmutil.match(targetctx, pats, opts)
    if opts.get("interactive"):
        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
        origchunks = patch.parsepatch(diff)
        chunks = cmdutil.recordfilter(ui, origchunks)[0]
        targetctx = overlaydiffcontext(stack[-1], chunks)
    fm = None
    if not (ui.quiet
            and opts.get("apply_changes")) and not opts.get("edit_lines"):
        fm = ui.formatter("absorb", opts)
    state.diffwith(targetctx, matcher, fm)
    if fm is not None and state.ctxaffected:
        fm.startitem()
        count = len(state.ctxaffected)
        fm.write(
            "count",
            _n("\n%d changeset affected\n", "\n%d changesets affected\n",
               count),
            count,
        )
        fm.data(linetype="summary")
        for ctx in reversed(stack):
            if ctx not in state.ctxaffected:
                continue
            fm.startitem()
            fm.context(ctx=ctx)
            fm.data(linetype="changeset")
            fm.write("node", "%-7.7s ", ctx.hex(), label="absorb.node")
            descfirstline = ctx.description().splitlines()[0]
            fm.write("descfirstline",
                     "%s\n",
                     descfirstline,
                     label="absorb.description")
        fm.end()
    if not opts.get("edit_lines") and not any(
            f.fixups for f in state.fixupmap.values()):
        ui.write(_("nothing to absorb\n"))
    elif not opts.get("dry_run"):
        if not opts.get("apply_changes"):
            if ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No",
                               default=0):
                raise error.Abort(_("absorb cancelled\n"))
        state.apply()
        state.commit()
        state.printchunkstats()
    return state
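
A note on the confirmation above: ui.promptchoice returns the index of the
selected choice in the "$$"-separated list, so "&Yes" (index 0, the default)
returns a falsy 0 and the changes are applied, while "&No" returns 1 and
triggers the Abort:

# promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=0)
#   -> 0 for "Yes" (falsy: proceed), 1 for "No" (truthy: abort)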