Beispiel #1
0
    def hgcmd(cmdname, *args, **additional_opts):
        """Run the hg command `cmdname` in-process and return its output.

        Looks up the command and its default options in `commands.table`,
        overlays `additional_opts`, and runs the command against a buffered
        copy of the enclosing `ui` so output is captured and returned as a
        string instead of being printed.

        A `_repo` entry in `additional_opts` overrides the `repo` from the
        enclosing scope; it is stripped before the options are passed on.
        """
        cmd, opts = cmdutil.getcmdanddefaultopts(cmdname, commands.table)
        opts.update(additional_opts)

        _repo = repo
        if "_repo" in opts:
            _repo = opts["_repo"]
            del opts["_repo"]
        # If we failed to popbuffer for some reason, do not mess up with the
        # main `ui` object.
        newui = ui.copy()
        newui.pushbuffer(error=True, subproc=True)
        newui._colormode = None

        def remoteui(orig, src, opts):
            # Route remote peer output through the buffered ui as well, so
            # remote messages are captured alongside local ones.
            rui = orig(src, opts)
            rui._outputui = newui
            return rui

        try:
            with newui.configoverride(
                configoverrides, "rage"
            ), extensions.wrappedfunction(hg, "remoteui", remoteui):
                if cmd.norepo:
                    cmd(newui, *args, **opts)
                else:
                    cmd(newui, _repo, *args, **opts)
        finally:
            # NOTE(review): `return` inside `finally` suppresses any exception
            # raised by the command, so the captured output is always returned.
            # This looks like deliberate best-effort behavior — confirm.
            return newui.popbuffer()
Beispiel #2
0
def hg(stdin: BinaryIO, stdout: BinaryIO, stderr: BinaryIO, env: Env) -> int:
    """run hg commands in-process
    requires edenscm modules - run from "hg debugpython", not vanilla python

    Returns the command's exit code.  The edenscm module-level stdio,
    argv, and environment state is swapped in for the duration of the
    call and restored afterwards.
    """
    # debugpython won't work - emulate Py_Main instead
    if env.args[1:3] == ["debugpython", "--"]:
        env.args = [env.args[0]] + env.args[3:]
        args = env.args[1:]
        return python(args, stdin, stdout, stderr, env)

    import bindings
    from edenscm.mercurial import encoding, extensions, pycompat, util

    # emulate ui.system via sheval
    rawsystem = partial(_rawsystem, env, stdin, stdout, stderr)
    # Remember the current stdio triple so it can be restored on exit.
    origstdio = (pycompat.stdin, pycompat.stdout, pycompat.stderr)

    try:
        with shellenv(env, stdin=stdin, stdout=stdout,
                      stderr=stderr), extensions.wrappedfunction(
                          util, "rawsystem", rawsystem):
            # Point the edenscm globals at our streams/args before running.
            encoding.setfromenviron()
            pycompat.stdin = stdin
            pycompat.stdout = stdout
            pycompat.stderr = stderr
            pycompat.sysargv = env.args
            util._reloadenv()
            exitcode = bindings.commands.run(env.args, stdin, stdout, stderr)
            return exitcode
    finally:
        # restore environ
        encoding.setfromenviron()
        pycompat.stdin, pycompat.stdout, pycompat.stderr = origstdio
Beispiel #3
0
def snapshotshow(ui, repo, *args, **opts):
    """show the snapshot contents, given its revision id"""
    snapctx = getsnapshotctx(ui, repo, args)
    revhex = snapctx.hex()
    opts["rev"] = [revhex]
    opts["patch"] = True
    revs, expr, filematcher = cmdutil.getlogrevs(repo, [], opts)
    if filematcher:
        matchfn = filematcher(revhex)
    else:
        matchfn = None
    ui.pager("snapshotshow")
    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
    # Swap in the snapshot-aware diff/show implementations while rendering;
    # the wrapping takes effect on __enter__ and is undone on exit.
    wrapdiff = extensions.wrappedfunction(patch, "diff", _diff)
    wrapprinter = extensions.wrappedfunction(cmdutil.changeset_printer, "_show", _show)
    wraptemplater = extensions.wrappedfunction(cmdutil.changeset_templater, "_show", _show)
    with wrapdiff, wrapprinter, wraptemplater:
        displayer.show(snapctx, matchfn=matchfn)
        displayer.flush(snapctx)
    displayer.close()
Beispiel #4
0
        def testremovesymlinkplaceholder(self):
            """The symlink placeholder must be cleaned up when makelock fails."""

            class RenameFailure(Exception):
                pass

            # Rename is the last step of makelock; force it to fail so the
            # cleanup path is exercised.
            def failingrename(orig, src, dst):
                raise RenameFailure()

            lockpath = os.path.join(
                encoding.environ.get("TESTTMP"), "testlock"
            )
            wrapped = extensions.wrappedfunction(os, "rename", failingrename)
            with wrapped, self.assertRaises(RenameFailure):
                util.makelock("foo:%s" % os.getpid(), lockpath)

            # The placeholder lock should be removed.
            self.assertFalse(os.path.lexists(lockpath))
Beispiel #5
0
# Build a one-commit repo and leave file2 with an uncommitted change.
sh % "echo 1" > "file1"
sh % "echo 1" > "file2"
sh % "hg commit -Aqm commit1"
sh % "echo 2" > "file2"

# Sanity-check the working copy and log before exercising shelve.
sh % "cat file2" == "2"
sh % "tglog" == "@  6408d34d8180 'commit1'"


def update(orig, repo, *args, **kwargs):
    """Simulate an interrupted update.

    With the `abortupdate.after` config knob set, the real update runs
    first and the interrupt fires afterwards; otherwise the interrupt
    fires before any update work happens.
    """
    runfirst = repo.ui.configbool("abortupdate", "after")
    if runfirst:
        orig(repo, *args, **kwargs)
    raise KeyboardInterrupt


# Shelve is interrupted before the update step; the shelve itself must
# still be recorded and the working copy left untouched.
with extensions.wrappedfunction(hg, "update", update):
    sh % "hg shelve" == r"""
        shelved as default
        interrupted!
        [255]"""

# Despite the interrupt, the shelved change exists and can be unshelved.
sh % "cat file2" == "2"
sh % "tglog" == "@  6408d34d8180 'commit1'"
sh % "hg update --clean --quiet ."
sh % "hg shelve --list" == "default * shelve changes to: commit1 (glob)"
sh % "hg unshelve" == "unshelving change 'default'"
sh % "cat file2" == "2"
with extensions.wrappedfunction(hg, "update", update):
    sh % "hg shelve --config 'abortupdate.after=true'" == r"""
        shelved as default
        1 files updated, 0 files merged, 0 files removed, 0 files unresolved
Beispiel #6
0
# Test interrupted shelve - this should not lose work

# Fresh repo with one commit and a pending modification to file2.
sh % "newrepo"
sh % "echo 1" > "file1"
sh % "echo 1" > "file2"
sh % "hg commit -Aqm commit1"
sh % "echo 2" > "file2"


def createmarkers(orig, *args, **kwargs):
    """Let the real createmarkers run, then simulate a user interrupt.

    Used to test shelve's transaction rollback: the obsmarkers get written
    and only afterwards is the operation interrupted.
    """
    orig(*args, **kwargs)
    raise KeyboardInterrupt


# Interrupt shelve right after obsmarkers are created: the transaction
# must roll back cleanly.
with extensions.wrappedfunction(obsolete, "createmarkers", createmarkers):
    sh % "hg shelve" == r"""
        transaction abort!
        rollback completed
        interrupted!
        [255]"""

# The pending change to file2 survives the aborted shelve.
sh % "cat file2" == "2"
sh % "tglog" == "@  0: 6408d34d8180 'commit1'"


def update(orig, repo, *args, **kwargs):
    """Interrupt the update, optionally after letting it complete.

    When `abortupdate.after` is set, the wrapped update runs to completion
    before the KeyboardInterrupt is raised; otherwise it never runs.
    """
    after = repo.ui.configbool("abortupdate", "after")
    if after:
        orig(repo, *args, **kwargs)
    raise KeyboardInterrupt
Beispiel #7
0
# Setup repo
sh.newrepo()

now = int(time.time())

sh % "touch file1"
sh % "hg add file1"

# Create one commit per age, from oldest (~1 year) to newest (5 seconds).
for delta in [31536000, 86401, 86369, 3800, 420, 5]:
    committime = now - delta
    # Use a context manager so the file is flushed and closed before hg
    # commits it; open(...).write(...) relied on refcounting to close the
    # handle, which is CPython-specific and can leave the write unflushed.
    with open("file1", "w") as f:
        f.write("%s\n" % delta)
    sh.hg("commit", "-d", "%s 0" % committime, "-m",
          "Changeset %s seconds ago" % delta)

with wrappedfunction(time, "time", lambda orig: now + 1):
    # Check age ranges
    sh % "hg log -T '{rev} {desc}\\n' -r 'age(\"<30\")'" == "5 Changeset 5 seconds ago"
    sh % "hg log -T '{rev} {desc}\\n' -r 'age(\"<7m30s\")'" == r"""
        4 Changeset 420 seconds ago
        5 Changeset 5 seconds ago"""
    sh % "hg log -T '{rev} {desc}\\n' -r 'age(\"<1h4m\")'" == r"""
        3 Changeset 3800 seconds ago
        4 Changeset 420 seconds ago
        5 Changeset 5 seconds ago"""
    sh % "hg log -T '{rev} {desc}\\n' -r 'age(\"<1d\")'" == r"""
        2 Changeset 86369 seconds ago
        3 Changeset 3800 seconds ago
        4 Changeset 420 seconds ago
        5 Changeset 5 seconds ago"""
    sh % "hg log -T '{rev} {desc}\\n' -r 'age(\"<364d23h59m\")'" == r"""
Beispiel #8
0
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    """Apply cloud workspace refs in `cloudrefs` to the local repo.

    Pulls missing heads and remote-bookmark nodes from `remotepath`, merges
    bookmarks, remote bookmarks, snapshots, and obsmarkers, adjusts visible
    heads, then records the new cloud version in `lastsyncstate` and marks
    the pulled heads as backed up in `state`.

    `maxage` is in days; heads whose recorded headdate is older are omitted
    from the pull.  `tr` is the transaction the sync-state and bookmark
    updates are written under.
    """
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    # remotenames is optional; when absent we skip disabling its pull hook.
    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        # Drop heads whose recorded date is older than `maxage` days.
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    # With visibility tracking on, merge old visible, cloud, and local heads,
    # then drop heads that are gone from both the local and the cloud side.
    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    # Snapshot support is optional; without the extension the previous
    # snapshot list is carried forward unchanged.
    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    # TODO(alexeyqu): pull snapshots separately
    newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            # No-op used below to switch off parts of the pull machinery.
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui,
                            _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        # NOTE(review): this `tr` shadows the `tr` parameter for the duration
        # of the with-block — the snapshot update runs under its own
        # transaction.  Confirm the shadowing is intended.
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(tr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            # presumably tells the sync machinery not to re-upload these
            # fix-up markers — verify against the flag's consumers.
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
Beispiel #9
0
def batchunwrap(wrappers):
    """Unwrap each wrapper from dummy.getstack, printing the outcome.

    For every entry, attempt the unwrap and show the resulting stack;
    failures are reported by exception class name instead.
    """
    for wrapper in wrappers:
        unwrapped = None
        try:
            unwrapped = extensions.unwrapfunction(dummy, "getstack", wrapper)
            outcome = str(dummy.getstack())
        except (ValueError, IndexError) as err:
            outcome = err.__class__.__name__
        print("unwrap %s: %s: %s" % (getid(wrapper), getid(unwrapped), outcome))


batchwrap(wrappers + [wrappers[0]])
# Use `i is not None` rather than `i >= 0`: the index list contains None
# sentinels, and `None >= 0` raises TypeError on Python 3.
batchunwrap([(wrappers[i] if i is not None else None)
             for i in [3, None, 0, 4, 0, 2, 1, None]])

# Two context-manager wrappers around dummy.getstack; nothing is wrapped yet
# at creation time.
wrap0 = extensions.wrappedfunction(dummy, "getstack", wrappers[0])
wrap1 = extensions.wrappedfunction(dummy, "getstack", wrappers[1])

# Use them in a different order from how they were created to check that
# the wrapping happens in __enter__, not in __init__
print("context manager", dummy.getstack())
with wrap1:
    print("context manager", dummy.getstack())
    with wrap0:
        print("context manager", dummy.getstack())
        # Bad programmer forgets to unwrap the function, but the context
        # managers still unwrap their wrappings.
        extensions.wrapfunction(dummy, "getstack", wrappers[2])
        print("context manager", dummy.getstack())
    print("context manager", dummy.getstack())
print("context manager", dummy.getstack())