Example #1
def _diff(orig, repo, *args, **kwargs):
    def snapshotdiff(data1, data2, path):
        uheaders, hunks = mdiff.unidiff(
            data1,
            date1,
            data2,
            date2,
            path,
            path,
            opts=kwargs.get("opts"),
            check_binary=False,
        )
        return "".join(sum((list(hlines) for hrange, hlines in hunks), []))

    for text in orig(repo, *args, **kwargs):
        yield text
    node2 = kwargs.get("node2") or args[1]
    if node2 is None:
        # this should be the snapshot node
        return
    ctx2 = repo.unfiltered()[node2]
    date2 = util.datestr(ctx2.date())
    node1 = kwargs.get("node1") or args[0]
    if node1 is not None:
        ctx1 = repo[node1]
    else:
        # is that possible?
        ctx1 = ctx2.p1()
    date1 = util.datestr(ctx1.date())
    metadataid = ctx2.extra().get("snapshotmetadataid", "")
    if not metadataid:
        # node2 is not a snapshot
        return
    snapmetadata = snapshotmetadata.getfromlocalstorage(repo, metadataid)
    store = repo.svfs.snapshotstore
    # print unknown files from snapshot
    # diff("", content)
    yield "\n===\nUntracked changes:\n===\n"
    for f in snapmetadata.unknown:
        yield "? %s\n" % f.path
        yield snapshotdiff("", f.getcontent(store), f.path)
    # print deleted files from snapshot
    # diff(prevcontent, "")
    for f in snapmetadata.deleted:
        yield "! %s\n" % f.path
        fctx1 = ctx1.filectx(f.path)
        yield snapshotdiff(fctx1.data(), "", f.path)
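
The snapshotdiff helper above is a closure over date1 and date2, which are only assigned later in _diff. Python resolves a closure's free variables when the inner function is called, not when it is defined, so this is safe as long as snapshotdiff runs after the dates are set. A minimal, self-contained sketch of that late-binding behavior (all names illustrative):

def outer():
    def fmt(text):
        return text + suffix  # `suffix` is looked up when fmt() runs
    suffix = "!"              # assigned after fmt is defined
    return fmt("hello")

print(outer())  # -> hello!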
Example #2
def _debugundoindex(ui, repo, reverseindex):
    try:
        nodedict = _readindex(repo, reverseindex)
    except IndexError:
        raise error.Abort(_("index out of bounds"))
    template = "{tabindent(sub('\0', ' ', content))}\n"
    fm = ui.formatter("debugundohistory", {"template": template})
    cabinet = (
        "command.i",
        "bookmarks.i",
        "date.i",
        "draftheads.i",
        "draftobsolete.i",
        "visibleheads.i",
        "workingparent.i",
    )
    for filename in cabinet:
        name = filename[:-2]
        header = name + ":\n"
        if name not in nodedict:
            continue
        rawcontent = _readnode(repo, filename, nodedict[name])
        if "date.i" == filename:
            splitdate = rawcontent.split(" ")
            datetuple = (float(splitdate[0]), int(splitdate[1]))
            content = util.datestr(datetuple)
        elif filename in {"draftheads.i", "visibleheads.i"}:
            try:
                oldnodes = _readindex(repo, reverseindex + 1)
                oldheads = _readnode(repo, filename, oldnodes[filename[:-2]])
            except IndexError:  # index is oldest log
                content = rawcontent
            else:
                content = "ADDED:\n\t" + "\n\t".join(
                    sorted(
                        set(rawcontent.split("\n")) -
                        set(oldheads.split("\n"))))
                content += "\nREMOVED:\n\t" + "\n\t".join(
                    sorted(
                        set(oldheads.split("\n")) -
                        set(rawcontent.split("\n"))))
        elif "command.i" == filename:
            if "" == rawcontent:
                content = "unknown command(s) run, gap in log"
            else:
                content = rawcontent.split("\0", 1)[1]
        else:
            content = rawcontent
        fm.startitem()
        fm.write("content", "%s", header + content)
    fm.write("content", "%s", "unfinished:\t" + nodedict["unfinished"])
    fm.end()
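
The draftheads.i/visibleheads.i branch above renders the difference between two newline-separated head lists as ADDED/REMOVED blocks. The same set arithmetic as a standalone sketch, with illustrative inputs:

def diffheads(rawcontent, oldheads):
    new = set(rawcontent.split("\n"))
    old = set(oldheads.split("\n"))
    content = "ADDED:\n\t" + "\n\t".join(sorted(new - old))
    content += "\nREMOVED:\n\t" + "\n\t".join(sorted(old - new))
    return content

print(diffheads("a\nb\nc", "b\nc\nd"))  # ADDED: a; REMOVED: d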
Example #3
File: commands.py Project: miscreant1/eden
def backupdisable(ui, repo, **opts):
    """temporarily disable automatic backup or sync

    Disables automatic background backup or sync for the specified duration.
    """

    if not background.autobackupenabled(repo):
        ui.write(_("background backup was already disabled\n"), notice=_("note"))

    try:
        duration = int(opts.get("hours", 1)) * 60 * 60
    except ValueError:
        raise error.Abort(
            _(
                "error: argument 'hours': invalid int value: '{value}'\n".format(
                    value=opts.get("hours")
                )
            )
        )

    timestamp = int(time.time()) + duration
    background.disableautobackup(repo, timestamp)
    ui.write(
        _("background backup is now disabled until %s\n")
        % util.datestr(util.makedate(timestamp)),
        component="commitcloud",
    )

    try:
        with backuplock.trylock(repo):
            pass
    except error.LockHeld as e:
        if e.lockinfo.isrunning():
            ui.warn(
                _(
                    "'@prog@ cloud disable' does not affect running backup processes\n"
                    "(kill the background process - pid %s on %s - gracefully if needed)\n"
                )
                % (e.lockinfo.uniqueid, e.lockinfo.namespace),
                notice=_("warning"),
            )
    return 0
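
backupdisable converts the --hours option to a duration in seconds and formats the expiry time with util.datestr, a Mercurial-internal helper. A rough stdlib approximation of the same computation (the hours value is illustrative):

import time

hours = 2  # stand-in for opts.get("hours", 1)
until = int(time.time()) + hours * 60 * 60
print("background backup is now disabled until %s"
      % time.strftime("%a %b %d %H:%M:%S %Y", time.localtime(until)))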
Example #4
File: hg.py Project: jsoref/eden
    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(
            author=ctx.user(),
            date=util.datestr(ctx.date(), "%Y-%m-%d %H:%M:%S %1%2"),
            desc=ctx.description(),
            rev=crev,
            parents=parents,
            optparents=optparents,
            branch=ctx.branch(),
            extra=ctx.extra(),
            sortkey=ctx.rev(),
            saverev=self.saverev,
            phase=ctx.phase(),
        )
Example #5
    def _construct_commit(self, obj, parents=None):
        """
        Constructs a common.commit object from an unmarshalled
        `p4 describe` output
        """
        desc = self.recode(obj.get("desc", ""))
        date = (int(obj["time"]), 0)  # timezone not set
        if parents is None:
            parents = []

        return common.commit(
            author=self.recode(obj["user"]),
            date=util.datestr(date, "%Y-%m-%d %H:%M:%S %1%2"),
            parents=parents,
            desc=desc,
            branch=None,
            rev=obj["change"],
            extra={
                "p4": obj["change"],
                "convert_revision": obj["change"]
            },
        )
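
Examples #4 and #5 both format dates with "%Y-%m-%d %H:%M:%S %1%2", where %1 and %2 are Mercurial-specific escapes that together render the ±HHMM timezone offset, and a Mercurial date is a (unixtime, tz-offset) pair, as the comment in _construct_commit notes. A hedged stdlib approximation for the UTC case:

from datetime import datetime, timezone

date = (1700000000, 0)  # (unixtime, tz-offset), offset 0 as above
dt = datetime.fromtimestamp(date[0], tz=timezone.utc)
print(dt.strftime("%Y-%m-%d %H:%M:%S %z"))  # -> 2023-11-14 22:13:20 +0000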
Example #6
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
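
The maxage handling above converts an age in days into a cutoff timestamp (maxage * 86400 seconds) and omits any head whose recorded date falls before it. The same filter in isolation, with illustrative data:

import time

headdates = {"abc123": time.time() - 10 * 86400,  # 10 days old
             "def456": time.time() - 1 * 86400}   # 1 day old
maxage = 7  # days
mindate = time.time() - maxage * 86400
omitted = [h for h, d in headdates.items() if d < mindate]
print(omitted)  # -> ['abc123']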
Example #7
        def log(self, event, *msg, **opts):
            global lastui
            super(blackboxui, self).log(event, *msg, **opts)

            if not "*" in self.track and not event in self.track:
                return

            if not msg or not msg[0]:
                return

            if self._bbvfs:
                ui = self
            else:
                # certain ui instances exist outside the context of
                # a repo, so just default to the last blackbox that
                # was seen.
                ui = lastui()

            if not ui:
                return
            vfs = ui._bbvfs
            if not vfs:
                return

            repo = getattr(ui, "_bbrepo", lambda: None)()
            if not lastui() or repo:
                lastui = weakref.ref(ui)
            if getattr(ui, "_bbinlog", False):
                # recursion and failure guard
                return
            ui._bbinlog = True
            default = self.configdate("devel", "default-date")
            date = util.datestr(default, "%Y/%m/%d %H:%M:%S")
            user = util.getuser()
            pid = "%d" % util.getpid()
            if len(msg) == 1:
                # Don't even try to format the string if there is only one
                # argument.
                formattedmsg = msg[0]
            else:
                try:
                    formattedmsg = msg[0] % msg[1:]
                except TypeError:
                    # If fails with `TypeError: not enough arguments for format
                    # string`, concatenate the arguments gracefully.
                    formattedmsg = " ".join(msg)
            rev = "(unknown)"
            changed = ""
            # Only log the current commit if the changelog has already been
            # loaded.
            if repo and "changelog" in repo.__dict__:
                try:
                    ctx = repo[None]
                    parents = ctx.parents()
                    rev = "+".join([hex(p.node()) for p in parents])
                except Exception:
                    # This can happen if the dirstate file is sufficiently
                    # corrupt that we can't extract the parents. In that case,
                    # just don't set the rev.
                    pass
                if ui.configbool("blackbox", "dirty") and ctx.dirty(
                        missing=True, merge=False, branch=False):
                    changed = "+"
            if ui.configbool("blackbox", "logsource"):
                src = " [%s]" % event
            else:
                src = ""
            requestid = ui.environ.get("HGREQUESTID") or ""
            if requestid:
                src += "[%s]" % requestid
            try:
                fmt = "%s %s @%s%s (%s)%s> %s"
                args = (date, user, rev, changed, pid, src, formattedmsg)
                with _openlogfile(ui, vfs) as fp:
                    line = fmt % args
                    if not line.endswith("\n"):
                        line += "\n"
                    fp.write(encodeutf8(line))
            except (IOError, OSError) as err:
                self.debug("warning: cannot write to blackbox.log: %s\n" %
                           err.strerror)
                # do not restore _bbinlog intentionally to avoid failed
                # logging again
            else:
                ui._bbinlog = False
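
The message-formatting logic above first tries printf-style interpolation and falls back to joining the arguments when they do not match the format string. Extracted into a standalone helper (a sketch, not part of blackbox itself):

def formatmsg(*msg):
    if len(msg) == 1:
        return msg[0]
    try:
        return msg[0] % msg[1:]
    except TypeError:
        # e.g. "not all arguments converted during string formatting"
        return " ".join(msg)

print(formatmsg("rev %s of %s", "abc", "repo"))  # interpolates
print(formatmsg("rev %s", "abc", "extra"))       # falls back to join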
Example #8
File: sync.py Project: simpkins/eden
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n) for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True)
    ]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                ) % (omittedheadslen, maxage))
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n("  and %d older head\n", "  and %d older heads\n",
                           remaining) % remaining)
                    break
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
                counter = counter + 1

        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates,
         remotebookmarknewnodes) = _processremotebookmarks(
             repo, cloudrefs.remotebookmarks, lastsyncstate)

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(repo.ui,
                                     list(remotebookmarknewnodes) + newheads,
                                     cloudrefs.headdates)
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate,
                        omittedheads, maxage))

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
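
_n above is an ngettext-style plural selector: it picks the singular or plural message based on the count, and the result is then %-formatted. The stdlib equivalent, assuming no translation catalog is installed so the fallback strings come back unchanged:

from gettext import ngettext

for n in (1, 3):
    print(ngettext("omitting %d head that is older than %d days:",
                   "omitting %d heads that are older than %d days:",
                   n) % (n, 7))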
Example #9
def summary(repo):
    ui = repo.ui
    # commitcloud config should eventually replace the infinitepushbackup one
    if not ui.configbool("infinitepushbackup",
                         "enablestatus") or not ui.configbool(
                             "commitcloud", "enablestatus"):
        return

    # Output backup status if enablestatus is on
    if not background.autobackupenabled(repo):
        timestamp = background.autobackupdisableduntil(repo)
        if timestamp is not None:
            ui.write(
                _("background backup is currently disabled until %s\n"
                  "so your commits are not being backed up.\n"
                  "(run 'hg cloud enable' to turn automatic backups back on)\n"
                  ) % util.datestr(util.makedate(int(timestamp))),
                notice=_("note"),
            )
        else:
            ui.write(
                _("background backup is currently disabled so your commits are not being backed up.\n"
                  ),
                notice=_("note"),
            )

    (workspacename,
     usernamemigration) = workspace.currentworkspacewithusernamecheck(repo)
    if workspacename:
        subscription.check(repo)
        backuplock.status(repo)
        lastsyncstate = syncstate.SyncState(repo, workspacename)
        if lastsyncstate.omittedheads or lastsyncstate.omittedbookmarks:
            hintutil.trigger("commitcloud-old-commits", repo)
        if usernamemigration:
            hintutil.trigger("commitcloud-username-migration", repo)

    # Don't output the summary if a backup is currently in progress.
    if backuplock.islocked(repo):
        return

    unbackeduprevs = repo.revs("notbackedup()")

    # Count the number of changesets that haven't been backed up for 10 minutes.
    # If there is only one, also print out its hash.
    backuptime = time.time() - 10 * 60  # 10 minutes ago
    count = 0
    singleunbackeduprev = None
    for rev in unbackeduprevs:
        if repo[rev].date()[0] <= backuptime:
            singleunbackeduprev = rev
            count += 1
    if count > 0:
        if count > 1:
            ui.warn(_("%d changesets are not backed up.\n") % count,
                    notice=_("note"))
        else:
            ui.warn(
                _("changeset %s is not backed up.\n") %
                nodemod.short(repo[singleunbackeduprev].node()),
                notice=_("note"),
            )
        if workspacename:
            ui.warn(_("(run 'hg cloud sync' to synchronize your workspace)\n"))
        else:
            ui.warn(_("(run 'hg cloud backup' to perform a backup)\n"))
        ui.warn(
            _("(if this fails, please report to %s)\n") %
            ccerror.getsupportcontact(ui))
Example #10
        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug(
                "parsing revision %d (%d changes)\n" % (revnum, len(orig_paths))
            )

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(pycompat.iteritems(orig_paths))
            root_paths = [(p, e) for p, e in orig_paths if self.module.startswith(p)]
            if root_paths:
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path) :]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _("found parent of branch %s at %d: %s\n")
                                % (self.module, prevnum, prevmodule)
                            )
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
            if self.ui.configbool("convert", "localtimezone"):
                date = makedatetimestamp(date[0])

            if message:
                log = self.recode(message)
            else:
                log = ""

            if author:
                author = self.recode(author)
            else:
                author = ""

            try:
                branch = self.module.split("/")[-1]
                if branch == self.trunkname:
                    branch = None
            except IndexError:
                branch = None

            cset = commit(
                author=author,
                date=util.datestr(date, "%Y-%m-%d %H:%M:%S %1%2"),
                desc=log,
                parents=parents,
                branch=branch,
                rev=rev,
            )

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched
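
util.parsedate is Mercurial-internal; the code truncates the SVN timestamp to its first 19 characters to drop the microseconds and trailing "Z", then appends " UTC". Parsing the same value with only the stdlib looks roughly like this:

from datetime import datetime, timezone

svndate = "2007-01-04T17:35:00.902377Z"
dt = datetime.strptime(svndate[:19], "%Y-%m-%dT%H:%M:%S")
dt = dt.replace(tzinfo=timezone.utc)  # matches the appended " UTC"
print(dt.strftime("%Y-%m-%d %H:%M:%S %z"))  # -> 2007-01-04 17:35:00 +0000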
Example #11
def backgroundbackup(repo, command=None, dest=None):
    """start background backup"""
    ui = repo.ui
    if command is not None:
        background_cmd = command
    elif workspace.currentworkspace(repo):
        background_cmd = ["hg", "cloud", "sync"]
    else:
        background_cmd = ["hg", "cloud", "backup"]
    infinitepush_bgssh = ui.config("infinitepush", "bgssh")
    if infinitepush_bgssh:
        background_cmd += ["--config", "ui.ssh=%s" % infinitepush_bgssh]

    # developer config: infinitepushbackup.bgdebuglocks
    if ui.configbool("infinitepushbackup", "bgdebuglocks"):
        background_cmd += ["--config", "devel.debug-lockers=true"]

    # developer config: infinitepushbackup.bgdebug
    if ui.configbool("infinitepushbackup", "bgdebug", False):
        background_cmd.append("--debug")

    if dest:
        background_cmd += ["--dest", dest]

    logfile = None
    logdir = ui.config("infinitepushbackup", "logdir")
    if logdir:
        # make newly created files and dirs non-writable
        oldumask = os.umask(0o022)
        try:
            try:
                # the user name from the machine
                username = util.getuser()
            except Exception:
                username = "******"

            if not _checkcommonlogdir(logdir):
                raise WrongPermissionsException(logdir)

            userlogdir = os.path.join(logdir, username)
            util.makedirs(userlogdir)

            if not _checkuserlogdir(userlogdir):
                raise WrongPermissionsException(userlogdir)

            reponame = os.path.basename(repo.sharedroot)
            _removeoldlogfiles(userlogdir, reponame)
            logfile = getlogfilename(logdir, username, reponame)
        except (OSError, IOError) as e:
            ui.debug("background backup log is disabled: %s\n" % e)
        except WrongPermissionsException as e:
            ui.debug(
                (
                    "%s directory has incorrect permission, "
                    + "background backup logging will be disabled\n"
                )
                % e.logdir
            )
        finally:
            os.umask(oldumask)

    if not logfile:
        logfile = os.devnull

    with open(logfile, "a") as f:
        timestamp = util.datestr(util.makedate(), "%Y-%m-%d %H:%M:%S %z")
        fullcmd = " ".join(util.shellquote(arg) for arg in background_cmd)
        f.write("\n%s starting: %s\n" % (timestamp, fullcmd))

    Stdio = bindings.process.Stdio
    out = Stdio.open(logfile, append=True, create=True)
    bindings.process.Command.new(background_cmd[0]).args(
        background_cmd[1:]
    ).avoidinherithandles().newsession().stdin(Stdio.null()).stdout(out).stderr(
        out
    ).spawn()
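
The umask save/restore dance above (set a restrictive mask, do the directory work, restore the old mask) can be packaged as a small context manager. A sketch under that assumption; the path is illustrative:

import contextlib
import os

@contextlib.contextmanager
def umask(mask):
    old = os.umask(mask)
    try:
        yield
    finally:
        os.umask(old)

with umask(0o022):  # newly created files and dirs are non-group-writable
    os.makedirs("/tmp/example-logdir", exist_ok=True)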
Example #12
def _preview(ui, repo, reverseindex):
    # Print smartlog like preview of undo
    # Input:
    #   ui:
    #   repo: mercurial.localrepo
    # Output:
    #   returns 1 on index error, 0 otherwise

    # override "UNDOINDEX" as a variable usable in template
    if not _gapcheck(ui, repo, reverseindex):
        repo.ui.status(
            _("WARN: missing history between present and this"
              " state\n"))
    overrides = {("templates", "UNDOINDEX"): str(reverseindex)}

    opts = {}
    opts["template"] = "{undopreview}"
    repo = repo.unfiltered()

    try:
        nodedict = _readindex(repo, reverseindex)
        curdict = _readindex(repo, 0)  # index 0 is the current state, diffed against below
    except IndexError:
        return 1

    bookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
    oldmarks = bookstring.split("\n")
    oldpairs = set()
    for mark in oldmarks:
        kv = mark.rsplit(" ", 1)
        if len(kv) == 2:
            oldpairs.update(kv)
    bookstring = _readnode(repo, "bookmarks.i", curdict["bookmarks"])
    curmarks = bookstring.split("\n")
    curpairs = set()
    for mark in curmarks:
        kv = mark.rsplit(" ", 1)
        if len(kv) == 2:
            curpairs.update(kv)

    diffpairs = oldpairs.symmetric_difference(curpairs)
    # extract hashes from diffpairs

    bookdiffs = []
    for kv in diffpairs:
        bookdiffs += kv[0]

    revstring = revsetlang.formatspec(
        "ancestor(olddraft(0), olddraft(%s)) +"
        "(draft() & ::((olddraft(0) - olddraft(%s)) + "
        "(olddraft(%s) - olddraft(0)) + %ls + '.' + "
        "oldworkingcopyparent(%s)))",
        reverseindex,
        reverseindex,
        reverseindex,
        bookdiffs,
        reverseindex,
    )

    opts["rev"] = [revstring]
    try:
        with ui.configoverride(overrides):
            cmdutil.graphlog(ui, repo, None, opts)
        # informative output
        nodedict = _readindex(repo, reverseindex)
        time = _readnode(repo, "date.i", nodedict["date"])
        time = util.datestr([float(x) for x in time.split(" ")])
    except IndexError:
        # don't print anything
        return 1

    try:
        nodedict = _readindex(repo, reverseindex - 1)
        commandstr = _readnode(repo, "command.i", nodedict["command"])
        commandlist = commandstr.split("\0")[1:]
        commandstr = " ".join(commandlist)
        uimessage = _("undo to %s, before %s\n") % (time, commandstr)
        repo.ui.status((uimessage))
    except IndexError:
        repo.ui.status(
            _("most recent state: undoing here won't change"
              " anything\n"))
    return 0
Example #13
def _undoto(ui, repo, reverseindex, keep=False, branch=None):
    # undo to specific reverseindex
    # branch is a changectx hash (potentially short form)
    # which identifies its branch via localbranch revset

    if branch and repo.ui.configbool("experimental", "narrow-heads"):
        raise error.Abort(
            _("'undo --branch' is no longer supported in the current setup"))

    if repo != repo.unfiltered():
        raise error.ProgrammingError(_("_undoto expects unfilterd repo"))
    try:
        nodedict = _readindex(repo, reverseindex)
    except IndexError:
        raise error.Abort(_("index out of bounds"))

    # bookmarks
    bookstring = _readnode(repo, "bookmarks.i", nodedict["bookmarks"])
    booklist = bookstring.split("\n")
    if branch:
        spec = revsetlang.formatspec("_localbranch(%s)", branch)
        branchcommits = tohexnode(repo, spec)
    else:
        branchcommits = False

    # copy implementation for bookmarks
    itercopy = []
    for mark in pycompat.iteritems(repo._bookmarks):
        itercopy.append(mark)
    bmremove = []
    for mark in itercopy:
        if not branchcommits or hex(mark[1]) in branchcommits:
            bmremove.append((mark[0], None))
    repo._bookmarks.applychanges(repo, repo.currenttransaction(), bmremove)
    bmchanges = []
    for mark in booklist:
        if mark:
            kv = mark.rsplit(" ", 1)
            if not branchcommits or kv[1] in branchcommits or (
                    kv[0], None) in bmremove:
                bmchanges.append((kv[0], bin(kv[1])))
    repo._bookmarks.applychanges(repo, repo.currenttransaction(), bmchanges)

    # working copy parent
    workingcopyparent = _readnode(repo, "workingparent.i",
                                  nodedict["workingparent"])
    if not keep:
        if not branchcommits or workingcopyparent in branchcommits:
            # bailifchanged is run, so this should be safe
            hg.clean(repo, workingcopyparent, show_stats=False)
    elif not branchcommits or workingcopyparent in branchcommits:
        # keeps working copy files
        prednode = bin(workingcopyparent)
        predctx = repo[prednode]

        changedfiles = []
        wctx = repo[None]
        wctxmanifest = wctx.manifest()
        predctxmanifest = predctx.manifest()
        dirstate = repo.dirstate
        diff = predctxmanifest.diff(wctxmanifest)
        changedfiles.extend(pycompat.iterkeys(diff))

        with dirstate.parentchange():
            dirstate.rebuild(prednode, predctxmanifest, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in pycompat.iteritems(diff):
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)

    # visible changesets
    addedrevs = revsetlang.formatspec("olddraft(0) - olddraft(%d)",
                                      reverseindex)
    removedrevs = revsetlang.formatspec("olddraft(%d) - olddraft(0)",
                                        reverseindex)
    if not branch:
        if repo.ui.configbool("experimental", "narrow-heads"):
            # Assuming mutation and visibility are used. Restore visibility heads
            # directly.
            _restoreheads(repo, reverseindex)
        else:
            # Legacy path.
            smarthide(repo, addedrevs, removedrevs)
            revealcommits(repo, removedrevs)
    else:
        localadds = revsetlang.formatspec(
            "(olddraft(0) - olddraft(%d)) and"
            " _localbranch(%s)", reverseindex, branch)
        localremoves = revsetlang.formatspec(
            "(olddraft(%d) - olddraft(0)) and"
            " _localbranch(%s)", reverseindex, branch)
        smarthide(repo, localadds, removedrevs)
        smarthide(repo, addedrevs, localremoves, local=True)
        revealcommits(repo, localremoves)

    # informative output
    time = _readnode(repo, "date.i", nodedict["date"])
    time = util.datestr([float(x) for x in time.split(" ")])

    nodedict = _readindex(repo, reverseindex - 1)
    commandstr = _readnode(repo, "command.i", nodedict["command"])
    commandlist = commandstr.split("\0")[1:]
    commandstr = " ".join(commandlist)
    uimessage = _("undone to %s, before %s\n") % (time, commandstr)
    if reverseindex == 1 and commandlist[0] in ("commit", "amend"):
        command = commandlist[0]
        if command == "commit" and "--amend" in commandlist:
            command = "amend"
        oldcommithash = _readnode(repo, "workingparent.i",
                                  nodedict["workingparent"])
        shorthash = short(bin(oldcommithash))
        hintutil.trigger("undo-uncommit-unamend", command, shorthash)
    repo.ui.status((uimessage))
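
Bookmark entries read from bookmarks.i are "name hash" lines, which is why both _preview and _undoto split them with rsplit(" ", 1): splitting from the right keeps bookmark names that themselves contain spaces intact. For example:

mark = "my feature bookmark 1f2e3d4c"
print(mark.rsplit(" ", 1))  # -> ['my feature bookmark', '1f2e3d4c']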
Example #14
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    # TODO(alexeyqu): pull snapshots separately
    newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui,
                            _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(tr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
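
extensions.wrappedfunction above temporarily replaces a module function for the duration of a with-block (in Mercurial's version the wrapper also receives the original function as its first argument, which the no-op disabled simply ignores). The general attribute-swapping idea, as a self-contained sketch with illustrative names:

import contextlib

@contextlib.contextmanager
def swapattr(container, name, replacement):
    orig = getattr(container, name)
    setattr(container, name, replacement)
    try:
        yield
    finally:
        setattr(container, name, orig)

class exchange:
    @staticmethod
    def _pullobsolete():
        return "pulled"

def disabled(*args, **kwargs):
    return None

with swapattr(exchange, "_pullobsolete", disabled):
    print(exchange._pullobsolete())  # -> None while disabled
print(exchange._pullobsolete())      # -> pulled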