Example #1
File: journal.py Project: ahornby/eden
    def _buildjournalentry(self, namespace, name, oldhashes, newhashes):
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        return journalentry(
            util.makedate(),
            self.user,
            self.command,
            namespace,
            name,
            oldhashes,
            newhashes,
        )
Example #2
File: shelve.py Project: mitrandir77/eden
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    namelabel = "shelve.newest"
    ui.pager("shelve")
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = "shelve.name"
        if ui.quiet:
            ui.write("\n")
            continue
        ui.write(" " * (16 - len(sname)))
        used = 16
        age = "(%s)" % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label="shelve.age")
        ui.write(" " * (12 - len(age)))
        used += 12
        with open(name + "." + patchextension, "rb") as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith(b"#"):
                    desc = pycompat.decodeutf8(line.rstrip())
                    if ui.formatted:
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write("\n")
            if not (opts["patch"] or opts["stat"]):
                continue
            difflines = fp.readlines()
            if opts["patch"]:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.writebytes(chunk, label=label)
            if opts["stat"]:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)
Example #3
File: commands.py Project: miscreant1/eden
def backupdisable(ui, repo, **opts):
    """temporarily disable automatic backup or sync

    Disables automatic background backup or sync for the specified duration.
    """

    if not background.autobackupenabled(repo):
        ui.write(_("background backup was already disabled\n"), notice=_("note"))

    try:
        duration = int(opts.get("hours", 1)) * 60 * 60
    except ValueError:
        raise error.Abort(
            _(
                "error: argument 'hours': invalid int value: '{value}'\n".format(
                    value=opts.get("hours")
                )
            )
        )

    timestamp = int(time.time()) + duration
    background.disableautobackup(repo, timestamp)
    ui.write(
        _("background backup is now disabled until %s\n")
        % util.datestr(util.makedate(timestamp)),
        component="commitcloud",
    )

    try:
        with backuplock.trylock(repo):
            pass
    except error.LockHeld as e:
        if e.lockinfo.isrunning():
            ui.warn(
                _(
                    "'@prog@ cloud disable' does not affect running backup processes\n"
                    "(kill the background process - pid %s on %s - gracefully if needed)\n"
                )
                % (e.lockinfo.uniqueid, e.lockinfo.namespace),
                notice=_("warning"),
            )
    return 0
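
The example above turns the --hours option into an absolute expiry time (now plus hours * 3600 seconds) and then prints it with util.datestr(util.makedate(timestamp)). Below is a minimal sketch of the same arithmetic using only the standard library; the hours_to_expiry helper and the strftime output format are illustrative approximations, not part of the original code:

import time
from datetime import datetime, timezone

def hours_to_expiry(hours=1):
    # Hypothetical helper mirroring backupdisable: hours -> seconds,
    # added to the current time to get an absolute unix timestamp.
    return int(time.time()) + int(hours) * 60 * 60

# util.datestr(util.makedate(ts)) renders the timestamp in local time;
# this approximates that output with strftime.
expiry = hours_to_expiry(2)
print(datetime.fromtimestamp(expiry, tz=timezone.utc)
      .astimezone()
      .strftime("%a %b %d %H:%M:%S %Y %z"))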
Example #4
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
Example #5
 def commitfunc(**kwargs):
     if not repo.ui.configbool("tweakdefaults", "histeditkeepdate"):
         kwargs["date"] = util.makedate(time.time())
     origcommitfunc(**kwargs)
Example #6
def currentdate():
    return "%d %d" % util.makedate(time.time())
Example #7
File: sync.py Project: simpkins/eden
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n) for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True)
    ]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                ) % (omittedheadslen, maxage))
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n("  and %d older head\n", "  and %d older heads\n",
                           remaining) % remaining)
                    break
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
                counter = counter + 1

        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates,
         remotebookmarknewnodes) = _processremotebookmarks(
             repo, cloudrefs.remotebookmarks, lastsyncstate)

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(repo.ui,
                                     list(remotebookmarknewnodes) + newheads,
                                     cloudrefs.headdates)
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate,
                        omittedheads, maxage))

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
Example #8
File: synthrepo.py Project: zerkella/eden
def synthesize(ui, repo, descpath, **opts):
    """synthesize commits based on a model of an existing repository

    The model must have been generated by :hg:`analyze`. Commits will
    be generated randomly according to the probabilities described in
    the model. If --initfiles is set, the repository will be seeded with
    the given number files following the modeled repository's directory
    structure.

    When synthesizing new content, commit descriptions, and user
    names, words will be chosen randomly from a dictionary that is
    presumed to contain one word per line. Use --dict to specify the
    path to an alternate dictionary to use.
    """
    try:
        fp = hg.openpath(ui, descpath)
    except Exception as err:
        raise error.Abort("%s: %s" % (descpath, err[0].strerror))
    desc = json.load(fp)
    fp.close()

    def cdf(l):
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for v in probs:
            s += v
            cdfs.append(s / t)
        return vals, cdfs

    lineschanged = cdf(desc["lineschanged"])
    fileschanged = cdf(desc["fileschanged"])
    filesadded = cdf(desc["filesadded"])
    dirsadded = cdf(desc["dirsadded"])
    filesremoved = cdf(desc["filesremoved"])
    linelengths = cdf(desc["linelengths"])
    parents = cdf(desc["parents"])
    p1distance = cdf(desc["p1distance"])
    p2distance = cdf(desc["p2distance"])
    interarrival = cdf(desc["interarrival"])
    linesinfilesadded = cdf(desc["linesinfilesadded"])
    tzoffset = cdf(desc["tzoffset"])

    dictfile = opts.get("dict") or "/usr/share/dict/words"
    try:
        fp = open(dictfile, "rU")
    except IOError as err:
        raise error.Abort("%s: %s" % (dictfile, err.strerror))
    words = fp.read().splitlines()
    fp.close()

    initdirs = {}
    if desc["initdirs"]:
        for k, v in desc["initdirs"]:
            initdirs[k.encode("utf-8").replace(".hg", "_hg")] = v
        initdirs = renamedirs(initdirs, words)
    initdirscdf = cdf(initdirs)

    def pick(cdf):
        return cdf[0][bisect.bisect_left(cdf[1], random.random())]

    def pickpath():
        return os.path.join(pick(initdirscdf), random.choice(words))

    def makeline(minimum=0):
        total = max(minimum, pick(linelengths))
        c, l = 0, []
        while c < total:
            w = random.choice(words)
            c += len(w) + 1
            l.append(w)
        return " ".join(l)

    wlock = repo.wlock()
    lock = repo.lock()

    nevertouch = {".hgsub", ".hgignore", ".hgtags"}

    progress = ui.progress
    _synthesizing = _("synthesizing")
    _files = _("initial files")
    _changesets = _("changesets")

    # Synthesize a single initial revision adding files to the repo according
    # to the modeled directory structure.
    initcount = int(opts["initfiles"])
    if initcount and initdirs:
        pctx = repo[None].parents()[0]
        dirs = set(pctx.dirs())
        files = {}

        def validpath(path):
            # Don't pick filenames which are already directory names.
            if path in dirs:
                return False
            # Don't pick directories which were used as file names.
            while path:
                if path in files:
                    return False
                path = os.path.dirname(path)
            return True

        for i in xrange(0, initcount):
            ui.progress(_synthesizing, i, unit=_files, total=initcount)

            path = pickpath()
            while not validpath(path):
                path = pickpath()
            data = "%s contents\n" % path
            files[path] = data
            dir = os.path.dirname(path)
            while dir and dir not in dirs:
                dirs.add(dir)
                dir = os.path.dirname(dir)

        def filectxfn(repo, memctx, path):
            return context.memfilectx(repo, memctx, path, files[path])

        ui.progress(_synthesizing, None)
        message = "synthesized wide repo with %d files" % (len(files), )
        mc = context.memctx(
            repo,
            [pctx.node(), nullid],
            message,
            pycompat.iterkeys(files),
            filectxfn,
            ui.username(),
            "%d %d" % util.makedate(),
        )
        initnode = mc.commit()
        if ui.debugflag:
            hexfn = hex
        else:
            hexfn = short
        ui.status(
            _("added commit %s with %d files\n") %
            (hexfn(initnode), len(files)))

    # Synthesize incremental revisions to the repository, adding repo depth.
    count = int(opts["count"])
    heads = set(map(repo.changelog.rev, repo.heads()))
    for i in xrange(count):
        progress(_synthesizing, i, unit=_changesets, total=count)

        node = repo.changelog.node
        revs = len(repo)

        def pickhead(heads, distance):
            if heads:
                lheads = sorted(heads)
                rev = revs - min(pick(distance), revs)
                if rev < lheads[-1]:
                    rev = lheads[bisect.bisect_left(lheads, rev)]
                else:
                    rev = lheads[-1]
                return rev, node(rev)
            return nullrev, nullid

        r1 = revs - min(pick(p1distance), revs)
        p1 = node(r1)

        # the number of heads will grow without bound if we use a pure
        # model, so artificially constrain their proliferation
        toomanyheads = len(heads) > random.randint(1, 20)
        if p2distance[0] and (pick(parents) == 2 or toomanyheads):
            r2, p2 = pickhead(heads.difference([r1]), p2distance)
        else:
            r2, p2 = nullrev, nullid

        pl = [p1, p2]
        pctx = repo[r1]
        mf = pctx.manifest()
        mfk = mf.keys()
        changes = {}
        if mfk:
            for __ in xrange(pick(fileschanged)):
                for __ in xrange(10):
                    fctx = pctx.filectx(random.choice(mfk))
                    path = fctx.path()
                    if not (path in nevertouch or fctx.isbinary()
                            or "l" in fctx.flags()):
                        break
                lines = fctx.data().splitlines()
                add, remove = pick(lineschanged)
                for __ in xrange(remove):
                    if not lines:
                        break
                    del lines[random.randrange(0, len(lines))]
                for __ in xrange(add):
                    lines.insert(random.randint(0, len(lines)), makeline())
                path = fctx.path()
                changes[path] = "\n".join(lines) + "\n"
            for __ in xrange(pick(filesremoved)):
                path = random.choice(mfk)
                for __ in xrange(10):
                    path = random.choice(mfk)
                    if path not in changes:
                        break
        if filesadded:
            dirs = list(pctx.dirs())
            dirs.insert(0, "")
        for __ in xrange(pick(filesadded)):
            pathstr = ""
            while pathstr in dirs:
                path = [random.choice(dirs)]
                if pick(dirsadded):
                    path.append(random.choice(words))
                path.append(random.choice(words))
                pathstr = "/".join(filter(None, path))
            data = ("\n".join(makeline()
                              for __ in xrange(pick(linesinfilesadded))) +
                    "\n")
            changes[pathstr] = data

        def filectxfn(repo, memctx, path):
            if path not in changes:
                return None
            return context.memfilectx(repo, memctx, path, changes[path])

        if not changes:
            continue
        if revs:
            date = repo["tip"].date()[0] + pick(interarrival)
        else:
            date = time.time() - (86400 * count)
        # dates in mercurial must be positive, fit in 32-bit signed integers.
        date = min(0x7FFFFFFF, max(0, date))
        user = random.choice(words) + "@" + random.choice(words)
        mc = context.memctx(
            repo,
            pl,
            makeline(minimum=2),
            sorted(changes),
            filectxfn,
            user,
            "%d %d" % (date, pick(tzoffset)),
        )
        newnode = mc.commit()
        heads.add(repo.changelog.rev(newnode))
        heads.discard(r1)
        heads.discard(r2)

    lock.release()
    wlock.release()
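
The cdf()/pick() helpers in this example implement weighted random sampling: the model's (value, weight) pairs are normalised into a cumulative distribution, and bisect selects the bucket that a uniform random number falls into. A self-contained sketch of that technique; the function names and the sample weights below are illustrative only:

import bisect
import random

def build_cdf(pairs):
    # Build (values, cumulative probabilities) from (value, weight) pairs,
    # mirroring the cdf() helper above.
    if not pairs:
        return [], []
    vals, weights = zip(*sorted(pairs, key=lambda x: x[1], reverse=True))
    total = float(sum(weights))
    running, cdf = 0.0, []
    for w in weights:
        running += w
        cdf.append(running / total)
    return vals, cdf

def pick(cdf):
    # Sample a value with probability proportional to its weight.
    vals, probs = cdf
    return vals[bisect.bisect_left(probs, random.random())]

# Example: line lengths 10, 40 and 80 with weights 5, 3 and 2.
linelengths = build_cdf([(10, 5), (40, 3), (80, 2)])
print(pick(linelengths))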
Example #9
def summary(repo):
    ui = repo.ui
    # commitcloud config should eventually replace the infinitepushbackup one
    if not ui.configbool("infinitepushbackup",
                         "enablestatus") or not ui.configbool(
                             "commitcloud", "enablestatus"):
        return

    # Output backup status if enablestatus is on
    if not background.autobackupenabled(repo):
        timestamp = background.autobackupdisableduntil(repo)
        if timestamp is not None:
            ui.write(
                _("background backup is currently disabled until %s\n"
                  "so your commits are not being backed up.\n"
                  "(run 'hg cloud enable' to turn automatic backups back on)\n"
                  ) % util.datestr(util.makedate(int(timestamp))),
                notice=_("note"),
            )
        else:
            ui.write(
                _("background backup is currently disabled so your commits are not being backed up.\n"
                  ),
                notice=_("note"),
            )

    (workspacename,
     usernamemigration) = workspace.currentworkspacewithusernamecheck(repo)
    if workspacename:
        subscription.check(repo)
        backuplock.status(repo)
        lastsyncstate = syncstate.SyncState(repo, workspacename)
        if lastsyncstate.omittedheads or lastsyncstate.omittedbookmarks:
            hintutil.trigger("commitcloud-old-commits", repo)
        if usernamemigration:
            hintutil.trigger("commitcloud-username-migration", repo)

    # Don't output the summary if a backup is currently in progress.
    if backuplock.islocked(repo):
        return

    unbackeduprevs = repo.revs("notbackedup()")

    # Count the number of changesets that haven't been backed up for 10 minutes.
    # If there is only one, also print out its hash.
    backuptime = time.time() - 10 * 60  # 10 minutes ago
    count = 0
    singleunbackeduprev = None
    for rev in unbackeduprevs:
        if repo[rev].date()[0] <= backuptime:
            singleunbackeduprev = rev
            count += 1
    if count > 0:
        if count > 1:
            ui.warn(_("%d changesets are not backed up.\n") % count,
                    notice=_("note"))
        else:
            ui.warn(
                _("changeset %s is not backed up.\n") %
                nodemod.short(repo[singleunbackeduprev].node()),
                notice=_("note"),
            )
        if workspacename:
            ui.warn(_("(run 'hg cloud sync' to synchronize your workspace)\n"))
        else:
            ui.warn(_("(run 'hg cloud backup' to perform a backup)\n"))
        ui.warn(
            _("(if this fails, please report to %s)\n") %
            ccerror.getsupportcontact(ui))
Example #10
def backgroundbackup(repo, command=None, dest=None):
    """start background backup"""
    ui = repo.ui
    if command is not None:
        background_cmd = command
    elif workspace.currentworkspace(repo):
        background_cmd = ["hg", "cloud", "sync"]
    else:
        background_cmd = ["hg", "cloud", "backup"]
    infinitepush_bgssh = ui.config("infinitepush", "bgssh")
    if infinitepush_bgssh:
        background_cmd += ["--config", "ui.ssh=%s" % infinitepush_bgssh]

    # developer config: infinitepushbackup.bgdebuglocks
    if ui.configbool("infinitepushbackup", "bgdebuglocks"):
        background_cmd += ["--config", "devel.debug-lockers=true"]

    # developer config: infinitepushbackup.bgdebug
    if ui.configbool("infinitepushbackup", "bgdebug", False):
        background_cmd.append("--debug")

    if dest:
        background_cmd += ["--dest", dest]

    logfile = None
    logdir = ui.config("infinitepushbackup", "logdir")
    if logdir:
        # make newly created files and dirs non-writable
        oldumask = os.umask(0o022)
        try:
            try:
                # the user name from the machine
                username = util.getuser()
            except Exception:
                username = "******"

            if not _checkcommonlogdir(logdir):
                raise WrongPermissionsException(logdir)

            userlogdir = os.path.join(logdir, username)
            util.makedirs(userlogdir)

            if not _checkuserlogdir(userlogdir):
                raise WrongPermissionsException(userlogdir)

            reponame = os.path.basename(repo.sharedroot)
            _removeoldlogfiles(userlogdir, reponame)
            logfile = getlogfilename(logdir, username, reponame)
        except (OSError, IOError) as e:
            ui.debug("background backup log is disabled: %s\n" % e)
        except WrongPermissionsException as e:
            ui.debug(
                (
                    "%s directory has incorrect permission, "
                    + "background backup logging will be disabled\n"
                )
                % e.logdir
            )
        finally:
            os.umask(oldumask)

    if not logfile:
        logfile = os.devnull

    with open(logfile, "a") as f:
        timestamp = util.datestr(util.makedate(), "%Y-%m-%d %H:%M:%S %z")
        fullcmd = " ".join(util.shellquote(arg) for arg in background_cmd)
        f.write("\n%s starting: %s\n" % (timestamp, fullcmd))

    Stdio = bindings.process.Stdio
    out = Stdio.open(logfile, append=True, create=True)
    bindings.process.Command.new(background_cmd[0]).args(
        background_cmd[1:]
    ).avoidinherithandles().newsession().stdin(Stdio.null()).stdout(out).stderr(
        out
    ).spawn()
Example #11
def _logdate(repo, tr):
    revstring = " ".join(str(x) for x in util.makedate())
    return writelog(repo, tr, "date.i", revstring)
Example #12
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    # TODO(alexeyqu): pull snapshots separately
    newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui,
                            _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(tr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)