Example #1
def logsyncop(
    repo,
    op,
    version,
    oldheads,
    newheads,
    oldbm,
    newbm,
    oldrbm,
    newrbm,
    oldsnapshots,
    newsnapshots,
):
    oldheadsset = set(oldheads)
    newheadsset = set(newheads)
    oldbmset = set(oldbm)
    newbmset = set(newbm)
    oldrbmset = set(oldrbm)
    newrbmset = set(newrbm)
    oldsnapset = set(oldsnapshots)
    newsnapset = set(newsnapshots)
    addedheads = blackbox.shortlist(
        [h for h in newheads if h not in oldheadsset])
    removedheads = blackbox.shortlist(
        [h for h in oldheads if h not in newheadsset])
    addedbm = blackbox.shortlist([h for h in newbm if h not in oldbmset])
    removedbm = blackbox.shortlist([h for h in oldbm if h not in newbmset])
    addedrbm = blackbox.shortlist([h for h in newrbm if h not in oldrbmset])
    removedrbm = blackbox.shortlist([h for h in oldrbm if h not in newrbmset])
    addedsnaps = blackbox.shortlist(
        [h for h in newsnapshots if h not in oldsnapset])
    removedsnaps = blackbox.shortlist(
        [h for h in oldsnapshots if h not in newsnapset])
    blackbox.log({
        "commit_cloud_sync": {
            "op": op,
            "version": version,
            "added_heads": addedheads,
            "removed_heads": removedheads,
            "added_bookmarks": addedbm,
            "removed_bookmarks": removedbm,
            "added_remote_bookmarks": addedrbm,
            "removed_remote_bookmarks": removedrbm,
            "added_snapshots": addedsnaps,
            "removed_snapshots": removedsnaps,
        }
    })
    util.info("commit-cloud-sync", op=op, version=version)
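The helper above only diffs the old and new head, bookmark, and snapshot lists and forwards the summary to the blackbox log. Below is a minimal, hypothetical invocation sketch: repo stands for an already-open repository object, and every hash, bookmark name, operation label, and version number is invented for illustration and is not taken from the project.

# Hypothetical usage sketch (all values below are made up for illustration).
logsyncop(
    repo,                                 # an already-open repository object
    "cloud_sync",                         # illustrative operation label
    42,                                   # illustrative workspace version
    oldheads=["aaaa1111"],
    newheads=["aaaa1111", "bbbb2222"],
    oldbm=["feature-x"],
    newbm=["feature-x", "feature-y"],
    oldrbm=[],
    newrbm=["remote/main"],
    oldsnapshots=[],
    newsnapshots=["cccc3333"],
)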
Example #2
File: __init__.py Project: jsoref/eden
def _walk(self, match, event):
    """Replacement for filesystem._walk, hooking into Watchman.

    Whenever listignored is False and the Watchman client is available, use
    Watchman combined with saved state to possibly return only a subset of
    files."""

    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            raise fsmonitorfallback("no clock")
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = "c:0:0"
        notefiles = []

    ignore = self.dirstate._ignore

    # experimental config: experimental.fsmonitor.skipignore
    if not self._ui.configbool("experimental", "fsmonitor.skipignore"):
        if ignorehash and _hashignore(ignore) != ignorehash and clock != "c:0:0":
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                raise fsmonitorfallback("ignore rules changed")
            notefiles = []
            clock = "c:0:0"

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self.dirstate._map
    if util.safehasattr(dmap, "_map"):
        # for better performance, directly access the inner dirstate map if the
        # standard dirstate implementation is in use.
        dmap = dmap._map
    if "treestate" in self._repo.requirements:
        # treestate has a fast path to filter out ignored directories.
        ignorevisitdir = self.dirstate._ignore.visitdir

        def dirfilter(path):
            result = ignorevisitdir(path.rstrip("/"))
            return result == "all"

        nonnormalset = self.dirstate._map.nonnormalsetfiltered(dirfilter)
    else:
        nonnormalset = self.dirstate._map.nonnormalset

    event["old_clock"] = clock
    event["old_files"] = blackbox.shortlist(nonnormalset)

    copymap = self.dirstate._map.copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self.dirstate._join
    normcase = util.normcase
    fresh_instance = False

    exact = False
    if match.isexact():  # match.exact
        exact = True

    if not exact and self.dirstate._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self.dirstate._normalizefile
    else:
        normalize = None

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command(
            "query",
            {
                "fields": ["mode", "mtime", "size", "exists", "name"],
                "since": clock,
                "expression": [
                    "not",
                    ["anyof", ["dirname", ".hg"], ["name", ".hg", "wholename"]],
                ],
                "sync_timeout": int(state.timeout * 1000),
                "empty_on_fresh_instance": state.walk_on_invalidate,
            },
        )
    except Exception as ex:
        event["is_error"] = True
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        # XXX: Legacy scuba logging. Remove this once the source of truth
        # is moved to the Rust Event.
        self._ui.log("fsmonitor_status", fsmonitor_status="exception")
        if self._ui.configbool("fsmonitor", "fallback-on-watchman-exception"):
            raise fsmonitorfallback("exception during run")
        else:
            raise ex
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        event["new_clock"] = result["clock"]
        event["is_fresh"] = result["is_fresh_instance"]
        state.setlastclock(result["clock"])
        state.setlastisfresh(result["is_fresh_instance"])
        if result["is_fresh_instance"]:
            if not self._ui.plain() and self._ui.configbool(
                "fsmonitor", "warn-fresh-instance"
            ):
                oldpid = _watchmanpid(event["old_clock"])
                newpid = _watchmanpid(event["new_clock"])
                if oldpid is not None and newpid is not None:
                    self._ui.warn(
                        _(
                            "warning: watchman has recently restarted (old pid %s, new pid %s) - operation will be slower than usual\n"
                        )
                        % (oldpid, newpid)
                    )
                elif oldpid is None and newpid is not None:
                    self._ui.warn(
                        _(
                            "warning: watchman has recently started (pid %s) - operation will be slower than usual\n"
                        )
                        % (newpid,)
                    )
                else:
                    self._ui.warn(
                        _(
                            "warning: watchman failed to catch up with file change events and requires a full scan - operation will be slower than usual\n"
                        )
                    )

            if state.walk_on_invalidate:
                state.invalidate(reason="fresh_instance")
                raise fsmonitorfallback("fresh instance")
            fresh_instance = True
            # Ignore any prior noteable files from the state info
            notefiles = []
        else:
            count = len(result["files"])
            state.setwatchmanchangedfilecount(count)
            event["new_files"] = blackbox.shortlist(
                (e["name"] for e in result["files"]), count)
        # XXX: Legacy scuba logging. Remove this once the source of truth
        # is moved to the Rust Event.
        if event["is_fresh"]:
            self._ui.log("fsmonitor_status", fsmonitor_status="fresh")
        else:
            self._ui.log("fsmonitor_status", fsmonitor_status="normal")

    results = {}

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = dict((normcase(k), k) for k in results)

    switch_slashes = pycompat.ossep == "\\"
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files.  We use this property to reconcile deletes
    # for name case changes.
    ignorelist = []
    ignorelistappend = ignorelist.append
    for entry in result["files"]:
        fname = entry["name"]
        if _fixencoding:
            fname = _watchmantofsencoding(fname)
        if switch_slashes:
            fname = fname.replace("\\", "/")
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry["mode"]
        fexists = entry["exists"]
        kind = getkind(fmode)

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted.  If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if (fname not in results and fname in dmap
                    and (matchalways or matchfn(fname))):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            else:
                ignored = ignore(fname)
                if ignored:
                    ignorelistappend(fname)
                if (matchalways or matchfn(fname)) and not ignored:
                    results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None
        elif fname in match.files():
            match.bad(fname, filesystem.badtype(kind))

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = set((normalize(f, True, True) for f in notefiles
                         if normcase(f) not in foldmap))
    visit = set((f for f in notefiles if (
        f not in results and matchfn(f) and (f in dmap or not ignore(f)))))

    if not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in nonnormalset
                         if f not in results and matchfn(f))
            visit.update(f for f in copymap if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(f for f in dmap if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in dmap if f not in results and matchfn(f))
            visit.update(f for f in copymap if f not in results and matchfn(f))

    # audit returns False for paths with one of its parent directories being a
    # symlink.
    audit = pathutil.pathauditor(self.dirstate._root, cached=True).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    droplist = []
    droplistappend = droplist.append
    for f in auditfail:
        # For auditfail paths, they should be treated as not existed in working
        # copy.
        filestate = dmap.get(f, ("?", ))[0]
        if filestate in ("?", ):
            # do not exist in working parents, remove them from treestate and
            # avoid walking through them.
            droplistappend(f)
            results.pop(f, None)
        else:
            # tracked, mark as deleted
            results[f] = None

    nf = iter(auditpass).next
    for st in util.statfiles([join(f) for f in auditpass]):
        f = nf()
        if (st and not ignore(f)) or f in dmap:
            results[f] = st
        elif not st:
            # '?' (untracked) file was deleted from the filesystem - remove it
            # from treestate.
            #
            # We can only update the dirstate (and treestate) while holding the
            # wlock. That happens inside poststatus.__call__ -> state.set. So
            # buffer what files to "drop" so state.set can clean them up.
            entry = dmap.get(f, None)
            if entry and entry[0] == "?":
                droplistappend(f)
    # The droplist and ignorelist need to match setlastclock()
    state.setdroplist(droplist)
    state.setignorelist(ignorelist)

    results.pop(".hg", None)
    return results.iteritems()
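The core of _walk is the "since" query it sends to Watchman. The standalone sketch below reproduces the shape of that query with the pywatchman client directly; the helper name, root path, and timeout defaults are assumptions for illustration, and it presumes Watchman is already watching the given root.

# Standalone sketch of the same kind of since-query, using pywatchman
# directly.  Assumes Watchman is already watching `root` (for example via
# `watchman watch-project <root>`); the helper name and defaults here are
# illustrative and are not part of the project above.
import pywatchman


def query_changed_files(root, clock="c:0:0", timeout=10.0):
    """Return (new_clock, changed_files) for root since the given clock."""
    # Small slack on top of the user timeout, mirroring the code above.
    client = pywatchman.client(timeout=timeout + 0.1)
    try:
        result = client.query(
            "query",
            root,
            {
                "fields": ["mode", "mtime", "size", "exists", "name"],
                "since": clock,
                # Skip the .hg metadata directory, as _walk does.
                "expression": [
                    "not",
                    ["anyof", ["dirname", ".hg"], ["name", ".hg", "wholename"]],
                ],
                "sync_timeout": int(timeout * 1000),
            },
        )
        return result["clock"], result["files"]
    finally:
        client.close()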