Example #1
def _handlelfs(repo, missing):
    """Special case if lfs is enabled

    If lfs is enabled, we need to call the prepush hook
    to make sure large files are uploaded to lfs.
    """
    try:
        lfsmod = extensions.find("lfs")
    except KeyError:
        # Ignore if lfs extension is not enabled
        pass
    else:
        with perftrace.trace("Upload LFS Blobs"):
            lfsmod.wrapper.uploadblobsfromrevs(repo, missing)

    # But wait! LFS could also be provided via remotefilelog.
    try:
        remotefilelog = extensions.find("remotefilelog")
    except KeyError:
        # Ignore if remotefilelog extension is not enabled
        pass
    else:
        if remotefilelog.shallowrepo.requirement in repo.requirements:
            with perftrace.trace("Upload LFS Blobs"):
                remotefilelog.uploadblobs(repo, missing)
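
Note: every example on this page uses perftrace.trace("...") as a context manager that brackets a named unit of work. Below is a minimal, self-contained stand-in for experimenting with that call shape outside the real codebase; the trace() helper here is hypothetical and only times the block, whereas the real perftrace module records much richer data.

import contextlib
import time

@contextlib.contextmanager
def trace(name):
    # Hypothetical stand-in for perftrace.trace: time a named block and
    # report the elapsed wall-clock duration when it exits.
    start = time.time()
    try:
        yield
    finally:
        print("%s took %.3fs" % (name, time.time() - start))

# Usage mirrors the examples on this page, e.g.:
with trace("Upload LFS Blobs"):
    time.sleep(0.05)  # placeholder for the actual upload work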
Example #2
def staterelease():
    if origrelease:
        origrelease()
    if l.stateupdate:
        with perftrace.trace("Watchman State Exit"):
            l.stateupdate.exit()
        l.stateupdate = None
Example #3
        def wlock(self, *args, **kwargs):
            l = super(hgeventsrepo, self).wlock(*args, **kwargs)
            if not self.ui.configbool("experimental",
                                      "fsmonitor.transaction_notify"):
                return l
            if l.held != 1:
                return l
            origrelease = l.releasefn

            def staterelease():
                if origrelease:
                    origrelease()
                if l.stateupdate:
                    with perftrace.trace("Watchman State Exit"):
                        l.stateupdate.exit()
                    l.stateupdate = None

            try:
                l.stateupdate = None
                l.stateupdate = watchmanclient.state_update(
                    self, name="hg.transaction")
                with perftrace.trace("Watchman State Enter"):
                    l.stateupdate.enter()
                l.releasefn = staterelease
            except Exception:
                # Swallow any errors; fire and forget
                pass
            return l
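
The wlock() override above hinges on one small pattern: chain a new callback after whatever release hook the lock already had, so Watchman is notified exactly when the transaction's lock is released. A self-contained sketch of that chaining, with hypothetical Lock and wrap_release names standing in for the real lock object:

class Lock(object):
    # Minimal stand-in for the wlock object: it calls releasefn on release.
    def __init__(self):
        self.releasefn = None

    def release(self):
        if self.releasefn:
            self.releasefn()

def wrap_release(lock, on_exit):
    # Chain a new callback after the existing release hook, the way
    # wlock() above chains staterelease after origrelease.
    orig = lock.releasefn

    def chained():
        if orig:
            orig()
        on_exit()

    lock.releasefn = chained

l = Lock()
wrap_release(l, lambda: print("watchman state exit"))
l.release()  # prints "watchman state exit"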
Example #4
    def _linkrev(self):
        if self._filenode == nullid:
            return nullrev

        p1, p2, linknode, copyfrom = self.getnodeinfo()
        rev = self._repo.changelog.nodemap.get(linknode)
        if rev is not None:
            return rev

        # Search all commits for the appropriate linkrev (slow, but uncommon)
        repo = self._repo
        path = self._path
        fileid = self._filenode
        cl = repo.changelog
        mfl = repo.manifestlog

        with repo.ui.timesection("scanlinkrev"), repo.ui.configoverride({
            ("treemanifest", "fetchdepth"):
                1
        }), perftrace.trace("Scanning for Linkrev"), progress.bar(
                repo.ui,
                _("scanning for linkrev of %s") % path) as prog:
            perftrace.tracevalue("Path", path)
            allrevs = repo.revs("_all()")
            allrevs.sort(reverse=True)
            for i, rev in enumerate(allrevs):
                prog.value = i
                node = cl.node(rev)
                # Get the changeset data (we avoid changectx object creation).
                data = cl.read(node)
                if path in data[3]:  # checking the 'files' field.
                    # The file has been touched, check if the hash is what we're
                    # looking for.
                    if fileid == mfl[data[0]].read().get(path):
                        perftrace.tracevalue("Distance", i)
                        return rev

        # Couldn't find the linkrev. This should generally not happen, and will
        # likely cause a crash.
        return None
Example #5
def pushbackupbookmarks(repo, remotepath, getconnection, backupstate):
    """
    Push a backup bundle to the server that updates the infinitepush backup
    bookmarks.
    """
    unfi = repo

    # Create backup bookmarks for the heads and bookmarks of the user.  We
    # need to include only commits that have been successfully backed up, so
    # that we can be sure they are available on the server.
    clrev = unfi.changelog.rev
    ancestors = unfi.changelog.ancestors(
        [clrev(head) for head in backupstate.heads], inclusive=True)
    # Get the heads of visible draft commits that are already backed up,
    # including commits made visible by bookmarks.
    #
    # For historical compatibility, we ignore obsolete and secret commits
    # as they are normally excluded from backup bookmarks.
    with perftrace.trace("Compute Heads"):
        revset = "heads((draft() & ::((draft() - obsolete() - hidden()) + bookmark())) & (draft() & ::%ln))"
        heads = [
            nodemod.hex(head) for head in unfi.nodes(revset, backupstate.heads)
        ]
    # Get the bookmarks that point to ancestors of backed up draft commits or
    # to commits that are public.
    with perftrace.trace("Compute Bookmarks"):
        bookmarks = {}
        for name, node in pycompat.iteritems(repo._bookmarks):
            ctx = repo[node]
            if ctx.rev() in ancestors or ctx.phase() == phases.public:
                bookmarks[name] = ctx.hex()

    infinitepushbookmarks = {}
    prefix = _backupbookmarkprefix(repo)
    localstate = _readlocalbackupstate(repo, remotepath)

    if localstate is None:
        # If there is nothing to backup, don't push any backup bookmarks yet.
        # The user may wish to restore the previous backup.
        if not heads and not bookmarks:
            return

        # Delete all server bookmarks and replace them with the full set.  The
        # server knows to do deletes before adds, and deletes are done by glob
        # pattern (see infinitepush.bundleparts.bundle2scratchbookmarks).
        infinitepushbookmarks["/".join((prefix, "heads", "*"))] = ""
        infinitepushbookmarks["/".join((prefix, "bookmarks", "*"))] = ""
        oldheads = set()
        oldbookmarks = {}
    else:
        # Generate a delta update based on the local state.
        oldheads, oldbookmarks = localstate

        if set(oldheads) == set(heads) and oldbookmarks == bookmarks:
            return

        for oldhead in oldheads:
            if oldhead not in heads:
                infinitepushbookmarks["/".join(
                    (prefix, "heads", oldhead))] = ""
        for oldbookmark in oldbookmarks:
            if oldbookmark not in bookmarks:
                infinitepushbookmarks["/".join(
                    (prefix, "bookmarks", _escapebookmark(oldbookmark)))] = ""

    for bookmark, hexnode in bookmarks.items():
        if bookmark not in oldbookmarks or hexnode != oldbookmarks[bookmark]:
            name = "/".join((prefix, "bookmarks", _escapebookmark(bookmark)))
            infinitepushbookmarks[name] = hexnode
    for hexhead in heads:
        if hexhead not in oldheads:
            name = "/".join((prefix, "heads", hexhead))
            infinitepushbookmarks[name] = hexhead

    if not infinitepushbookmarks:
        return

    # developer config: infinitepushbackup.backupbookmarklimit
    backupbookmarklimit = repo.ui.configint("infinitepushbackup",
                                            "backupbookmarklimit", 1000)
    if len(infinitepushbookmarks) > backupbookmarklimit:
        repo.ui.warn(
            _("not pushing backup bookmarks for %s as there are too many (%s > %s)\n"
              ) % (prefix, len(infinitepushbookmarks), backupbookmarklimit),
            notice=_("warning"),
            component="commitcloud",
        )
        return

    # Push a bundle containing the new bookmarks to the server.
    with perftrace.trace(
            "Push Backup Bookmark Bundle"), getconnection() as conn:
        dependencies.infinitepush.pushbackupbundle(repo.ui, repo, conn.peer,
                                                   None, infinitepushbookmarks)

    # Store the new local backup state.
    _writelocalbackupstate(repo, remotepath, heads, bookmarks)
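
The delta branch above is plain dictionary bookkeeping: backup bookmarks that disappeared are mapped to an empty string (the delete marker the server understands), while new or changed ones are mapped to their hex node. A self-contained sketch of that logic with a hypothetical compute_bookmark_delta helper; bookmark-name escaping is omitted and the prefix is only illustrative (the real one comes from _backupbookmarkprefix(repo)):

def compute_bookmark_delta(prefix, oldbookmarks, newbookmarks):
    # Mirror the delta logic in pushbackupbookmarks: "" marks a delete.
    delta = {}
    for name in oldbookmarks:
        if name not in newbookmarks:
            delta["/".join((prefix, "bookmarks", name))] = ""
    for name, hexnode in newbookmarks.items():
        if oldbookmarks.get(name) != hexnode:
            delta["/".join((prefix, "bookmarks", name))] = hexnode
    return delta

print(compute_bookmark_delta(
    "infinitepush/backups/someuser",  # illustrative prefix only
    {"main": "aaa", "stale": "bbb"},
    {"main": "ccc", "feature": "ddd"},
))
# -> {'infinitepush/backups/someuser/bookmarks/stale': '',
#     'infinitepush/backups/someuser/bookmarks/main': 'ccc',
#     'infinitepush/backups/someuser/bookmarks/feature': 'ddd'}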
Example #6
    def prefetch(self,
                 fileids,
                 force=False,
                 fetchdata=True,
                 fetchhistory=True):
        """downloads the given file versions to the cache
        """
        repo = self.repo
        idstocheck = set()
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if file == ".hgtags" or len(
                    id) == 42 or not repo.shallowmatch(file):
                continue

            idstocheck.add((file, bin(id)))

        batchlfsdownloads = self.ui.configbool("remotefilelog",
                                               "_batchlfsdownloads", True)
        dolfsprefetch = self.ui.configbool("remotefilelog", "dolfsprefetch",
                                           True)

        idstocheck = list(idstocheck)
        if repo.fileslog._ruststore:
            if not force:
                contentstore = repo.fileslog.contentstore
                metadatastore = repo.fileslog.metadatastore
            else:
                contentstore, metadatastore = repo.fileslog.makesharedonlyruststore(
                    repo)

            if fetchdata:
                contentstore.prefetch(idstocheck)
            if fetchhistory:
                metadatastore.prefetch(idstocheck)

            if batchlfsdownloads and dolfsprefetch:
                self._lfsprefetch(fileids)

            if force:
                # Since the shared-only stores and the regular ones aren't
                # shared, we need to commit data to force the stores to be
                # rebuilt. Forced prefetches are very rare and thus it is most
                # likely OK to do this.
                contentstore = None
                metadatastore = None
                repo.commitpending()

            return

        datastore = self.datastore
        historystore = self.historystore
        if force:
            datastore = unioncontentstore(*repo.fileslog.shareddatastores)
            historystore = unionmetadatastore(
                *repo.fileslog.sharedhistorystores)

        perftrace.tracevalue("Keys", len(idstocheck))
        missingids = set()
        with perftrace.trace("Get Missing"):
            if fetchdata:
                missingids.update(datastore.getmissing(idstocheck))
                perftrace.tracevalue("Missing Data", len(missingids))
            if fetchhistory:
                missinghistory = historystore.getmissing(idstocheck)
                missingids.update(missinghistory)
                perftrace.tracevalue("Missing History", len(missinghistory))

        # partition missing nodes into nullid and not-nullid so we can
        # warn about this filtering potentially shadowing bugs.
        nullids = len([None for unused, id in missingids if id == nullid])
        if nullids:
            missingids = [(f, id) for f, id in missingids if id != nullid]
            repo.ui.develwarn(
                ("remotefilelog not fetching %d null revs"
                 " - this is likely hiding bugs" % nullids),
                config="remotefilelog-ext",
            )
        if missingids:
            global fetches, fetched, fetchcost
            fetches += 1

            missingids = [(file, hex(id)) for file, id in missingids]

            fetched += len(missingids)

            start = time.time()
            with self.ui.timesection("fetchingfiles"):
                self.request(missingids, fetchdata, fetchhistory)
            fetchcost += time.time() - start
            if not batchlfsdownloads and dolfsprefetch:
                self._lfsprefetch(fileids)
        if batchlfsdownloads and dolfsprefetch:
            self._lfsprefetch(fileids)
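
The develwarn block near the end of prefetch() simply partitions the missing key set into null-node entries (which are dropped with a warning) and real misses. A tiny self-contained sketch of that step, using a NULLID constant as a stand-in for mercurial's nullid:

NULLID = b"\0" * 20  # stand-in for nullid (20 zero bytes)

def partition_nullids(missingids):
    # Split (path, node) pairs into real misses and null-node entries,
    # mirroring the filtering done before the develwarn above.
    nullids = [(f, n) for f, n in missingids if n == NULLID]
    realids = [(f, n) for f, n in missingids if n != NULLID]
    return realids, nullids

real, nulls = partition_nullids([("a.txt", NULLID), ("b.txt", b"\x01" * 20)])
assert real == [("b.txt", b"\x01" * 20)] and len(nulls) == 1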
Example #7
def _backup(
    repo,
    backupstate,
    remotepath,
    getconnection,
    revs=None,
):
    """backs up the given revisions to commit cloud

    Returns (backedup, failed), where "backedup" is a revset of the commits that
    were backed up, and "failed" is a revset of the commits that could not be
    backed up.
    """
    unfi = repo

    if revs is None:
        # No revs specified.  Back up all visible commits that are not already
        # backed up.
        revset = "heads(not public() - hidden() - (not public() & ::%ln))"
        heads = unfi.revs(revset, backupstate.heads)
    else:
        # Some revs were specified.  Back up all of those commits that are not
        # already backed up.
        heads = unfi.revs(
            "heads((not public() & ::%ld) - (not public() & ::%ln))",
            revs,
            backupstate.heads,
        )

    if not heads:
        return smartset.baseset(repo=repo), smartset.baseset(repo=repo)

    # Check if any of the heads are already available on the server.
    headnodes = list(unfi.nodes("%ld", heads))
    remoteheadnodes = {
        head
        for head, backedup in zip(
            headnodes,
            dependencies.infinitepush.isbackedupnodes(
                getconnection, [nodemod.hex(n) for n in headnodes]),
        ) if backedup
    }
    if remoteheadnodes:
        backupstate.update(remoteheadnodes)

    heads = unfi.revs("%ld - %ln", heads, remoteheadnodes)

    if not heads:
        return smartset.baseset(repo=repo), smartset.baseset(repo=repo)

    # Filter out any commits that have been marked as bad.
    badnodes = repo.ui.configlist("infinitepushbackup", "dontbackupnodes", [])
    if badnodes:
        badnodes = [node for node in badnodes if node in unfi]
        # The nodes we can't back up are the bad nodes and their descendants,
        # minus any commits that we know are already backed up anyway.
        badnodes = list(
            unfi.nodes(
                "(not public() & ::%ld) & (%ls::) - (not public() & ::%ln)",
                heads,
                badnodes,
                backupstate.heads,
            ))
        if badnodes:
            repo.ui.warn(
                _("not backing up commits marked as bad: %s\n") %
                ", ".join([nodemod.hex(node) for node in badnodes]))
            heads = unfi.revs("heads((not public() & ::%ld) - %ln)", heads,
                              badnodes)

    # Limit the number of heads we backup in a single operation.
    backuplimit = repo.ui.configint("infinitepushbackup", "maxheadstobackup")
    if backuplimit is not None and backuplimit >= 0:
        if len(heads) > backuplimit:
            repo.ui.status(
                _n(
                    "backing up only the most recent %d head\n",
                    "backing up only the most recent %d heads\n",
                    backuplimit,
                ) % backuplimit)
            heads = sorted(heads, reverse=True)[:backuplimit]

    # Back up the new heads.
    backingup = unfi.nodes("(not public() & ::%ld) - (not public() & ::%ln)",
                           heads, backupstate.heads)
    backuplock.progressbackingup(repo, list(backingup))
    with perftrace.trace("Push Backup Bundles"):
        newheads, failedheads = dependencies.infinitepush.pushbackupbundlestacks(
            repo.ui,
            unfi,
            getconnection,
            [nodemod.hex(n) for n in unfi.nodes("%ld", heads)],
        )

    # The commits that got backed up are all the ancestors of the new backup
    # heads, minus any commits that were already backed up at the start.
    backedup = unfi.revs("(not public() & ::%ls) - (not public() & ::%ln)",
                         newheads, backupstate.heads)
    # The commits that failed to get backed up are the ancestors of the failed
    # heads, except for commits that are also ancestors of a successfully backed
    # up head, or commits that were already known to be backed up.
    failed = unfi.revs(
        "(not public() & ::%ls) - (not public() & ::%ls) - (not public() & ::%ln)",
        failedheads,
        newheads,
        backupstate.heads,
    )

    backupstate.update(unfi.nodes("%ld", backedup))

    return backedup, failed
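
The head-limiting step in _backup() assumes that a higher revision number means a more recent commit, so trimming to the most recent heads is just a reverse sort and slice. A self-contained sketch with a hypothetical limit_heads helper:

def limit_heads(heads, backuplimit):
    # Keep only the `backuplimit` highest (most recent) revision numbers,
    # as _backup() does when infinitepushbackup.maxheadstobackup is set.
    if backuplimit is None or backuplimit < 0:
        return list(heads)
    return sorted(heads, reverse=True)[:backuplimit]

print(limit_heads([12, 97, 45, 60], 2))  # [97, 60]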
Example #8
    def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked

        Note: This is based on adjustlinkrev in core, but it's quite different.

        adjustlinkrev depends on the fact that the linkrev is the bottom-most
        node, and uses that as a stopping point for the ancestor traversal. We
        can't do that here because the linknode is not guaranteed to be the
        bottom-most one.

        In our code here, we actually know what a bunch of potential ancestor
        linknodes are, so instead of stopping the cheap-ancestor-traversal when
        we get to a linkrev, we stop when we see any of the known linknodes.
        """
        repo = self._repo
        cl = repo.changelog
        mfl = repo.manifestlog
        linknode = self.getnodeinfo()[2]

        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]

        if self._verifylinknode(revs, linknode):
            return linknode

        commonlogkwargs = {
            "revs": " ".join([hex(cl.node(rev)) for rev in revs]),
            "fnode": hex(fnode),
            "filepath": path,
            "user": shallowutil.getusername(repo.ui),
            "reponame": shallowutil.getreponame(repo.ui),
        }

        repo.ui.log("linkrevfixup", "adjusting linknode", **commonlogkwargs)

        # _adjustlinknode accesses the file node in a variety of manifests.
        # To avoid downloading large numbers of trees, temporarily limit the
        # tree fetch depth to 1.
        with repo.ui.timesection("adjustlinknode"), repo.ui.configoverride({
            ("treemanifest", "fetchdepth"):
                1
        }), perftrace.trace("Adjust Linknode"), progress.bar(
                repo.ui,
                _("adjusting linknode for %s") % self._path) as prog:
            perftrace.tracevalue("Path", self._path)
            perftrace.tracevalue("Source Nodes",
                                 [hex(cl.node(rev)) for rev in revs])
            pc = repo._phasecache
            seenpublic = False
            iterancs = repo.revs("reverse(::%ld)",
                                 revs).prefetch("text").iterctx()
            for i, ctx in enumerate(iterancs):
                ancrev = ctx.rev()
                if ancrev == srcrev and not inclusive:
                    continue
                prog.value = i
                # First, check locally-available history.
                lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode)
                if lnode is not None:
                    return lnode

                # Adjusting the linknode can be super-slow. To mitigate the
                # issue we use two heuristics: calling fastlog and forcing a
                # remotefilelog prefetch.
                if not seenpublic and pc.phase(repo, ancrev) == phases.public:
                    # If the commit is public and fastlog is enabled for this repo
                    # then we can try to fetch the right linknode via fastlog.
                    if repo.ui.configbool("fastlog", "enabled"):
                        lnode = self._linknodeviafastlog(
                            repo, path, ancrev, fnode, cl, mfl,
                            commonlogkwargs)
                        if lnode:
                            return lnode
                    # If fastlog is not enabled and/or failed, let's try
                    # prefetching
                    lnode = self._forceprefetch(repo, path, fnode, revs,
                                                commonlogkwargs)
                    if lnode:
                        return lnode
                    seenpublic = True

        return linknode
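
The docstring above describes the key difference from core Mercurial's adjustlinkrev: rather than stopping at a known bottom-most linkrev, the walk over ancestors stops as soon as it finds a commit whose manifest records the wanted file node. A toy, self-contained model of that idea over a hand-built DAG; find_introducing_ancestor and the dict-based parents/manifests are hypothetical simplifications of the changelog and manifestlog access in the real code:

def find_introducing_ancestor(start, parents, manifest_of, path, fnode):
    # Walk ancestors of `start` and return the first commit whose manifest
    # maps `path` to `fnode`, mimicking the scan in _adjustlinknode above.
    seen = set()
    stack = [start]
    while stack:
        rev = stack.pop()
        if rev in seen:
            continue
        seen.add(rev)
        if manifest_of(rev).get(path) == fnode:
            return rev
        stack.extend(parents(rev))
    return None

# Tiny linear history 0 -> 1 -> 2 -> 3; file "f" gets node "x" at rev 1.
parents = {3: [2], 2: [1], 1: [0], 0: []}
manifests = {3: {"f": "y"}, 2: {"f": "y"}, 1: {"f": "x"}, 0: {}}
print(find_introducing_ancestor(3, parents.get, manifests.get, "f", "x"))  # 1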