def _walk(self, match, event, span):
    """Replacement for filesystem._walk, hooking into Watchman.

    Whenever listignored is False and the Watchman client is available, use
    Watchman combined with saved state to possibly return only a subset of
    files.

    If fsmonitor.watchman-query-lock is enabled, the Watchman query is
    serialized behind a repo-level lock; otherwise a no-op context manager
    is used so the `with` statement below works either way.
    """
    if not self._ui.configbool("fsmonitor", "watchman-query-lock"):
        querylock = util.nullcontextmanager()
    else:
        querylock = self._repo._lock(
            self._repo.localvfs,
            "watchman-query-lock",
            True,  # wait for the lock rather than failing immediately
            None,
            None,
            _("watchman-query %s") % self._repo.origroot,
        )
    with querylock:
        return _innerwalk(self, match, event, span)
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    """Apply the cloud's view of the workspace (``cloudrefs``) to the local repo.

    Pulls missing heads and remote-bookmark targets from ``remotepath``,
    merges bookmarks, remote bookmarks, snapshots and obsmarkers, reconciles
    visible heads, then records the new sync state in ``lastsyncstate`` and
    the backup ``state``.

    Args:
        repo: local repository object.
        remotepath: path to pull missing commits from.
        lastsyncstate: previously recorded sync state (heads, bookmarks, ...).
        cloudrefs: the cloud workspace's refs (heads, bookmarks, obsmarkers, ...).
        maxage: if not None and >= 0, heads older than this many days are
            omitted from the pull.
        state: backup state to update with the newly pulled heads.
        tr: the active transaction used for bookmark/obsmarker/sync-state writes.
    """
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")
    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        # remotenames extension not loaded; remote-bookmark pulling is not wrapped.
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        # Drop heads whose recorded date is older than the maxage cutoff.
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    # When visibility tracking is on, compute the merged set of visible heads
    # (old visible + cloud + local), minus heads that have been removed on
    # either side. None means "no visibility update needed".
    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            # A previously-visible head that is now absent from either the
            # local or the cloud side has been deliberately hidden; drop it.
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        # snapshot extension not loaded: keep the previous snapshot state as-is.
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        # TODO(alexeyqu): pull snapshots separately
        newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = (
            [remotebookmarknodes] if remotebookmarknodes else []
        ) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            # No-op replacement used to temporarily disable wrapped functions
            # during the pulls below.
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete", disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks", disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled
            )
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {("pull", "automigrate"): False, ("treemanifest", "pullprefetchrevs"): ""},
            "cloudsyncpull",
        )

        prog = progress.bar(
            repo.ui, _("pulling from commit cloud"), total=len(headgroups)
        )
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                # Close pooled connections between groups so each pull starts fresh.
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        # NOTE(review): `as tr` rebinds the `tr` parameter to this snapshot
        # transaction, which is closed when the `with` exits; the later calls
        # below then receive the closed transaction — confirm this is intended.
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable. Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible. Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            # Suppress sharing of these corrective markers back to the cloud.
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version. Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state. These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)