def _updateheads(self, repo, newheads, tr): newheads = list(newheads) # Remove heads that are not actually heads, and preserve the ordering # in self.heads for heads that have not changed. unfi = repo.unfiltered() if len(newheads) > 1: realnewheads = list( unfi.nodes( "%ld", unfi.changelog.index2.headsancestors( list(unfi.revs("%ln", newheads))), )) else: realnewheads = list(newheads) realnewheadsset = set(realnewheads) newheads = util.removeduplicates( [head for head in self.heads if head in realnewheadsset] + realnewheads) if self.heads != newheads: self._logchange(self.heads, newheads) self.heads = newheads self.dirty = True self._invisiblerevs = None add = self._allheads.add for head in newheads: add(head) repo.invalidatevolatilesets() if self.dirty: tr.addfilegenerator("visibility", ("visibleheads", ), self._write) tr.addpostclose("allheads", lambda _tr: self._allheads.flush())
def _updateheads(self, repo, newheads, tr): newheads = list(newheads) # Remove heads that are not actually heads, and preserve the ordering # in self.heads for heads that have not changed. unfi = repo if len(newheads) > 1: cl = unfi.changelog hasnode = cl.hasnode realnewheads = list( cl.dag.headsancestors( [h for h in newheads if hasnode(h) and h != node.nullid])) else: realnewheads = list(newheads) realnewheadsset = set(realnewheads) oldheads = self.heads newheads = util.removeduplicates( [head for head in oldheads if head in realnewheadsset] + realnewheads) if oldheads != newheads: self._changecount += 1 self._logchange(oldheads, newheads) self.heads = newheads self._invisiblerevs = None add = self._allheads.add for head in newheads: add(head) repo.invalidatevolatilesets() tr.addpostclose("log-visibility", self._logposttransaction) tr.addpostclose("allheads", lambda _tr: self._allheads.flush())
def addbundleheads(self, bundleheads):
    """Mix bundle heads into the set of visible heads.

    Some of the bundle heads may be descendants of the visible heads.
    There shouldn't be too much overlap, so we ignore this and simply
    treat the bundle heads as additional visible heads.
    """
    combined = self.heads + bundleheads
    self.heads = util.removeduplicates(combined)
def _submitlocalchanges(repo, reponame, workspacename, lastsyncstate, failed, serv, tr):
    """Send local workspace changes (heads, bookmarks, remote bookmarks,
    snapshots and obsmarkers) to the commit cloud service.

    Returns a ``(synced, cloudrefs)`` tuple.  Returns ``(True, None)``
    when the local state already matches what was last synced and there
    is nothing to send.
    """
    localheads = _getheads(repo)
    localbookmarks = _getbookmarks(repo)
    localremotebookmarks = _getremotebookmarks(repo)
    localsnapshots = _getsnapshots(repo, lastsyncstate)
    obsmarkers = obsmarkersmod.getsyncingobsmarkers(repo)

    # If any commits failed to back up, exclude them. Revert any bookmark changes
    # that point to failed commits.
    if failed:
        localheads = [
            nodemod.hex(head)
            for head in repo.nodes("heads(draft() & ::%ls - %ld::)", localheads, failed)
        ]
        failedset = set(repo.nodes("draft() & %ld::", failed))
        # Iterate over a snapshot of the items since entries may be
        # deleted from localbookmarks inside the loop.
        for name, bookmarknode in list(localbookmarks.items()):
            if nodemod.bin(bookmarknode) in failedset:
                if name in lastsyncstate.bookmarks:
                    # Revert the bookmark to its last-synced target.
                    localbookmarks[name] = lastsyncstate.bookmarks[name]
                else:
                    del localbookmarks[name]

    # Work out what we should have synced locally (and haven't deliberately
    # omitted)
    omittedheads = set(lastsyncstate.omittedheads)
    omittedbookmarks = set(lastsyncstate.omittedbookmarks)
    omittedremotebookmarks = set(lastsyncstate.omittedremotebookmarks)
    localsyncedheads = [
        head for head in lastsyncstate.heads if head not in omittedheads
    ]
    localsyncedbookmarks = {
        name: node
        for name, node in lastsyncstate.bookmarks.items()
        if name not in omittedbookmarks
    }
    localsyncedremotebookmarks = {
        name: node
        for name, node in lastsyncstate.remotebookmarks.items()
        if name not in omittedremotebookmarks
    }

    remotebookmarkschanged = (
        _isremotebookmarkssyncenabled(repo.ui)
        and localremotebookmarks != localsyncedremotebookmarks
    )

    localsnapshotsset = set(localsnapshots)

    # Nothing to do if local state matches what was last synced.
    # version == 0 means we have never synced, so we must sync at least
    # once even if nothing changed.
    if (
        set(localheads) == set(localsyncedheads)
        and localbookmarks == localsyncedbookmarks
        and not remotebookmarkschanged
        and lastsyncstate.version != 0
        and not obsmarkers
        and localsnapshotsset == set(lastsyncstate.snapshots)
    ):
        # Nothing to send.
        return True, None

    # The local repo has changed. We must send these changes to the
    # cloud.

    # Work out the new cloud heads and bookmarks by merging in the
    # omitted items. We need to preserve the ordering of the cloud
    # heads so that smartlogs generally match.
    localandomittedheads = set(localheads).union(lastsyncstate.omittedheads)
    newcloudheads = util.removeduplicates(
        [head for head in lastsyncstate.heads if head in localandomittedheads]
        + localheads
    )
    newcloudbookmarks = {
        name: localbookmarks.get(name, lastsyncstate.bookmarks.get(name))
        for name in set(localbookmarks.keys()).union(lastsyncstate.omittedbookmarks)
    }

    # Work out what the new omitted heads and bookmarks are.
    newomittedheads = list(set(newcloudheads).difference(localheads))
    newomittedbookmarks = list(
        set(newcloudbookmarks.keys()).difference(localbookmarks.keys())
    )

    oldremotebookmarks = []
    newremotebookmarks = {}
    newomittedremotebookmarks = []
    if _isremotebookmarkssyncenabled(repo.ui):
        # do not need to submit local remote bookmarks if the feature is not enabled
        oldremotebookmarks = lastsyncstate.remotebookmarks.keys()
        newremotebookmarks = {
            name: localremotebookmarks.get(
                name, lastsyncstate.remotebookmarks.get(name)
            )
            for name in set(localremotebookmarks.keys()).union(
                lastsyncstate.omittedremotebookmarks
            )
        }
        newomittedremotebookmarks = list(
            set(newremotebookmarks.keys()).difference(localremotebookmarks.keys())
        )

    backuplock.progress(repo, "finishing synchronizing with '%s'" % workspacename)
    synced, cloudrefs = serv.updatereferences(
        reponame,
        workspacename,
        lastsyncstate.version,
        lastsyncstate.heads,
        newcloudheads,
        lastsyncstate.bookmarks.keys(),
        newcloudbookmarks,
        obsmarkers,
        oldremotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        localsnapshots,
        logopts={"metalogroot": hex(repo.svfs.metalog.root())},
    )
    if synced:
        # The server accepted our update; record the new state locally.
        logsyncop(
            repo,
            "to_cloud",
            cloudrefs.version,
            lastsyncstate.heads,
            newcloudheads,
            lastsyncstate.bookmarks,
            newcloudbookmarks,
            oldremotebookmarks,
            newremotebookmarks,
            lastsyncstate.snapshots,
            localsnapshots,
        )
        lastsyncstate.update(
            tr,
            newversion=cloudrefs.version,
            newheads=newcloudheads,
            newbookmarks=newcloudbookmarks,
            newremotebookmarks=newremotebookmarks,
            newomittedheads=newomittedheads,
            newomittedbookmarks=newomittedbookmarks,
            newomittedremotebookmarks=newomittedremotebookmarks,
            newsnapshots=localsnapshots,
        )
        obsmarkersmod.clearsyncingobsmarkers(repo)

    return synced, cloudrefs
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    """Apply the commit cloud state in ``cloudrefs`` to the local repo.

    Pulls missing heads (subject to the ``maxage`` cutoff, in days),
    merges bookmarks, remote bookmarks, snapshots and obsmarkers,
    updates head visibility, and records the new sync state in
    ``lastsyncstate`` and the backup ``state``.
    """
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        # Drop heads whose recorded date is older than maxage days.
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    # Work out the new set of visible heads when visibility tracking is
    # enabled: union of old visible, cloud and local heads, minus old
    # heads that are no longer present on either side.
    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    # Snapshot support is optional; fall back to the last-synced list
    # when the extension is not loaded.
    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        # Snapshots are pulled along with the regular heads.
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        # NOTE(review): this rebinds ``tr`` to a new transaction for the
        # remainder of the function — presumably intentional; confirm.
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable. Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible. Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            # Flag so the markers created here are not themselves queued
            # for syncing back to the cloud.
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version. Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state. These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    """Apply the commit cloud state in ``cloudrefs`` to the local repo.

    Pulls missing heads (subject to the ``maxage`` cutoff, in days),
    merges bookmarks and remote bookmarks, updates head visibility, and
    records the new sync state in ``lastsyncstate`` and the backup
    ``state``.
    """
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n)
        for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True)
    ]
    # (Removed a tautological "assert newheads == newheads" that was
    # always true and asserted nothing — evidently leftover debug code.)
    if maxage is not None and maxage >= 0:
        # Drop heads whose recorded date is older than maxage days.
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                ) % (omittedheadslen, maxage))
            # Show the newest omitted heads first, capped at
            # _maxomittedheadsoutput entries.
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n(" and %d older head\n", " and %d older heads\n",
                           remaining) % remaining)
                    break
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
                counter += 1
        # Convert to a set for the membership tests below.
        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    # Work out the new set of visible heads when visibility tracking is
    # enabled: union of old visible, cloud and local heads, minus old
    # heads that are no longer present on either side.
    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate)

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(repo.ui,
                                     list(remotebookmarknewnodes) + newheads,
                                     cloudrefs.headdates)
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate,
                        omittedheads, maxage))

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version. Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state. These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
def _submitlocalchanges(repo, reponame, workspacename, lastsyncstate, failed, serv, tr):
    """Send local workspace changes (heads, bookmarks, remote bookmarks,
    snapshots and obsmarkers) to the commit cloud service.

    Returns a ``(synced, cloudrefs)`` tuple.  Returns ``(True, None)``
    when the local state already matches what was last synced and there
    is nothing to send.

    Raises ccerror.SynchronizationError when the update would oscillate
    the workspace back to its pre-sync state.
    """
    localheads = _getheads(repo)
    localbookmarks = _getbookmarks(repo)
    localremotebookmarks = _getremotebookmarks(repo)
    localsnapshots = _getsnapshots(repo, lastsyncstate)
    obsmarkers = obsmarkersmod.getsyncingobsmarkers(repo)

    # If any commits failed to back up, exclude them. Revert any bookmark changes
    # that point to failed commits.
    if failed:
        localheads = [
            nodemod.hex(head) for head in repo.nodes(
                "heads(draft() & ::%ls - %ld::)", localheads, failed)
        ]
        failedset = set(repo.nodes("draft() & %ld::", failed))
        # Fix: iterate over a snapshot of the items — the loop may delete
        # entries from localbookmarks, and mutating a dict while iterating
        # its live items() view raises RuntimeError.
        for name, bookmarknode in list(localbookmarks.items()):
            if nodemod.bin(bookmarknode) in failedset:
                if name in lastsyncstate.bookmarks:
                    # Revert the bookmark to its last-synced target.
                    localbookmarks[name] = lastsyncstate.bookmarks[name]
                else:
                    del localbookmarks[name]

    # Work out what we should have synced locally (and haven't deliberately
    # omitted)
    omittedheads = set(lastsyncstate.omittedheads)
    omittedbookmarks = set(lastsyncstate.omittedbookmarks)
    localsyncedheads = [
        head for head in lastsyncstate.heads if head not in omittedheads
    ]
    localsyncedbookmarks = {
        name: node
        for name, node in lastsyncstate.bookmarks.items()
        if name not in omittedbookmarks
    }

    remotebookmarkschanged = (_isremotebookmarkssyncenabled(
        repo.ui) and localremotebookmarks != lastsyncstate.remotebookmarks)

    localsnapshotsset = set(localsnapshots)

    # Nothing to do if local state matches what was last synced.
    # version == 0 means we have never synced, so we must sync at least
    # once even if nothing changed.
    if (set(localheads) == set(localsyncedheads)
            and localbookmarks == localsyncedbookmarks
            and not remotebookmarkschanged and lastsyncstate.version != 0
            and not obsmarkers
            and localsnapshotsset == set(lastsyncstate.snapshots)):
        # Nothing to send.
        return True, None

    # The local repo has changed. We must send these changes to the
    # cloud.

    # Work out the new cloud heads and bookmarks by merging in the
    # omitted items. We need to preserve the ordering of the cloud
    # heads so that smartlogs generally match.
    localandomittedheads = set(localheads).union(lastsyncstate.omittedheads)
    newcloudheads = util.removeduplicates(
        [head for head in lastsyncstate.heads if head in localandomittedheads]
        + localheads)
    newcloudbookmarks = {
        name: localbookmarks.get(name, lastsyncstate.bookmarks.get(name))
        for name in set(localbookmarks.keys()).union(
            lastsyncstate.omittedbookmarks)
    }

    # Work out what the new omitted heads and bookmarks are.
    newomittedheads = list(set(newcloudheads).difference(localheads))
    newomittedbookmarks = list(
        set(newcloudbookmarks.keys()).difference(localbookmarks.keys()))

    newcloudsnapshots = util.removeduplicates(
        [s for s in lastsyncstate.snapshots if s in localsnapshotsset]
        + localsnapshots)

    # Check for workspace oscillation. This is where we try to revert the
    # workspace back to how it was immediately prior to applying the cloud
    # changes at the start of the sync. This is usually an error caused by
    # inconsistent obsmarkers.
    if lastsyncstate.oscillating(newcloudheads, newcloudbookmarks,
                                 newcloudsnapshots):
        raise ccerror.SynchronizationError(
            repo.ui,
            _("oscillating commit cloud workspace detected.\n"
              "check for commits that are visible in one repo but hidden in another,\n"
              "and hide or unhide those commits in all places."),
        )

    oldremotebookmarks = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        # do not need to submit local remote bookmarks if the feature is not enabled
        oldremotebookmarks = lastsyncstate.remotebookmarks.keys()
        newremotebookmarks = localremotebookmarks

    backuplock.progress(repo, "finishing synchronizing with '%s'" % workspacename)
    synced, cloudrefs = serv.updatereferences(
        reponame,
        workspacename,
        lastsyncstate.version,
        lastsyncstate.heads,
        newcloudheads,
        lastsyncstate.bookmarks.keys(),
        newcloudbookmarks,
        obsmarkers,
        oldremotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        localsnapshots,
    )
    if synced:
        # The server accepted our update; record the new state locally.
        logsyncop(
            repo,
            "to_cloud",
            cloudrefs.version,
            lastsyncstate.heads,
            newcloudheads,
            lastsyncstate.bookmarks,
            newcloudbookmarks,
            oldremotebookmarks,
            newremotebookmarks,
            lastsyncstate.snapshots,
            localsnapshots,
        )
        lastsyncstate.update(
            tr,
            cloudrefs.version,
            newcloudheads,
            newcloudbookmarks,
            newomittedheads,
            newomittedbookmarks,
            lastsyncstate.maxage,
            newremotebookmarks,
            localsnapshots,
        )
        obsmarkersmod.clearsyncingobsmarkers(repo)

    return synced, cloudrefs
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    """Apply the commit cloud state in ``cloudrefs`` to the local repo.

    Pulls missing heads (subject to the ``maxage`` cutoff, in days)
    via the ``pull`` command with obsmarker/bookmark/remotename pulling
    disabled, merges bookmarks, remote bookmarks, snapshots and
    obsmarkers, updates head visibility, and records the new sync state
    in ``lastsyncstate`` and the backup ``state``.
    """
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    # remotenames is optional; it is only needed so its pull hook can be
    # disabled below.
    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        # Drop heads whose recorded date is older than maxage days.
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(
                _("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    # Work out the new set of visible heads when visibility tracking is
    # enabled: union of old visible, cloud and local heads, minus old
    # heads that are no longer present on either side.
    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(repo,
                                                     cloudrefs.remotebookmarks,
                                                     lastsyncstate)

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        # NOTE(review): the loop variable shadows any module-level
        # ``node`` import within this scope.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    # Snapshot support is optional; fall back to the last-synced list
    # when the extension is not loaded.
    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        # TODO(alexeyqu): pull snapshots separately
        newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = ([remotebookmarknodes] if remotebookmarknodes else
                      []) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete",
                                             disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks",
                                              disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled)
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {
                ("pull", "automigrate"): False,
                ("treemanifest", "pullprefetchrevs"): ""
            },
            "cloudsyncpull",
        )

        prog = progress.bar(repo.ui, _("pulling from commit cloud"),
                            total=len(headgroups))
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate))

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        # NOTE(review): this rebinds ``tr`` to a new transaction for the
        # remainder of the function — presumably intentional; confirm.
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(tr,
                                     addnodes=addedsnapshots,
                                     removenodes=removedsnapshots)

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable. Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible. Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            ))
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join(
                    [nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            # Flag so the markers created here are not themselves queued
            # for syncing back to the cloud.
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo,
                                       [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version. Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state. These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)