def _hidenodes(repo, nodes):
    unfi = repo
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        markers = [(unfi[n], ()) for n in nodes]
        obsolete.createmarkers(repo, markers)
    if visibility.tracking(repo):
        visibility.remove(repo, nodes)
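# A minimal usage sketch (hypothetical; not part of the original module).  The
# assumption here is that callers take the repo lock and open a transaction
# before hiding nodes, so the visibility/obsstore writes land together.
def _hidenodes_example(repo, nodes):
    # hypothetical wrapper illustrating the assumed calling convention
    with repo.lock(), repo.transaction("hide"):
        _hidenodes(repo, nodes)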
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots

    newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n)
        for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True
        )
    ]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                )
                % (omittedheadslen, maxage)
            )
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n(" and %d older head\n", " and %d older heads\n", remaining)
                        % remaining
                    )
                    break
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
                counter = counter + 1
        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            repo.ui, list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(
            repo, tr, cloudrefs.bookmarks, lastsyncstate, omittedheads, maxage
        )
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
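# Illustrative sketch only: the real _partitionheads used above is defined
# elsewhere in this module and may behave differently.  This hypothetical helper
# shows the general idea behind "partition the heads into groups we can pull
# together" by bucketing heads whose headdates fall within the same time window.
def _groupheadsbydate_sketch(heads, headdates, windowsecs=86400):
    """Group hex heads by date proximity (assumed behaviour, for illustration)."""
    groups = []
    current = []
    lastdate = None
    for head in sorted(heads, key=lambda h: headdates.get(h, 0)):
        date = headdates.get(head, 0)
        if current and lastdate is not None and date - lastdate > windowsecs:
            groups.append(current)
            current = []
        current.append(head)
        lastdate = date
    if current:
        groups.append(current)
    return groups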
def split(ui, repo, *revs, **opts):
    """split a changeset into smaller changesets

    Prompt for hunks to be selected until exhausted. Each selection of hunks
    will form a separate changeset, in order from parent to child: the first
    selection will form the first changeset, the second selection will form
    the second changeset, and so on.

    Operates on the current revision by default. Use --rev to split a given
    changeset instead.
    """
    newcommits = []

    revarg = (list(revs) + opts.get("rev")) or ["."]
    if len(revarg) != 1:
        msg = _("more than one revset is given")
        hnt = _("use either `hg split <rs>` or `hg split --rev <rs>`, not both")
        raise error.Abort(msg, hint=hnt)
    rev = scmutil.revsingle(repo, revarg[0])
    if opts.get("no_rebase"):
        torebase = ()
    else:
        torebase = list(map(hex, repo.nodes("descendants(%d) - (%d)", rev, rev)))

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)
        if torebase:
            cmdutil.checkunfinished(repo)
        ctx = repo[rev]
        r = ctx.hex()
        allowunstable = visibility.tracking(repo) or obsolete.isenabled(
            repo, obsolete.allowunstableopt
        )
        if not allowunstable:
            # XXX We should check head revs
            if repo.revs("(%d::) - %d", rev, rev):
                raise error.Abort(_("cannot split commit: %s not a head") % ctx)
        if len(ctx.parents()) > 1:
            raise error.Abort(_("cannot split merge commits"))
        prev = ctx.p1()
        bmupdate = common.bookmarksupdater(repo, ctx.node())
        bookactive = repo._activebookmark
        if bookactive is not None:
            repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
        bookmarks.deactivate(repo)
        hg.update(repo, prev)

        commands.revert(ui, repo, rev=r, all=True)

        def haschanges():
            modified, added, removed, deleted = repo.status()[:4]
            return modified or added or removed or deleted

        # We need to detect the case where the user selects all remaining
        # changes, as that will end the split.  That's the commit we want to
        # mark as the result of the split.  To do this, wrap the recordfilter
        # function and compare the output to see if it contains all the
        # originalchunks.
        shouldrecordmutation = [False]

        def mutinfo(extra):
            if shouldrecordmutation[0]:
                return mutation.record(
                    repo,
                    extra,
                    [ctx.node()],
                    "split",
                    splitting=[c.node() for c in newcommits],
                )

        def recordfilter(ui, originalchunks, operation=None):
            chunks, newopts = cmdutil.recordfilter(ui, originalchunks, operation)
            if cmdutil.comparechunks(chunks, originalchunks):
                shouldrecordmutation[0] = True
            return chunks, newopts

        msg = (
            "HG: This is the original pre-split commit message. "
            "Edit it as appropriate.\n\n"
        )
        msg += ctx.description()
        opts["message"] = msg
        opts["edit"] = True
        opts["_commitmutinfofunc"] = mutinfo
        try:
            while haschanges():
                pats = ()
                with repo.transaction("split"):
                    cmdutil.dorecord(
                        ui, repo, commands.commit, "commit", False, recordfilter, *pats, **opts
                    )
                # TODO: Does not seem like the best way to do this.
                # We should make dorecord return the newly created commit.
                newcommits.append(repo["."])
                if haschanges():
                    if ui.prompt("Done splitting? [yN]", default="n") == "y":
                        shouldrecordmutation[0] = True
                        with repo.transaction("split"):
                            commands.commit(ui, repo, **opts)
                        newcommits.append(repo["."])
                        break
                else:
                    ui.status(_("no more change to split\n"))
        except Exception:
            # Rollback everything
            hg.updaterepo(repo, r, True)  # overwrite=True
            if newcommits:
                visibility.remove(repo, [c.node() for c in newcommits])

            if bookactive is not None:
                bookmarks.activate(repo, bookactive)
            raise

        if newcommits:
            phabdiffs = {}
            for c in newcommits:
                phabdiff = diffprops.parserevfromcommitmsg(repo[c].description())
                if phabdiff:
                    phabdiffs.setdefault(phabdiff, []).append(c)
            if any(len(commits) > 1 for commits in phabdiffs.values()):
                hintutil.trigger(
                    "split-phabricator", ui.config("split", "phabricatoradvice")
                )

            tip = repo[newcommits[-1]]
            with repo.transaction("post-split"):
                bmupdate(tip.node())
                if bookactive is not None:
                    bookmarks.activate(repo, bookactive)
                if obsolete.isenabled(repo, obsolete.createmarkersopt):
                    obsolete.createmarkers(
                        repo, [(repo[r], newcommits)], operation="split"
                    )
            if torebase:
                rebaseopts = {"dest": "_destrestack(SRC)", "rev": torebase}
                rebase.rebase(ui, repo, **rebaseopts)
            unfi = repo
            with repo.transaction("post-split-hide"):
                visibility.remove(repo, [unfi[r].node()])
def _hidenodes(repo, nodes):
    unfi = repo
    if visibility.tracking(repo):
        visibility.remove(repo, nodes)
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    pullcmd, pullopts = ccutil.getcommandandoptions("pull|pul")

    try:
        remotenames = extensions.find("remotenames")
    except KeyError:
        remotenames = None

    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo.unfiltered()
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_(" %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknodes = []
    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

        # Pull public commits, which remote bookmarks point to, if they are not
        # present locally.
        for node in newremotebookmarks.values():
            if node not in unfi:
                remotebookmarknodes.append(node)

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        # TODO(alexeyqu): pull snapshots separately
        newheads += addedsnapshots

    backuplock.progresspulling(repo, [nodemod.bin(node) for node in newheads])

    if remotebookmarknodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = (
            [remotebookmarknodes] if remotebookmarknodes else []
        ) + _partitionheads(newheads, cloudrefs.headdates)

        def disabled(*args, **kwargs):
            pass

        # Disable pulling of obsmarkers
        wrapobs = extensions.wrappedfunction(exchange, "_pullobsolete", disabled)

        # Disable pulling of bookmarks
        wrapbook = extensions.wrappedfunction(exchange, "_pullbookmarks", disabled)

        # Disable pulling of remote bookmarks
        if remotenames:
            wrapremotenames = extensions.wrappedfunction(
                remotenames, "pullremotenames", disabled
            )
        else:
            wrapremotenames = util.nullcontextmanager()

        # Disable automigration and prefetching of trees
        configoverride = repo.ui.configoverride(
            {("pull", "automigrate"): False, ("treemanifest", "pullprefetchrevs"): ""},
            "cloudsyncpull",
        )

        prog = progress.bar(
            repo.ui, _("pulling from commit cloud"), total=len(headgroups)
        )
        with wrapobs, wrapbook, wrapremotenames, configoverride, prog:
            for index, headgroup in enumerate(headgroups):
                headgroupstr = " ".join([head[:12] for head in headgroup])
                repo.ui.status(_("pulling %s\n") % headgroupstr)
                prog.value = (index, headgroupstr)
                pullopts["rev"] = headgroup
                pullcmd(repo.ui, repo, remotepath, **pullopts)
                repo.connectionpool.close()

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    if _isremotebookmarkssyncenabled(repo.ui):
        _updateremotebookmarks(repo, tr, newremotebookmarks)

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        unfi = repo.unfiltered()
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        cloudrefs.version,
        cloudrefs.heads,
        cloudrefs.bookmarks,
        omittedheads,
        omittedbookmarks,
        maxage,
        newremotebookmarks,
        newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)