def _rebase(orig, ui, repo, *pats, **opts):
    """Wrap 'hg rebase' with tweakdefaults behavior.

    Adds a current-date stamp (unless configured off), requires an explicit
    destination, and fast-forwards the working copy / active bookmark instead
    of rebasing when the destination is a descendant of the current commit.
    """
    # Stamp rebased commits with the current date unless the user supplied
    # one or asked (via config) to keep the original dates.
    if not opts.get("date") and not ui.configbool("tweakdefaults", "rebasekeepdate"):
        opts["date"] = currentdate()

    # In-progress continuations and restacks go straight to the real command.
    if any(opts.get(flag) for flag in ("continue", "abort", "restack")):
        return orig(ui, repo, *pats, **opts)

    # 'hg rebase' w/o args should do nothing
    if not opts.get("dest"):
        raise error.Abort("you must specify a destination (-d) for the rebase")

    # 'hg rebase' can fast-forward bookmark
    wdirparent = repo["."]

    # Only fast-forward the bookmark if no source nodes were explicitly
    # specified.
    explicitsource = opts.get("base") or opts.get("source") or opts.get("rev")
    if not explicitsource:
        target = scmutil.revsingle(repo, opts.get("dest"))
        # Destination is a descendant of '.' -> a plain update suffices.
        if wdirparent == target.ancestor(wdirparent):
            mark = repo._activebookmark
            result = hg.updatetotally(ui, repo, target.node(), mark)
            if mark:
                # Move the active bookmark along with the working copy.
                with repo.wlock():
                    bookmarks.update(repo, [wdirparent.node()], target.node())
            return result

    return orig(ui, repo, *pats, **opts)
def snapshotcreate(ui, repo, *args, **opts):
    """Create a snapshot commit of the working copy.

    Prints the hex node of the new snapshot. With --clean, afterwards brings
    the working copy back to its first parent and removes the snapshot's
    untracked/metadata files (best effort).
    """

    def removesnapshotfiles(ui, repo, metadata):
        """Purge the working copy and delete the snapshot's localvfs files."""
        match = scmutil.match(repo[None])
        # purge returns (files, dirs, errors); renamed the third element from
        # `error` so it no longer shadows the `error` module used in this file.
        files, dirs, errors = repo.dirstate._fs.purge(
            match, [], True, True, False, False
        )
        for m in errors:
            ui.warn(_("warning: %s\n") % m)
        tr = repo.currenttransaction()
        if tr:
            # Keep the open transaction from re-creating the files we are
            # about to delete.
            for f in metadata.localvfsfiles:
                tr.removefilegenerator(f.path)
        for f in metadata.localvfsfiles:
            try:
                repo.localvfs.unlinkpath(f.path, ignoremissing=True)
            except OSError:
                ui.warn(_("%s cannot be removed") % f.path)

    with repo.wlock(), repo.lock():
        result = createsnapshotcommit(ui, repo, opts)
        if not result:
            ui.status(_("nothing changed\n"))
            return
        node, metadata = result
        node = nodemod.hex(node)
        with repo.transaction("update-snapshot-list") as tr:
            repo.snapshotlist.update(tr, addnodes=[node])
        ui.status(_("snapshot %s created\n") % node)
        if opts.get("clean"):
            try:
                # We want to bring the working copy to the p1 state
                rev = repo[None].p1()
                hg.updatetotally(ui, repo, rev, rev, clean=True)
                removesnapshotfiles(ui, repo, metadata)
            # Deliberately best-effort: the snapshot was already created, so
            # even a Ctrl-C during cleanup only produces a warning.
            except (KeyboardInterrupt, Exception) as exc:
                ui.warn(_("failed to clean the working copy: %s\n") % exc)
def _maybeupdateworkingcopy(repo, currentnode):
    # Move the working copy when `currentnode` (the current checkout) has been
    # rewritten remotely. Returns the result of hg.updatetotally when an
    # update is performed, 0 otherwise.
    ui = repo.ui
    # Only act when the working copy is actually sitting on `currentnode`.
    if repo["."].node() != currentnode:
        return 0
    # Live (non-obsolete) successors of the current commit.
    successors = list(repo.nodes("successors(%n) - obsolete()", currentnode))
    if len(successors) == 0:
        return 0
    if len(successors) == 1:
        destination = successors[0]
        # Successor may not be present locally, or may be the node itself.
        if destination not in repo or destination == currentnode:
            return 0
        ui.status(
            _("current revision %s has been moved remotely to %s\n")
            % (nodemod.short(currentnode), nodemod.short(destination)),
            component="commitcloud",
        )
        if ui.configbool("commitcloud", "updateonmove"):
            # NOTE(review): only mutable (draft) destinations are auto-updated
            # to; for a public destination this falls through to `return 0` —
            # confirm that is the intended behavior.
            if repo[destination].mutable():
                # Record progress in the backup lock so concurrent syncs can
                # report what this process is doing.
                backuplock.progress(
                    repo,
                    "updating %s from %s to %s"
                    % (
                        repo.wvfs.base,
                        nodemod.short(currentnode),
                        nodemod.short(destination),
                    ),
                )
                ui.status(_("updating to %s\n") % nodemod.short(destination))
                with repo.wlock(), repo.lock(), repo.transaction("sync-checkout"):
                    # "noconflict" aborts instead of merging local changes.
                    return hg.updatetotally(
                        ui, repo, destination, destination, updatecheck="noconflict"
                    )
        else:
            # Auto-update disabled: just hint the user about the config knob.
            hintutil.trigger("commitcloud-update-on-move")
    else:
        # Ambiguous: the commit was split/duplicated remotely, so we cannot
        # pick a destination automatically.
        ui.status(
            _(
                "current revision %s has been replaced remotely with multiple revisions\n"
                "(run 'hg update HASH' to go to the desired revision)\n"
            )
            % nodemod.short(currentnode),
            component="commitcloud",
        )
    return 0
def restore(ui, repo, csid, clean=False):
    """Restore the working copy to the state of snapshot `csid`.

    `csid` is the hex changeset id of a snapshot stored on the server. With
    clean=True local changes are discarded first; otherwise an unclean
    working copy aborts.
    """
    # i18n fix: keep interpolated values out of the _() lookup key, otherwise
    # the msgid can never match a catalog entry.
    ui.status(_("Will restore snapshot %s\n") % csid, component="snapshot")
    snapshot = repo.edenapi.fetchsnapshot(
        getreponame(repo),
        {
            "cs_id": bytes.fromhex(csid),
        },
    )
    # Once merges/conflicted states are supported, we'll need to support more
    # than one parent
    assert isinstance(snapshot["hg_parents"], bytes)
    with repo.wlock():
        if _hasanychanges(repo):
            if clean:
                _fullclean(ui, repo)
            else:
                raise error.Abort(
                    _(
                        "Can't restore snapshot with unclean working copy, unless --clean is specified"
                    )
                )
        ui.status(
            _("Updating to parent %s\n") % snapshot["hg_parents"].hex(),
            component="snapshot",
        )
        hg.updatetotally(
            ui, repo, repo[snapshot["hg_parents"]], None, updatecheck="abort"
        )
        files2download = []
        for (path, fc) in snapshot["file_changes"]:
            matcher = scmutil.matchfiles(repo, [path])
            fctx = repo[None][path]
            # fc is either a string or a dict, can't use "Deletion" in fc because
            # that applies to "UntrackedDeletion" as well
            if fc == "Deletion":
                cmdutil.remove(ui, repo, matcher, "", False, False)
            elif fc == "UntrackedDeletion":
                if not fctx.exists():
                    # File was hg added and is now missing. Let's add an empty file first
                    repo.wwrite(path, b"", "")
                cmdutil.add(ui, repo, matcher, prefix="", explicitonly=True)
                # Unlink again so the file ends up "added but missing".
                fctx.remove()
            elif "Change" in fc:
                if fctx.exists():
                    # File exists, was modified
                    fctx.remove()
                files2download.append((path, fc["Change"]["upload_token"]))
            elif "UntrackedChange" in fc:
                if fctx.exists():
                    # File was hg rm'ed and then overwritten
                    cmdutil.remove(
                        ui, repo, matcher, prefix="", after=False, force=False
                    )
                files2download.append((path, fc["UntrackedChange"]["upload_token"]))
        repo.edenapi.downloadfiles(getreponame(repo), repo.root, files2download)
        for (path, fc) in snapshot["file_changes"]:
            if "Change" in fc:
                # Doesn't hurt to add again if it was already tracked
                cmdutil.add(ui, repo, scmutil.matchfiles(repo, [path]), "", True)
def update(ui, repo, csid, clean=False):
    """Restore the working copy to the state of snapshot `csid`.

    `csid` is the hex changeset id of a snapshot. Updates to the snapshot's
    parent if needed (pulling it when absent locally), replays the recorded
    file changes, and records the restored snapshot in the metalog. With
    clean=True local changes are discarded first; otherwise an unclean
    working copy aborts. Progress and timing are reported via ui.status.
    """
    # Fix: `csid` is already a hex str; the previous `csid.format()` was a
    # no-op on str (and would crash on bytes).
    ui.status(_("Will restore snapshot {}\n").format(csid), component="snapshot")
    start_snapshot = time.perf_counter()
    csid_bytes = bytes.fromhex(csid)
    snapshot = fetchsnapshot(repo, csid_bytes)

    # Once merges/conflicted states are supported, we'll need to support more
    # than one parent
    assert isinstance(snapshot["hg_parents"], bytes)

    with repo.wlock(), repo.lock(), repo.transaction("snapshot-restore"):
        haschanges = _hasanychanges(repo)
        if haschanges and not clean:
            raise error.Abort(
                _(
                    "Can't restore snapshot with unclean working copy, unless --clean is specified"
                )
            )

        parent = snapshot["hg_parents"]
        if parent != repo.dirstate.p1():
            if haschanges:
                _fullclean(ui, repo, [])
            start_parent_update = time.perf_counter()
            ui.status(
                _("Updating to parent {}\n").format(parent.hex()),
                component="snapshot",
            )
            # This will resolve the parent revision even if it's not available
            # locally and needs pulling from server.
            if parent not in repo:
                repo.pull(headnodes=(parent,))
            hg.updatetotally(ui, repo, parent, None, clean=False, updatecheck="abort")
            duration = time.perf_counter() - start_parent_update
            ui.status(
                _("Updated to parent {parent} in {duration:0.5f} seconds\n").format(
                    parent=parent.hex(), duration=duration
                ),
                component="snapshot",
            )
        elif haschanges:
            # We might be able to reuse files that were already downloaded locally,
            # so let's not delete files related to the snapshot
            _fullclean(
                ui, repo, [f"path:{path}" for (path, _) in snapshot["file_changes"]]
            )

        # (removed dead local `pathtype`: it was appended to but never read)
        files2download = []
        wctx = repo[None]
        for (path, fc) in snapshot["file_changes"]:
            fctx = wctx[path]
            # fc is either a string or a dict, can't use `"Deletion" in fc` because
            # that applies to "UntrackedDeletion" as well
            if fc == "Deletion":
                wctx.forget([path], quiet=True)
                if fctx.exists():
                    fctx.remove()
            elif fc == "UntrackedDeletion":
                if repo.dirstate[path] == "?":
                    # File was hg added then deleted
                    repo.dirstate.add(path)
                elif repo.dirstate[path] == "r":
                    # Missing file, but its marked as deleted. To mark it as missing,
                    # we need to first create a dummy file and mark it as normal
                    repo.wwrite(path, b"", "")
                    repo.dirstate.normal(path)
                fctx = wctx[path]
                if fctx.exists():
                    fctx.remove()
            elif "Change" in fc:
                filetype = fc["Change"]["file_type"]
                files2download.append((path, fc["Change"]["upload_token"], filetype))
            elif "UntrackedChange" in fc:
                wctx.forget([path], quiet=True)
                filetype = fc["UntrackedChange"]["file_type"]
                files2download.append(
                    (
                        path,
                        fc["UntrackedChange"]["upload_token"],
                        filetype,
                    )
                )

        ui.status(
            _("Downloading files for restoring snapshot\n"),
            component="snapshot",
        )
        start_download = time.perf_counter()
        repo.edenapi.downloadfiles(repo.root, files2download)
        duration = time.perf_counter() - start_download
        ui.status(
            _(
                "Downloaded files for restoring snapshot in {duration:0.5f} seconds\n"
            ).format(duration=duration),
            component="snapshot",
        )

        # Need to add changed files after they are populated in the working dir
        wctx.add(
            [path for (path, fc) in snapshot["file_changes"] if "Change" in fc],
            quiet=True,
        )
        storelatest(repo.metalog(), csid_bytes, snapshot["bubble_id"])

    duration = time.perf_counter() - start_snapshot
    ui.status(
        _("Restored snapshot in {duration:0.5f} seconds\n").format(duration=duration),
        component="snapshot",
    )