def _subscriptionvfs(repo):
    """Return a vfs rooted at the user's commit cloud subscription dir.

    The directory lives under the per-user "connected_subscribers_path"
    config location, in ``.commitcloud/joined``.
    """
    base = ccutil.getuserconfigpath(repo.ui, "connected_subscribers_path")
    joined = os.path.join(base, ".commitcloud", "joined")
    return vfsmod.vfs(joined)
def __init__(self, ui):
    """Set up token storage: a private (0600) vfs over the user's token
    path, a platform-username-derived secret name, and the secrets-tool
    config flag.
    """
    self.ui = ui
    tokenpath = ccutil.getuserconfigpath(self.ui, "user_token_path")
    self.vfs = vfsmod.vfs(tokenpath)
    # Token files must not be group/world readable.
    self.vfs.createmode = 0o600
    # using platform username
    username = util.getuser()
    self.secretname = ("%s_%s" % (self.servicename, username)).upper()
    self.usesecretstool = self.ui.configbool("commitcloud", "use_secrets_tool")
def repacklockvfs(repo):
    """Choose the vfs under which repack lock files live."""
    if not util.safehasattr(repo, "name"):
        return repo.svfs
    # Lock in the shared cache so repacks across multiple copies of the
    # same repo are coordinated.
    sharedcachepath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY)
    return vfs.vfs(sharedcachepath)
def testflock(self):
    """While this process holds the flock, another process must fail to
    take it; once released, the other process must succeed."""
    root = os.environ["TESTTMP"]
    opener = vfs.vfs(root)
    lockname = "testlock"
    lockfile = opener.join(lockname)
    with extutil.flock(lockfile, "testing a lock", timeout=0):
        # Lock is held here, so the helper process should time out.
        result = self.otherprocesslock(opener, lockname)
        self.assertEqual(
            result, locktimeout, "other process should not have taken the lock"
        )
    # Lock released above; the helper process should now acquire it.
    result = self.otherprocesslock(opener, lockname)
    self.assertEqual(
        result, locksuccess, "other process should have taken the lock"
    )
def cleanupoldbackups(repo):
    """Trim shelve backups down to the configured ``shelve.maxbackups``.

    Backups older than the newest ``maxbackups`` files are deleted (all
    files sharing the backup base name, across every known extension).
    Backups whose mtime equals the boundary mtime are kept, because
    mtime alone cannot establish their exact order.
    """
    vfs = vfsmod.vfs(repo.localvfs.join(backupdir))
    maxbackups = repo.ui.configint("shelve", "maxbackups")
    hgfiles = [f for f in vfs.listdir() if f.endswith("." + patchextension)]
    # Sort oldest-first by mtime so the slice below selects the oldest.
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    # max(0, ...) guards against maxbackups > len(hgfiles): the naive
    # negative slice bound (e.g. 3 files, maxbackups=5 -> hgfiles[:-2])
    # would wrap around and delete the oldest backups even though fewer
    # than maxbackups exist.
    for mtime, f in hgfiles[: max(0, len(hgfiles) - maxbackups)]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[: -(1 + len(patchextension))]
        for ext in shelvefileextensions:
            vfs.tryunlink(base + "." + ext)
def flock(lockpath, description, timeout=-1): """A flock based lock object. Currently it is always non-blocking. Note that since it is flock based, you can accidentally take it multiple times within one process and the first one to be released will release all of them. So the caller needs to be careful to not create more than one instance per lock. """ # best effort lightweight lock try: import fcntl fcntl.flock except ImportError: # fallback to Mercurial lock vfs = vfsmod.vfs(os.path.dirname(lockpath)) with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout): yield return # make sure lock file exists util.makedirs(os.path.dirname(lockpath)) with open(lockpath, "a"): pass lockfd = os.open(lockpath, os.O_RDWR, 0o664) start = time.time() while True: try: fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as ex: if ex.errno == errno.EAGAIN: if timeout != -1 and time.time() - start > timeout: raise error.LockHeld(errno.EAGAIN, lockpath, description, "") else: time.sleep(0.05) continue raise try: yield finally: fcntl.flock(lockfd, fcntl.LOCK_UN) os.close(lockfd)
def basic(repo): print("* neither file exists") # calls function repo.cached repo.invalidate() print("* neither file still exists") # uses cache repo.cached # create empty file f = open("x", "w") f.close() repo.invalidate() print("* empty file x created") # should recreate the object repo.cached f = open("x", "w") f.write("a") f.close() repo.invalidate() print("* file x changed size") # should recreate the object repo.cached repo.invalidate() print("* nothing changed with either file") # stats file again, reuses object repo.cached # atomic replace file, size doesn't change # hopefully st_mtime doesn't change as well so this doesn't use the cache # because of inode change f = vfsmod.vfs(".")("x", "w", atomictemp=True) f.write("b") f.close() repo.invalidate() print("* file x changed inode") repo.cached # create empty file y f = open("y", "w") f.close() repo.invalidate() print("* empty file y created") # should recreate the object repo.cached f = open("y", "w") f.write("A") f.close() repo.invalidate() print("* file y changed size") # should recreate the object repo.cached f = vfsmod.vfs(".")("y", "w", atomictemp=True) f.write("B") f.close() repo.invalidate() print("* file y changed inode") repo.cached f = vfsmod.vfs(".")("x", "w", atomictemp=True) f.write("c") f.close() f = vfsmod.vfs(".")("y", "w", atomictemp=True) f.write("C") f.close() repo.invalidate() print("* both files changed inode") repo.cached
def __init__(self, ui):
    """Remember the ui and open a private vfs over the user token path."""
    self.ui = ui
    tokenpath = ccutil.getuserconfigpath(self.ui, "user_token_path")
    self.vfs = vfsmod.vfs(tokenpath)
    # Keep the token file readable by the owner only.
    self.vfs.createmode = 0o600
# test revlog interaction about raw data (flagprocessor) from __future__ import absolute_import, print_function import sys from edenscm.mercurial import encoding, node, revlog, transaction, vfs from hghave import require require(["py2"]) # TESTTMP is optional. This makes it convenient to run without run-tests.py tvfs = vfs.vfs(encoding.environ.get("TESTTMP", b"/tmp")) # Enable generaldelta otherwise revlog won't use delta as expected by the test tvfs.options = {"generaldelta": True, "revlogv1": True} # The test wants to control whether to use delta explicitly, based on # "storedeltachains". revlog.revlog._isgooddelta = lambda self, d, tlen: d and self.storedeltachains def abort(msg): print("abort: %s" % msg) # Return 0 so run-tests.py could compare the output. sys.exit() # Register a revlog processor for flag EXTSTORED.
def setUp(self):
    """Give each test a throwaway vfs (in a fresh temp dir) and a fresh ui."""
    tmpdir = tempfile.mkdtemp(dir=os.getcwd())
    # audit=False: the temp dir is not inside a repo, so path auditing
    # would only get in the way.
    self.vfs = vfs.vfs(tmpdir, audit=False)
    self.ui = ui.ui()
def shareawarecachevfs(repo):
    """Return the cache vfs, following ``sharedpath`` for shared repos."""
    if not repo.shared():
        return repo.cachevfs
    cachedir = os.path.join(repo.sharedpath, "cache")
    return vfsmod.vfs(cachedir)