def _acquire(self, path):
    """Acquire a new interprocess lock on *path*.

    Replaces the currently tracked lock with a fresh
    ``InterProcessLock`` and acquires it.

    :param path: filesystem path to lock.
    :returns: the result of ``InterProcessLock.acquire()``.
    :raises RuntimeError: if the tracked lock is already held.
    """
    held = self._lock
    # Refuse to stack locks: acquiring while one is held would
    # silently drop our reference to the old (still-locked) lock.
    if held and held.acquired:
        raise RuntimeError(
            _("Attempting to lock {} when {} "
              "is already locked.").format(path, held))
    new_lock = lockutils.InterProcessLock(path=path)
    self._lock = new_lock
    return new_lock.acquire()
def test_interprocess_lock(self):
    """Verify that InterProcessLock excludes a second process.

    Forks a child that grabs the lock file first; the parent then
    retries ``trylock`` on the same file and must observe IOError,
    proving the lock is held across process boundaries.
    """
    lock_file = os.path.join(self.lock_dir, 'processlock')
    pid = os.fork()
    if pid:
        # Parent process: wait for the child to create the lock file.
        # Make sure the child grabs the lock first
        start = time.time()
        while not os.path.exists(lock_file):
            if time.time() - start > 5:
                self.fail('Timed out waiting for child to grab lock')
            # Yield the CPU so the child can make progress.
            time.sleep(0)
        lock1 = lockutils.InterProcessLock('foo')
        lock1.lockfile = open(lock_file, 'w')
        # NOTE(bnemec): There is a brief window between when the lock file
        # is created and when it actually becomes locked.  If we happen to
        # context switch in that window we may succeed in locking the
        # file.  Keep retrying until we either get the expected exception
        # or timeout waiting.
        while time.time() - start < 5:
            try:
                lock1.trylock()
                # Lock unexpectedly succeeded; release and retry until
                # the child's lock actually takes effect.
                lock1.unlock()
                time.sleep(0)
            except IOError:
                # This is what we expect to happen
                break
        else:
            # while-else: the retry loop timed out without ever
            # hitting the contention we were waiting for.
            self.fail('Never caught expected lock exception')
        # We don't need to wait for the full sleep in the child here
        os.kill(pid, signal.SIGKILL)
    else:
        # Child process: grab the lock and hold it until killed.
        try:
            lock2 = lockutils.InterProcessLock('foo')
            lock2.lockfile = open(lock_file, 'w')
            have_lock = False
            while not have_lock:
                try:
                    lock2.trylock()
                    have_lock = True
                except IOError:
                    # Retry until the lock is ours.
                    pass
        finally:
            # NOTE(bnemec): This is racy, but I don't want to add any
            # synchronization primitives that might mask a problem
            # with the one we're trying to test here.
            time.sleep(.5)
            # Exit without running parent cleanup/atexit handlers.
            os._exit(0)
def get_process_lock(file):
    """Build an interprocess lock bound to *file*.

    :param file: path of the lock file; must be writable.
    :returns: a ``lockutils.InterProcessLock`` for *file*.
    :raises exception.HBSDError: when *file* is not writable
        (error code 633).
    """
    # Writable path -> hand back the lock object directly.
    if os.access(file, os.W_OK):
        return lockutils.InterProcessLock(file)
    # Otherwise surface the driver-specific error for an
    # unwritable lock path.
    msg = output_err(633, file=file)
    raise exception.HBSDError(message=msg)
def lockb(wait):
    # Hold interprocess lock 'b' (under tmpdir) while blocking on
    # the supplied event; the lock is released on exit from `with`.
    lock_path = os.path.join(tmpdir, 'b')
    with lockutils.InterProcessLock(lock_path):
        wait.wait()
def locka(wait):
    # Hold interprocess lock 'a' (under tmpdir), block on the event,
    # then flag completion so the test can observe progress.
    lock_path = os.path.join(tmpdir, 'a')
    with lockutils.InterProcessLock(lock_path):
        wait.wait()
        self.completed = True