def _ReadFileWithSharedLockBlockingThenWrite(read_file, write_file):
  """Copies |read_file|'s content onto the end of |write_file|.

  A shared lock is taken (blocking) on |read_file| before reading, and an
  exclusive lock is taken (blocking) on |write_file| before appending.
  Intended to run in a child process from the shared-lock tests.
  """
  with open(read_file, 'r') as source:
    lock.AcquireFileLock(source, lock.LOCK_SH)
    data = source.read()
    with open(write_file, 'a') as sink:
      lock.AcquireFileLock(sink, lock.LOCK_EX)
      sink.write(data)
def _AttemptPseudoLockAcquisition(pseudo_lock_path, pseudo_lock_fd_return):
  """Try to acquire the lock and return a boolean indicating whether the
  attempt was successful. If the attempt was successful,
  pseudo_lock_fd_return, which should be an empty array, will be modified to
  contain a single entry: the file descriptor of the (now acquired) lock file.

  This whole operation is guarded with the global cloud storage lock, which
  prevents race conditions that might otherwise cause multiple processes to
  believe they hold the same pseudo lock (see _FileLock for more details).

  Args:
    pseudo_lock_path: Path of the pseudo lock file to create and lock.
    pseudo_lock_fd_return: Out-parameter; an empty list that receives the
        open file object of the acquired lock on success.

  Returns:
    True if both the global lock and the pseudo lock were acquired;
    False otherwise (in which case any partially opened fd is closed).
  """
  pseudo_lock_fd = None
  try:
    with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
      # Both locks are taken non-blocking (LOCK_NB).
      with lock.FileLock(global_file, lock.LOCK_EX | lock.LOCK_NB):
        # Attempt to acquire the lock in a non-blocking manner. If we block,
        # then we'll cause deadlock because another process will be unable to
        # acquire the cloud storage global lock in order to release the
        # pseudo lock.
        pseudo_lock_fd = open(pseudo_lock_path, 'w')
        lock.AcquireFileLock(pseudo_lock_fd, lock.LOCK_EX | lock.LOCK_NB)
        # On success the fd is intentionally left open: the caller owns it
        # and is responsible for releasing the lock and closing it.
        pseudo_lock_fd_return.append(pseudo_lock_fd)
        return True
  except (lock.LockException, IOError):
    # We failed to acquire either the global cloud storage lock or the pseudo
    # lock.
    if pseudo_lock_fd:
      pseudo_lock_fd.close()
    return False
def testSharedLock(self):
  """Checks that many processes may hold a shared lock at the same time.

  While this process holds a shared lock on |self.temp_file_path|, ten
  child processes each take their own shared lock on it and append its
  content to a scratch file. If shared locking works, none of them block
  and the scratch file ends up with exactly ten copies of the content.
  """
  tf = tempfile.NamedTemporaryFile(delete=False)
  tf.close()
  temp_write_file = tf.name
  try:
    with open(self.temp_file_path, 'w') as f:
      f.write('0123456789')
    with open(self.temp_file_path, 'r') as f:
      # First, acquire a shared lock on temp_file_path.
      lock.AcquireFileLock(f, lock.LOCK_SH)
      # Fixed typo: was 'processess'.
      processes = []
      # Create 10 processes that also try to acquire a shared lock on
      # temp_file_path, then append temp_file_path's content to
      # temp_write_file.
      for _ in range(10):
        p = multiprocessing.Process(
            target=_ReadFileWithSharedLockBlockingThenWrite,
            args=(self.temp_file_path, temp_write_file))
        p.start()
        processes.append(p)
      for p in processes:
        p.join()
    # temp_write_file should contain 10 copies of temp_file_path's content.
    with open(temp_write_file, 'r') as f:
      # assertEquals is a deprecated alias (removed in Python 3.12);
      # use assertEqual.
      self.assertEqual('0123456789' * 10, f.read())
  finally:
    os.remove(temp_write_file)
def _FileLock(base_path):
  # NOTE(review): this is a generator that yields while holding a lock —
  # presumably wrapped with @contextlib.contextmanager at its (not visible
  # here) decoration site; confirm at the definition.
  pseudo_lock_path = '%s.pseudo_lock' % base_path
  _CreateDirectoryIfNecessary(os.path.dirname(pseudo_lock_path))
  # We need to make sure that there is no other process which is acquiring
  # the lock on |base_path| and has not finished before proceeding further
  # to create the |pseudo_lock_path|. Otherwise, |pseudo_lock_path| may be
  # deleted by that other process after we create it in this process.
  while os.path.exists(pseudo_lock_path):
    time.sleep(0.1)
  # Guard the creation & acquiring lock of |pseudo_lock_path| by the global
  # lock to make sure that there is no race condition on creating the file.
  with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
    with lock.FileLock(global_file, lock.LOCK_EX):
      fd = open(pseudo_lock_path, 'w')
      lock.AcquireFileLock(fd, lock.LOCK_EX)
  # The global lock is released here, but |fd| still holds the exclusive
  # pseudo lock for the duration of the caller's with-body.
  try:
    yield
  finally:
    lock.ReleaseFileLock(fd)
    try:
      fd.close()
      os.remove(pseudo_lock_path)
    except OSError:
      # We don't care if the pseudo-lock gets removed elsewhere before
      # we have a chance to do so.
      pass
def _ReadFileWithExclusiveLockNonBlocking(target_file, status_file):
  """Records in |status_file| whether a non-blocking exclusive lock attempt
  on |target_file| raised lock.LockException.

  Intended to run in a child process while the parent already holds a lock
  on |target_file|.
  """
  with open(target_file, 'r') as candidate:
    try:
      lock.AcquireFileLock(candidate, lock.LOCK_EX | lock.LOCK_NB)
    except lock.LockException:
      outcome = 'LockException raised'
    else:
      outcome = 'LockException was not raised'
    with open(status_file, 'w') as status:
      status.write(outcome)
def _AppendTextToFile(file_name):
  """Appends 'Start', 10000 '*' characters, and 'End' to |file_name| while
  holding an exclusive lock on it.

  The payload is deliberately written in many small write() calls so that,
  if exclusive locking were broken, concurrent writers would be likely to
  interleave their output.
  """
  with open(file_name, 'a') as out:
    lock.AcquireFileLock(out, lock.LOCK_EX)
    # Sleep 100 ms to increase the chance of another process trying to
    # acquire the lock of file as the same time.
    time.sleep(0.1)
    for piece in ['Start'] + ['*'] * 10000 + ['End']:
      out.write(piece)
def testNonBlockingLockAcquiring(self):
  """Checks that a non-blocking exclusive lock attempt on a file that is
  already exclusively locked raises LockException instead of blocking.

  A child process tries LOCK_EX | LOCK_NB on |self.temp_file_path| while
  this process holds LOCK_EX on it, and reports the outcome through a
  scratch status file.
  """
  tf = tempfile.NamedTemporaryFile(delete=False)
  tf.close()
  temp_status_file = tf.name
  try:
    with open(self.temp_file_path, 'w') as f:
      lock.AcquireFileLock(f, lock.LOCK_EX)
      p = multiprocessing.Process(
          target=_ReadFileWithExclusiveLockNonBlocking,
          args=(self.temp_file_path, temp_status_file))
      p.start()
      p.join()
    with open(temp_status_file, 'r') as f:
      # assertEquals is a deprecated alias (removed in Python 3.12);
      # use assertEqual.
      self.assertEqual('LockException raised', f.read())
  finally:
    os.remove(temp_status_file)