def testAlreadyLocked(self):
  """Holding a lock blocks re-locking the same file, but not others."""
  with daemon.flock('foo'):
    # A second lock on 'foo' while it is held must raise.
    with self.assertRaises(daemon.LockAlreadyLocked):
      with daemon.flock('foo'):
        pass

    # An unrelated file can still be locked while 'foo' is held.
    with daemon.flock('bar'):
      pass

  # Once released, 'foo' can be locked again.
  with daemon.flock('foo'):
    pass
def testGetLock(self):
  """With all fs calls succeeding, flock runs the guarded body."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  self._set_unlink_status()
  with self._assert_reached() as reached:
    with daemon.flock("bogus"):
      reached["yup"] = True
def testGetLock(self):
  """Acquiring an uncontested lock executes the body under the lock."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  self._set_unlink_status()
  with self._assert_reached() as reached:
    with daemon.flock('bogus'):
      reached['yup'] = True
def testUnlinkFailureDoesntBreak(self):
  """Test that a failing unlink doesn't break us."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  # Unlink of the lockfile fails; the lock cycle must still complete.
  self._set_unlink_status(success=False)
  with self._assert_reached() as reached:
    with daemon.flock("bogus"):
      reached["yup"] = True
def testUnlinkFailureDoesntBreak(self):
  """A failed lockfile unlink must not prevent the body from running."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  self._set_unlink_status(success=False)
  with self._assert_reached() as reached:
    with daemon.flock('bogus'):
      reached['yup'] = True
def testDontGetLock(self):
  """When the lock can't be taken, LockAlreadyLocked is raised."""
  self._mock_basic_fs_calls()
  self._set_lock_status(success=False)
  self._set_stat_status()
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock("bogus"):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testDontGetLock(self):
  """A contested lock raises LockAlreadyLocked and skips the body."""
  self._mock_basic_fs_calls()
  self._set_lock_status(success=False)
  self._set_stat_status()
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock('bogus'):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testLockfileRecreated(self):
  """Test that we abort if a new lockfile is created under us."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  # stat reports a different inode than the fd we locked.
  self._set_stat_status(matching=False)
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock("bogus"):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testFileDeletedAfterLockAcquired(self):
  """Test that we abort if we acquire a lock but the file has been deleted."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  # stat on the lockfile fails, i.e. the file vanished under us.
  self._set_stat_status(success=False)
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock("bogus"):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testFileDeletedAfterLockAcquired(self):
  """A lockfile deleted between open and stat aborts with LockAlreadyLocked."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status(success=False)
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock('bogus'):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testLockfileRecreated(self):
  """A lockfile recreated underneath us aborts with LockAlreadyLocked."""
  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status(matching=False)
  self._set_unlink_status()
  with self.assertRaises(daemon.LockAlreadyLocked):
    with daemon.flock('bogus'):
      # Should never reach this.
      # pylint: disable=redundant-unittest-assert
      self.assertTrue(False)  # pragma: no cover
def testDeleteWhenDone(self):
  """Test that we delete the lockfile when we're done."""
  # Record every os.unlink invocation instead of letting it touch the fs.
  unlink_calls = []

  def _mock_unlink(*_args, **_kwargs):
    unlink_calls.append(1)
  self.mock(os, 'unlink', _mock_unlink)

  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  with self._assert_reached() as reached:
    with daemon.flock('bogus'):
      reached['yup'] = True
  # Exactly one unlink: the lockfile cleanup on exit.
  self.assertEqual(len(unlink_calls), 1)
def run_state_machine_pass(
    logger, matchlist, abs_master_directory, emergency_file, desired_state,
    transition_time_utc, enable_gclient_sync, prod, connection_timeout,
    hostname, builder_filters):  # pragma: no cover
  """Runs one evidence-collection / state-transition pass for a master.

  Args:
    logger: logging.Logger-style object used for progress and error output.
    matchlist: object whose execution_list(evidence) returns
        (state, action_name, action_items) for the collected evidence.
    abs_master_directory: absolute path to the master directory.
    emergency_file: filename which, when present in abs_master_directory,
        aborts the whole pass.
    desired_state: state the master should transition to.
    transition_time_utc: UTC time at which the transition applies.
    enable_gclient_sync: whether to enable gclient sync when converting
        action items to CLI commands.
    prod: if True, actually execute the commands; otherwise dry-run.
    connection_timeout: timeout forwarded to evidence collection.
    hostname: expected hostname; a mismatch aborts the pass.
    builder_filters: filters forwarded to buildbot_state.collect_evidence.

  Returns:
    0 on success (including dry runs and no-op passes), 1 when aborted by
    the emergency file or an invalid hostname.
  """
  # Bail out early if a human has dropped the emergency file in place.
  if os.path.exists(os.path.join(abs_master_directory, emergency_file)):
    logger.error('%s detected in %s, aborting!',
                 emergency_file, abs_master_directory)
    return 1

  if not master_hostname_is_valid(hostname, abs_master_directory, logger):
    return 1

  evidence = buildbot_state.collect_evidence(
      abs_master_directory,
      connection_timeout=connection_timeout,
      builder_filters=builder_filters)
  evidence['desired_buildbot_state'] = {
      'desired_state': desired_state,
      'transition_time_utc': transition_time_utc,
  }

  state, action_name, action_items = matchlist.execution_list(evidence)
  execution_list = list(
      master.convert_action_items_to_cli(
          action_items, abs_master_directory,
          enable_gclient=enable_gclient_sync))
  logger.info('%s: current state: %s', abs_master_directory, state)
  logger.info('%s: performing action: %s', abs_master_directory, action_name)

  if execution_list:
    if prod:
      logger.info('production run, executing:')
    else:
      logger.info('dry run, not executing:')

    for cmd in execution_list:
      logger.info('* %s (in %s)', cmd['cmd'], cmd['cwd'])
      if prod:
        try:
          # Each command carries its own lockfile so that concurrent passes
          # over the same master don't run the same command twice.
          with daemon.flock(cmd['lockfile']):
            subprocess.check_call(
                [str(x) for x in cmd['cmd']],
                cwd=cmd['cwd'],
                close_fds=True)
        except daemon.LockAlreadyLocked:
          # Logger.warn is a deprecated alias; use warning().
          logger.warning(
              ' lock on %s could not be acquired, no action taken.',
              cmd['lockfile'])
  else:
    logger.info('no action to be taken.')
  return 0
def testDeleteWhenDone(self):
  """The lockfile is unlinked exactly once after the body finishes."""
  counter = {"count": 0}

  def _mock_unlink(*_args, **_kwargs):
    counter["count"] += 1
  self.mock(os, "unlink", _mock_unlink)

  self._mock_basic_fs_calls()
  self._set_lock_status()
  self._set_stat_status()
  with self._assert_reached() as reached:
    with daemon.flock("bogus"):
      reached["yup"] = True
  self.assertEqual(counter["count"], 1)
def run_state_machine_pass(logger, matchlist, abs_master_directory,
                           emergency_file, desired_state, transition_time_utc,
                           enable_gclient_sync, prod, connection_timeout,
                           hostname):  # pragma: no cover
  """Runs one evidence-collection / state-transition pass for a master.

  Args:
    logger: logging.Logger-style object used for progress and error output.
    matchlist: object whose execution_list(evidence) returns
        (state, action_name, action_items) for the collected evidence.
    abs_master_directory: absolute path to the master directory.
    emergency_file: filename which, when present in abs_master_directory,
        aborts the whole pass.
    desired_state: state the master should transition to.
    transition_time_utc: UTC time at which the transition applies.
    enable_gclient_sync: whether to enable gclient sync when converting
        action items to CLI commands.
    prod: if True, actually execute the commands; otherwise dry-run.
    connection_timeout: timeout forwarded to evidence collection.
    hostname: expected hostname; a mismatch aborts the pass.

  Returns:
    0 on success (including dry runs and no-op passes), 1 when aborted by
    the emergency file or an invalid hostname.
  """
  # Bail out early if a human has dropped the emergency file in place.
  if os.path.exists(os.path.join(abs_master_directory, emergency_file)):
    logger.error('%s detected in %s, aborting!',
                 emergency_file, abs_master_directory)
    return 1

  if not master_hostname_is_valid(hostname, abs_master_directory, logger):
    return 1

  evidence = buildbot_state.collect_evidence(
      abs_master_directory, connection_timeout=connection_timeout)
  evidence['desired_buildbot_state'] = {
      'desired_state': desired_state,
      'transition_time_utc': transition_time_utc,
  }

  state, action_name, action_items = matchlist.execution_list(evidence)
  execution_list = list(
      master.convert_action_items_to_cli(action_items, abs_master_directory,
                                         enable_gclient=enable_gclient_sync))
  logger.info('%s: current state: %s', abs_master_directory, state)
  logger.info('%s: performing action: %s', abs_master_directory, action_name)

  if execution_list:
    if prod:
      logger.info('production run, executing:')
    else:
      logger.info('dry run, not executing:')

    for cmd in execution_list:
      logger.info('* %s (in %s)', cmd['cmd'], cmd['cwd'])
      if prod:
        try:
          # Each command carries its own lockfile so that concurrent passes
          # over the same master don't run the same command twice.
          with daemon.flock(cmd['lockfile']):
            subprocess.check_call([str(x) for x in cmd['cmd']],
                                  cwd=cmd['cwd'], close_fds=True)
        except daemon.LockAlreadyLocked:
          # Logger.warn is a deprecated alias; use warning().
          logger.warning(
              ' lock on %s could not be acquired, no action taken.',
              cmd['lockfile'])
  else:
    logger.info('no action to be taken.')
  return 0
def start(self):
  """Writes a state file, returns False if we're already running."""
  # A flock guards against two service_managers starting simultaneously
  # and each writing their own state file.
  try:
    with daemon.flock('service_manager.flock', self._state_directory):
      already_running = True
      try:
        self.get_running_process_state()
      except ProcessStateError:
        already_running = False
      if already_running:
        return False
      self._write_state_file(os.getpid())
      return True
  except daemon.LockAlreadyLocked:
    # Another service_manager holds the lock, so it is the running one.
    return False
def flock(lock_file, retries=20, sleep_duration=3):  # pragma: no cover
  """Context manager wrapping daemon.flock with a bounded retry loop.

  Attempts to take the lock up to `retries` times, sleeping
  `sleep_duration` seconds between attempts, and raises
  FlockTimeoutError once the final attempt fails.
  """
  logging.debug('Acquiring file lock on %s...', lock_file)
  attempt = 0
  while True:
    try:
      with daemon.flock(lock_file):
        logging.debug('Lock acquired on %s.', lock_file)
        try:
          yield
        finally:
          logging.debug('Releasing lock on %s.', lock_file)
      return
    except daemon.LockAlreadyLocked:
      if attempt == retries - 1:
        raise FlockTimeoutError()
      attempt += 1
      logging.debug('Lock on %s busy; sleeping for %d seconds.',
                    lock_file, sleep_duration)
      time.sleep(sleep_duration)