def test_node_lock_acquire_already_owner(self, mock_acquire):
    mock_acquire.return_value = 'ACTION_XYZ'

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ')

    self.assertTrue(res)
    mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ')
def test_node_lock_acquire_failed(self, mock_acquire, mock_dead):
    # Disable retries so the single side_effect entry matches the single
    # expected acquire attempt below.
    cfg.CONF.set_override('lock_retry_times', 0)
    mock_dead.return_value = False
    mock_acquire.side_effect = ['ACTION_ABC']

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ')

    self.assertFalse(res)
    mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ')
def test_node_lock_acquire_with_retry(self, mock_acquire, mock_sleep):
    cfg.CONF.set_override('lock_retry_times', 5)
    mock_acquire.side_effect = ['ACTION_ABC', 'ACTION_ABC', 'ACTION_XYZ']

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ')

    self.assertTrue(res)
    sleep_calls = [mock.call(cfg.CONF.lock_retry_interval)]
    mock_sleep.assert_has_calls(sleep_calls * 2)
    acquire_calls = [mock.call('NODE_A', 'ACTION_XYZ')]
    mock_acquire.assert_has_calls(acquire_calls * 3)
def test_node_lock_acquire_forced(self, mock_steal, mock_acquire):
    # Disable retries so only the initial acquire attempt is made before
    # the lock is forcibly stolen.
    cfg.CONF.set_override('lock_retry_times', 0)
    mock_acquire.side_effect = ['ACTION_ABC', 'ACTION_ABC', 'ACTION_ABC']
    mock_steal.return_value = 'ACTION_XY'

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XY',
                                  forced=True)

    self.assertTrue(res)
    mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XY')
    mock_steal.assert_called_once_with('NODE_A', 'ACTION_XY')
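# --- Sketch: the acquire/retry/steal flow the four tests above exercise ---
# Reconstructed from what the mocks assert, not copied from the lock module
# itself: `_FakeDB` stands in for the db_api persistence calls the tests
# patch, and the two module constants mirror cfg.CONF.lock_retry_times and
# cfg.CONF.lock_retry_interval.
import time


class _FakeDB:
    """Hypothetical stand-in for the db_api calls the mocks patch."""

    def __init__(self, owners):
        self._owners = iter(owners)

    def node_lock_acquire(self, node_id, action_id):
        # Returns the action id that currently owns the node lock.
        return next(self._owners)

    def node_lock_steal(self, node_id, action_id):
        return action_id


LOCK_RETRY_TIMES = 3
LOCK_RETRY_INTERVAL = 0  # kept at 0 here so the demo runs instantly


def node_lock_acquire_sketch(db, node_id, action_id, forced=False):
    # Step 1: first attempt; success means the returned owner is us.
    owner = db.node_lock_acquire(node_id, action_id)
    if owner == action_id:
        return True

    # Step 2: bounded retries with a sleep between attempts; the tests
    # assert exactly one sleep call per retry.
    for _ in range(LOCK_RETRY_TIMES):
        time.sleep(LOCK_RETRY_INTERVAL)
        owner = db.node_lock_acquire(node_id, action_id)
        if owner == action_id:
            return True

    # Step 3: forced callers steal the lock once retries are exhausted.
    if forced:
        owner = db.node_lock_steal(node_id, action_id)
        return owner == action_id

    return False


# Usage mirroring test_node_lock_acquire_with_retry: two failed attempts,
# then success on the third.
assert node_lock_acquire_sketch(
    _FakeDB(['ACTION_ABC', 'ACTION_ABC', 'ACTION_XYZ']),
    'NODE_A', 'ACTION_XYZ')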
def execute(self, **kwargs): """Interface function for action execution. :param dict kwargs: Parameters provided to the action, if any. :returns: A tuple containing the result and the related reason. """ # Since node.cluster_id could be reset to '' during action execution, # we record it here for policy check and cluster lock release. forced = (self.action in [consts.NODE_DELETE, consts.NODE_OPERATION]) saved_cluster_id = self.entity.cluster_id if saved_cluster_id: if self.cause == consts.CAUSE_RPC: res = senlin_lock.cluster_lock_acquire(self.context, self.entity.cluster_id, self.id, self.owner, senlin_lock.NODE_SCOPE, False) if not res: return self.RES_RETRY, 'Failed in locking cluster' try: self.policy_check(self.entity.cluster_id, 'BEFORE') finally: if self.data['status'] != pb.CHECK_OK: # Don't emit message since policy_check should have # done it senlin_lock.cluster_lock_release( saved_cluster_id, self.id, senlin_lock.NODE_SCOPE) return self.RES_ERROR, ('Policy check: ' + self.data['reason']) elif self.cause == consts.CAUSE_DERIVED_LCH: self.policy_check(saved_cluster_id, 'BEFORE') try: res = senlin_lock.node_lock_acquire(self.context, self.entity.id, self.id, self.owner, forced) if not res: res = self.RES_RETRY reason = 'Failed in locking node' else: res, reason = self._execute() if saved_cluster_id and self.cause == consts.CAUSE_RPC: self.policy_check(saved_cluster_id, 'AFTER') if self.data['status'] != pb.CHECK_OK: res = self.RES_ERROR reason = 'Policy check: ' + self.data['reason'] finally: senlin_lock.node_lock_release(self.entity.id, self.id) if saved_cluster_id and self.cause == consts.CAUSE_RPC: senlin_lock.cluster_lock_release(saved_cluster_id, self.id, senlin_lock.NODE_SCOPE) return res, reason
def execute(self, **kwargs): """Interface function for action execution. :param dict kwargs: Parameters provided to the action, if any. :returns: A tuple containing the result and the related reason. """ # Since node.cluster_id could be reset to None in _execute progress, # we record it here for policy check and cluster lock release. saved_cluster_id = self.node.cluster_id if self.node.cluster_id: if self.cause == base.CAUSE_RPC: res = senlin_lock.cluster_lock_acquire( self.context, self.node.cluster_id, self.id, self.owner, senlin_lock.NODE_SCOPE, False) if not res: return self.RES_RETRY, _('Failed in locking cluster') self.policy_check(self.node.cluster_id, 'BEFORE') if self.data['status'] != policy_mod.CHECK_OK: # Don't emit message here since policy_check should have # done it if self.cause == base.CAUSE_RPC: senlin_lock.cluster_lock_release( self.node.cluster_id, self.id, senlin_lock.NODE_SCOPE) return self.RES_ERROR, 'Policy check: ' + self.data['reason'] reason = '' try: res = senlin_lock.node_lock_acquire(self.context, self.node.id, self.id, self.owner, False) if not res: res = self.RES_ERROR reason = _('Failed in locking node') else: res, reason = self._execute() if res == self.RES_OK and saved_cluster_id is not None: self.policy_check(saved_cluster_id, 'AFTER') if self.data['status'] != policy_mod.CHECK_OK: res = self.RES_ERROR reason = 'Policy check: ' + self.data['reason'] else: res = self.RES_OK finally: senlin_lock.node_lock_release(self.node.id, self.id) if saved_cluster_id is not None and self.cause == base.CAUSE_RPC: senlin_lock.cluster_lock_release(saved_cluster_id, self.id, senlin_lock.NODE_SCOPE) return res, reason
def test_node_lock_acquire_max_retries(self, mock_acquire, mock_sleep):
    cfg.CONF.set_override('lock_retry_times', 2)
    mock_acquire.side_effect = [
        'ACTION_ABC', 'ACTION_ABC', 'ACTION_ABC', 'ACTION_XYZ'
    ]

    res = lockm.node_lock_acquire('NODE_A', 'ACTION_XYZ')

    self.assertFalse(res)
    sleep_calls = [mock.call(cfg.CONF.lock_retry_interval)]
    mock_sleep.assert_has_calls(sleep_calls * 2)
    self.assertEqual(2, mock_sleep.call_count)
    acquire_calls = [mock.call('NODE_A', 'ACTION_XYZ')]
    mock_acquire.assert_has_calls(acquire_calls * 3)
def test_node_lock_acquire_dead_owner(self, mock_steal, mock_acquire,
                                      mock_action_fail, mock_dead):
    # Disable retries: the dead-engine check happens right after the
    # first (and only) failed acquire attempt.
    cfg.CONF.set_override('lock_retry_times', 0)
    mock_dead.return_value = True
    mock_acquire.side_effect = ['ACTION_ABC']
    mock_steal.return_value = 'ACTION_XYZ'

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ',
                                  'NEW_ENGINE')

    self.assertTrue(res)
    mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ')
    mock_steal.assert_called_once_with('NODE_A', 'ACTION_XYZ')
    mock_action_fail.assert_called_once_with(
        self.ctx, 'ACTION_ABC', mock.ANY,
        'Engine died when executing this action.')
def test_node_lock_acquire_steal_failed(self, mock_steal, mock_acquire,
                                        mock_sleep):
    cfg.CONF.set_override('lock_retry_times', 2)
    mock_acquire.side_effect = ['ACTION_ABC', 'ACTION_ABC', 'ACTION_ABC']
    mock_steal.return_value = None

    res = lockm.node_lock_acquire('NODE_A', 'ACTION_XY', forced=True)

    self.assertFalse(res)
    sleep_calls = [mock.call(cfg.CONF.lock_retry_interval)]
    mock_sleep.assert_has_calls(sleep_calls * 2)
    self.assertEqual(2, mock_sleep.call_count)
    acquire_calls = [mock.call('NODE_A', 'ACTION_XY')]
    mock_acquire.assert_has_calls(acquire_calls * 3)
    mock_steal.assert_called_once_with('NODE_A', 'ACTION_XY')
def test_node_lock_acquire_dead_owner(self, mock_steal, mock_acquire,
                                      mock_action_fail, mock_dead):
    mock_dead.return_value = True
    mock_acquire.side_effect = ['ACTION_ABC', 'ACTION_ABC', 'ACTION_ABC',
                                'ACTION_ABC']
    mock_steal.return_value = 'ACTION_XYZ'

    res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ',
                                  'NEW_ENGINE')

    self.assertTrue(res)
    self.assertEqual(4, mock_acquire.call_count)
    mock_steal.assert_called_once_with('NODE_A', 'ACTION_XYZ')
    mock_action_fail.assert_called_once_with(
        self.ctx, 'ACTION_ABC', mock.ANY,
        'Engine died when executing this action.')
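# --- Sketch: the dead-owner branch the two tests above pin down ---
# When the lock is still held after all retries and the call is not forced,
# the owning action is loaded; if its engine has died, the action is marked
# failed and the lock is stolen. Collaborator names (Action.load,
# Action.mark_failed, is_engine_dead, db_api.node_lock_steal) follow the
# targets the mocks patch; this is a reconstruction, not verbatim source.
import time


def steal_if_owner_dead(context, node_id, action_id, owner, engine,
                        db_api, action_mod, is_engine_dead):
    action = action_mod.Action.load(context, action_id=owner,
                                    project_safe=False)
    if (action and action.owner and action.owner != engine and
            is_engine_dead(context, action.owner)):
        # The tests assert this exact failure reason.
        action_mod.Action.mark_failed(
            context, action.id, time.time(),
            'Engine died when executing this action.')
        db_api.node_lock_steal(node_id, action_id)
        return True
    return False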
def execute(self, **kwargs):
    # Since node.cluster_id could be reset to None in _execute progress,
    # we record it here for policy check and cluster lock release.
    saved_cluster_id = self.node.cluster_id
    if self.node.cluster_id:
        if self.cause == base.CAUSE_RPC:
            res = senlin_lock.cluster_lock_acquire(
                self.node.cluster_id, self.id,
                senlin_lock.NODE_SCOPE, False)
            if not res:
                return self.RES_RETRY, _('Failed in locking cluster')

        self.policy_check(self.node.cluster_id, 'BEFORE')
        if self.data['status'] != policy_mod.CHECK_OK:
            # Don't emit message here since policy_check should have
            # done it
            if self.cause == base.CAUSE_RPC:
                senlin_lock.cluster_lock_release(
                    self.node.cluster_id, self.id, senlin_lock.NODE_SCOPE)

            return self.RES_ERROR, 'Policy check: ' + self.data['reason']

    reason = ''
    try:
        res = senlin_lock.node_lock_acquire(self.node.id, self.id, False)
        if not res:
            res = self.RES_ERROR
            reason = _('Failed in locking node')
        else:
            res, reason = self._execute()
            if res == self.RES_OK and saved_cluster_id is not None:
                self.policy_check(saved_cluster_id, 'AFTER')
                if self.data['status'] != policy_mod.CHECK_OK:
                    res = self.RES_ERROR
                    reason = 'Policy check: ' + self.data['reason']
                else:
                    res = self.RES_OK
    finally:
        senlin_lock.node_lock_release(self.node.id, self.id)
        if saved_cluster_id is not None and self.cause == base.CAUSE_RPC:
            senlin_lock.cluster_lock_release(saved_cluster_id, self.id,
                                             senlin_lock.NODE_SCOPE)

    return res, reason