def test_over_threshold(self):
    """An execution over the per-attribute concurrency threshold is delayed,
    then rescheduled once one of the running executions completes.
    """
    policy_db = Policy.get_by_ref("wolfpack.action-1.concurrency.attr")
    self.assertGreater(policy_db.parameters["threshold"], 0)
    self.assertIn("actionstr", policy_db.parameters["attributes"])

    # Fill the threshold with executions sharing the same "actionstr" value.
    for i in range(0, policy_db.parameters["threshold"]):
        liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "fu"})
        action_service.request(liveaction)

    scheduled = LiveAction.get_all()
    self.assertEqual(len(scheduled), policy_db.parameters["threshold"])
    for liveaction in scheduled:
        self.assertIn(liveaction.status, SCHEDULED_STATES)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "fu"})
    liveaction, _ = action_service.request(liveaction)
    delayed = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
    liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "bar"})
    liveaction, _ = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)

    # Mark one of the execution as completed.
    action_service.update_status(scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
def test_resume_option_reset_tasks(self):
    """Re-running with a 'reset' list must pass per-task reset flags to resume()."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    liveaction1 = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
    liveaction1, execution1 = action_service.request(liveaction1)
    # The initial run must not trigger a resume.
    self.assertFalse(MistralRunner.resume.called)

    # Rerun the execution.
    context = {
        're-run': {
            'ref': execution1.id,
            'tasks': ['x', 'y'],
            'reset': ['y']
        }
    }

    liveaction2 = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, context=context)
    liveaction2, execution2 = action_service.request(liveaction2)
    liveaction2 = LiveAction.get_by_id(str(liveaction2.id))
    self.assertEqual(liveaction2.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Only task "y" was listed under 'reset', so only its spec carries reset=True.
    task_specs = {
        'x': {
            'reset': False
        },
        'y': {
            'reset': True
        }
    }

    MistralRunner.resume.assert_called_with(ex_ref=execution1, task_specs=task_specs)
def test_over_threshold_delay_executions(self):
    """An execution over the per-attribute concurrency threshold is delayed,
    then rescheduled once one of the running executions completes.
    """
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency.attr')
    self.assertGreater(policy_db.parameters['threshold'], 0)
    self.assertIn('actionstr', policy_db.parameters['attributes'])

    # Fill the threshold with executions sharing the same "actionstr" value.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
        action_service.request(liveaction)

    scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
    liveaction, _ = action_service.request(liveaction)
    delayed = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'})
    liveaction, _ = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)

    # Mark one of the execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
def test_trace_tag_resuse(self):
    """Requesting two executions under the same trace tag must yield two traces."""
    # NOTE(review): method name has a typo ("resuse" -> "reuse"); kept as-is so the
    # test discovery name does not change.
    self.traceable_liveaction['context']['trace_context'] = {'trace_tag': 'blank space'}
    # Request the same traceable liveaction twice with an identical trace tag.
    action_services.request(self.traceable_liveaction)
    action_services.request(self.traceable_liveaction)
    matching_traces = Trace.query(**{'trace_tag': 'blank space'})
    self.assertEqual(len(matching_traces), 2)
def test_over_threshold_cancel_executions(self):
    """With policy action 'cancel', an over-threshold request is canceled instead of delayed."""
    policy_db = Policy.get_by_ref('wolfpack.action-2.concurrency.cancel')
    self.assertEqual(policy_db.parameters['action'], 'cancel')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Fill the threshold.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'})
        action_service.request(liveaction)

    scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the exact number of published states and runner invocations to catch
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be canceled since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 1  # Tally requested state.

    # Assert the canceling state is being published.
    calls = [call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)]
    LiveActionPublisher.publish_state.assert_has_calls(calls)
    expected_num_pubs += 2  # Tally canceling and canceled state changes.

    # Assert the action is canceled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_disabled_policy_not_applied_on_pre_run(self, mock_policies): scheduler_worker = scheduler.get_scheduler() ########## # First test a scenario where policy is enabled ########## self.assertTrue(self.policy_db.enabled) # Post run hasn't been called yet, call count should be 0 self.assertEqual(mock_policies.get_driver.call_count, 0) liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'}) live_action_db, execution_db = action_service.request(liveaction) scheduler_worker._apply_pre_run_policies(liveaction_db=live_action_db) # Ony policy has been applied so call count should be 1 self.assertEqual(mock_policies.get_driver.call_count, 1) ########## # Now a scenaro with disabled policy ########## mock_policies.get_driver.call_count = 0 self.policy_db.enabled = False self.policy_db = Policy.add_or_update(self.policy_db) self.assertFalse(self.policy_db.enabled) self.assertEqual(mock_policies.get_driver.call_count, 0) liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'}) live_action_db, execution_db = action_service.request(liveaction) scheduler_worker._apply_pre_run_policies(liveaction_db=live_action_db) # Policy is disabled so call_count should stay the same as before as no policies have been # applied self.assertEqual(mock_policies.get_driver.call_count, 0)
def test_cancel_subworkflow_action(self):
    """Canceling a parent workflow must cancel its subworkflow execution first."""
    liveaction1 = LiveActionDB(action=WF2_NAME, parameters=ACTION_PARAMS)
    liveaction1, execution1 = action_service.request(liveaction1)
    liveaction1 = LiveAction.get_by_id(str(liveaction1.id))
    self.assertEqual(liveaction1.status, action_constants.LIVEACTION_STATUS_RUNNING)

    liveaction2 = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
    liveaction2, execution2 = action_service.request(liveaction2)
    liveaction2 = LiveAction.get_by_id(str(liveaction2.id))
    self.assertEqual(liveaction2.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Mock the children of the parent execution to make this
    # test case have a subworkflow execution.
    with mock.patch.object(
            ActionExecutionDB, 'children',
            new_callable=mock.PropertyMock) as action_ex_children_mock:
        action_ex_children_mock.return_value = [execution2.id]

        mistral_context = liveaction1.context.get('mistral', None)
        self.assertIsNotNone(mistral_context)
        self.assertEqual(mistral_context['execution_id'], WF2_EXEC.get('id'))
        self.assertEqual(mistral_context['workflow_name'], WF2_EXEC.get('workflow_name'))

        requester = cfg.CONF.system_user.user
        liveaction1, execution1 = action_service.request_cancellation(liveaction1, requester)
        self.assertTrue(executions.ExecutionManager.update.called)
        self.assertEqual(executions.ExecutionManager.update.call_count, 2)

        # The subworkflow (WF2) is cancelled before the parent (WF1); order matters.
        calls = [
            mock.call(WF2_EXEC.get('id'), 'CANCELLED'),
            mock.call(WF1_EXEC.get('id'), 'CANCELLED')
        ]

        executions.ExecutionManager.update.assert_has_calls(calls, any_order=False)
def test_over_threshold(self):
    """An execution over the concurrency threshold is delayed, then rescheduled
    once one of the running executions completes.
    """
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Fill the threshold.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
        action_service.request(liveaction)

    scheduled = LiveAction.get_all()
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])
    for liveaction in scheduled:
        self.assertIn(liveaction.status, SCHEDULED_STATES)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_DELAYED)

    # Mark one of the execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
def test_on_cancellation(self):
    """Canceling a running execution frees capacity so a delayed one is rescheduled."""
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters['threshold']):
        parameters = {'actionstr': 'foo-' + str(i)}
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters=parameters)
        action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Check the number of action executions in scheduled state.
    scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the exact number of published states and runner invocations to catch
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Since states are being processed async, wait for the liveaction to go into delayed state.
    liveaction = self._wait_on_status(liveaction, action_constants.LIVEACTION_STATUS_DELAYED)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 0  # The delayed status change should not be published.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Cancel execution.
    action_service.request_cancellation(scheduled[0], 'stanley')
    expected_num_pubs += 2  # Tally the canceling and canceled states.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 2  # Tally scheduled and running state.

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_request_override_runner_parameter_mutable(self):
    """Overriding a mutable runner parameter ('sudo') must be accepted without error."""
    # Baseline request without the runner parameter override.
    base_params = {'hosts': 'localhost', 'cmd': 'uname -a'}
    liveaction = LiveActionDB(action=ACTION_OVR_PARAM_MUTABLE_REF, parameters=base_params)
    liveaction, _ = action_service.request(liveaction)

    # Same request with the mutable runner parameter overridden; no exception expected.
    override_params = dict(base_params, sudo=True)
    liveaction = LiveActionDB(action=ACTION_OVR_PARAM_MUTABLE_REF, parameters=override_params)
    liveaction, _ = action_service.request(liveaction)
def test_request_override_runner_parameter(self):
    """Overriding a runner parameter ('sudo') on this action must be accepted."""
    # Baseline request without the runner parameter override.
    base_params = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
    liveaction = LiveActionDB(action=ACTION_OVR_PARAM_REF, parameters=base_params)
    liveaction, _ = action_service.request(liveaction)

    # Same request with the runner parameter overridden; no exception expected.
    override_params = dict(base_params, sudo=False)
    liveaction = LiveActionDB(action=ACTION_OVR_PARAM_REF, parameters=override_params)
    liveaction, _ = action_service.request(liveaction)
def test_over_threshold_delay_executions(self):
    """Over-threshold executions (matched by attribute) are delayed; publish and
    runner call counts are tracked exactly to detect duplicate executions.
    """
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency.attr')
    self.assertGreater(policy_db.parameters['threshold'], 0)
    self.assertIn('actionstr', policy_db.parameters['attributes'])

    # Fill the threshold with executions sharing the same "actionstr" value.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
        action_service.request(liveaction)

    scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.

    # Assert the action is delayed.
    delayed = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running states.

    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Mark one of the execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)
    expected_num_pubs += 1  # Tally succeeded state.

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # The delayed request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running state.

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_over_threshold_cancel_executions(self):
    """With policy action 'cancel' (matched by attribute), the over-threshold
    request is canceled; states are processed asynchronously so the test polls.
    """
    policy_db = Policy.get_by_ref('wolfpack.action-2.concurrency.attr.cancel')
    self.assertEqual(policy_db.parameters['action'], 'cancel')
    self.assertGreater(policy_db.parameters['threshold'], 0)
    self.assertIn('actionstr', policy_db.parameters['attributes'])

    # Fill the threshold with executions sharing the same "actionstr" value.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'fu'})
        action_service.request(liveaction)

    # Since states are being processed asynchronously, wait for the
    # liveactions to go into scheduled states.
    for i in range(0, 100):
        eventlet.sleep(1)
        scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
        if len(scheduled) == policy_db.parameters['threshold']:
            break

    scheduled = [item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'fu'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 1  # Tally requested state.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into cancel state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status in [
                action_constants.LIVEACTION_STATUS_CANCELING,
                action_constants.LIVEACTION_STATUS_CANCELED]:
            break

    # Assert the canceling state is being published.
    calls = [call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)]
    LiveActionPublisher.publish_state.assert_has_calls(calls)
    expected_num_pubs += 2  # Tally canceling and canceled state changes.

    # Assert the action is canceled.
    canceled = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(canceled.status, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_cancel_on_task_action_concurrency_by_attr(self):
    """An over-threshold workflow task (matched by attribute) must be canceled
    via the mistral runner's cancel() method.
    """
    # Delete other policies in the test pack to avoid conflicts.
    required_policy = 'mistral_tests.cancel_on_concurrency_by_attr'
    self._drop_all_other_policies(required_policy)

    # Get threshold from the policy.
    policy = Policy.get_by_ref(required_policy)
    threshold = policy.parameters.get('threshold', 0)
    self.assertGreater(threshold, 0)

    params = {'friend': 'grande animalerie'}

    # Launch instances of the workflow up to threshold.
    for i in range(0, threshold):
        liveaction = LiveActionDB(action=WF1_NAME, parameters=params)
        liveaction, execution1 = action_service.request(liveaction)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        liveaction = self._wait_on_status(
            liveaction,
            action_constants.LIVEACTION_STATUS_RUNNING
        )

    # Check number of running instances
    running = LiveAction.count(
        action=WF1_NAME, status=action_constants.LIVEACTION_STATUS_RUNNING,
        parameters__friend=params['friend'])

    self.assertEqual(running, threshold)

    # Mock the mistral runner cancel method to assert cancel is called.
    mistral_runner_cls = runners.get_runner('mistral-v2').__class__
    mock_cancel_return_value = (action_constants.LIVEACTION_STATUS_CANCELING, None, None)
    mock_cancel = mock.MagicMock(return_value=mock_cancel_return_value)

    with mock.patch.object(mistral_runner_cls, 'cancel', mock_cancel):
        # Launch another instance of the workflow with mistral callback defined
        # to indicate that this is executed under a workflow.
        callback = {
            'source': MISTRAL_RUNNER_NAME,
            'url': 'http://127.0.0.1:8989/v2/action_executions/12345'
        }

        liveaction2 = LiveActionDB(action=WF1_NAME, parameters=params, callback=callback)
        liveaction2, execution2 = action_service.request(liveaction2)
        liveaction2 = LiveAction.get_by_id(str(liveaction2.id))

        # Assert cancel has been called.
        liveaction2 = self._wait_on_status(
            liveaction2,
            action_constants.LIVEACTION_STATUS_CANCELING
        )

        mistral_runner_cls.cancel.assert_called_once_with()
def test_resume_unidentified_tasks(self):
    """Re-running with task names not present in the workflow must fail."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    initial_liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
    initial_liveaction, initial_execution = action_service.request(initial_liveaction)

    # Rerun the execution naming a task ("x") that cannot be identified.
    rerun_context = {"re-run": {"ref": initial_execution.id, "tasks": ["x"]}}
    rerun_liveaction = LiveActionDB(
        action=WF1_NAME, parameters=ACTION_PARAMS, context=rerun_context)
    rerun_liveaction, _ = action_service.request(rerun_liveaction)
    rerun_liveaction = LiveAction.get_by_id(str(rerun_liveaction.id))

    # The rerun must fail and report the identification error.
    self.assertEqual(rerun_liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
    self.assertIn("Unable to identify", rerun_liveaction.result.get("error"))
def test_resume_option(self):
    """Re-running named tasks must invoke MistralRunner.resume with those tasks."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    first_liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
    first_liveaction, first_execution = action_service.request(first_liveaction)
    # The initial run must not trigger a resume.
    self.assertFalse(MistralRunner.resume.called)

    # Rerun the execution, restricted to task "x".
    rerun_context = {"re-run": {"ref": first_execution.id, "tasks": ["x"]}}
    rerun_liveaction = LiveActionDB(
        action=WF1_NAME, parameters=ACTION_PARAMS, context=rerun_context)
    rerun_liveaction, _ = action_service.request(rerun_liveaction)
    rerun_liveaction = LiveAction.get_by_id(str(rerun_liveaction.id))
    self.assertEqual(rerun_liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Resume must receive the original execution and the requested task list.
    MistralRunner.resume.assert_called_with(first_execution, rerun_context["re-run"]["tasks"])
def setUp(self):
    """Seed one liveaction per test action so policy tests have executions to inspect."""
    super(PolicyServiceTestCase, self).setUp()

    # First seed execution: wolfpack.action-1.
    self.lv_ac_db_1 = action_db_models.LiveActionDB(
        action='wolfpack.action-1', parameters={'actionstr': 'foo-last'})
    self.lv_ac_db_1, _ = action_service.request(self.lv_ac_db_1)

    # Second seed execution: wolfpack.action-2.
    self.lv_ac_db_2 = action_db_models.LiveActionDB(
        action='wolfpack.action-2', parameters={'actionstr': 'foo-last'})
    self.lv_ac_db_2, _ = action_service.request(self.lv_ac_db_2)

    # Third seed execution: core.local.
    self.lv_ac_db_3 = action_db_models.LiveActionDB(
        action='core.local', parameters={'cmd': 'date'})
    self.lv_ac_db_3, _ = action_service.request(self.lv_ac_db_3)
def test_trace_provided(self):
    """A supplied trace tag creates one trace; reusing its id appends to it."""
    self.traceable_liveaction['context']['trace_context'] = {'trace_tag': 'OohLaLaLa'}
    action_services.request(self.traceable_liveaction)
    all_traces = Trace.get_all()
    self.assertEqual(len(all_traces), 1)
    self.assertEqual(len(all_traces[0]['action_executions']), 1)

    # Let's use existing trace id in trace context.
    # We shouldn't create new trace object.
    existing_trace_id = str(all_traces[0].id)
    self.traceable_liveaction['context']['trace_context'] = {'id_': existing_trace_id}
    action_services.request(self.traceable_liveaction)
    all_traces = Trace.get_all()
    self.assertEqual(len(all_traces), 1)
    self.assertEqual(len(all_traces[0]['action_executions']), 2)
def test_retry_on_timeout_max_retries_reached(self):
    """apply_after must not schedule another retry once retry_count hits the max."""
    # Verify initial state
    self.assertSequenceEqual(LiveAction.get_all(), [])
    self.assertSequenceEqual(ActionExecution.get_all(), [])

    # Start a mock action which times out
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
    live_action_db, execution_db = action_service.request(liveaction)

    # Force the timed-out status and a retry_count at/over the policy max.
    live_action_db.status = LIVEACTION_STATUS_TIMED_OUT
    live_action_db.context['policies'] = {}
    live_action_db.context['policies']['retry'] = {'retry_count': 2}
    execution_db.status = LIVEACTION_STATUS_TIMED_OUT
    LiveAction.add_or_update(live_action_db)
    ActionExecution.add_or_update(execution_db)

    # Simulate policy "apply_after" run
    self.policy.apply_after(target=live_action_db)

    # Note: There should be no new objects since max retries has been reached
    live_action_dbs = LiveAction.get_all()
    action_execution_dbs = ActionExecution.get_all()
    self.assertEqual(len(live_action_dbs), 1)
    self.assertEqual(len(action_execution_dbs), 1)
    self.assertEqual(action_execution_dbs[0].status, LIVEACTION_STATUS_TIMED_OUT)
def test_launch_workflow_with_notifications(self):
    """Launching with notify data must forward notification settings in the mistral env."""
    notify_data = {'on_complete': {'channels': ['slack'],
                   'message': '"@channel: Action succeeded."', 'data': {}}}

    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, notify=notify_data)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # The mistral execution context must carry the ids from the mocked WF1 execution.
    mistral_context = liveaction.context.get('mistral', None)
    self.assertIsNotNone(mistral_context)
    self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
    self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))

    workflow_input = copy.deepcopy(ACTION_PARAMS)
    workflow_input.update({'count': '3'})

    # Expected environment handed to mistral, including the converted notify model.
    env = {
        'st2_execution_id': str(execution.id),
        'st2_liveaction_id': str(liveaction.id),
        '__actions': {
            'st2.action': {
                'st2_context': {
                    'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
                    'parent': str(liveaction.id),
                    'notify': NotificationsHelper.from_model(liveaction.notify),
                    'skip_notify_tasks': []
                }
            }
        }
    }

    executions.ExecutionManager.create.assert_called_with(
        WF1_NAME, workflow_input=workflow_input, env=env)
def test_launch_workflow_with_many_workflows(self):
    """A definition file containing multiple workflows must be rejected."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF2_YAML_FILE_PATH)
    request = LiveActionDB(action=WF2_NAME, parameters=ACTION_PARAMS)
    request, _ = action_service.request(request)
    result = LiveAction.get_by_id(str(request.id))
    self.assertEqual(result.status, action_constants.LIVEACTION_STATUS_FAILED)
    self.assertIn('Multiple workflows is not supported.', result.result['message'])
def test_launch_workflow_mistral_offline(self):
    """When mistral is unreachable, the execution must fail with a connection error."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    request = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
    request, _ = action_service.request(request)
    result = LiveAction.get_by_id(str(request.id))
    self.assertEqual(result.status, action_constants.LIVEACTION_STATUS_FAILED)
    self.assertIn('Failed to connect to mistral', result.result['message'])
def _submit_request(self):
    """Request ACTION_REF as USERNAME and return the (request, execution) pair."""
    req = LiveActionDB(
        action=ACTION_REF,
        context={'user': USERNAME},
        parameters={'hosts': 'localhost', 'cmd': 'uname -a'})
    req, _ = action_service.request(req)
    execution = action_db.get_liveaction_by_id(str(req.id))
    return req, execution
def test_launch_workflow_with_auth(self):
    """Launching with an auth context must forward the auth token in the mistral env."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
    liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, context=ACTION_CONTEXT)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # The mistral execution context must carry the ids from the mocked WF1 execution.
    mistral_context = liveaction.context.get('mistral', None)
    self.assertIsNotNone(mistral_context)
    self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
    self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))

    workflow_input = copy.deepcopy(ACTION_PARAMS)
    workflow_input.update({'count': '3'})

    # Expected environment handed to mistral, including the requester's auth token.
    env = {
        'st2_execution_id': str(execution.id),
        'st2_liveaction_id': str(liveaction.id),
        '__actions': {
            'st2.action': {
                'st2_context': {
                    'auth_token': TOKEN_DB.token,
                    'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
                    'parent': str(liveaction.id),
                    'notify': {},
                    'skip_notify_tasks': []
                }
            }
        }
    }

    executions.ExecutionManager.create.assert_called_with(
        WF1_NAME, workflow_input=workflow_input, env=env)
def _schedule_execution(self, action_alias_db, params, notify, context):
    """Resolve the alias' action, authorize the requester, and schedule an execution.

    :param action_alias_db: Alias whose ``action_ref`` will be executed.
    :param params: Raw parameter values; cast to the action's declared types below.
    :param notify: Notification settings passed through to the liveaction.
    :param context: Execution context; defaulted to alias ref + system user if falsy.
    :return: ``ActionExecutionAPI`` model of the created execution.
    :raises StackStormDBObjectNotFoundError: If the referenced action does not exist.
    """
    action_ref = action_alias_db.action_ref
    action_db = action_utils.get_action_by_ref(action_ref)

    if not action_db:
        raise StackStormDBObjectNotFoundError('Action with ref "%s" not found ' % (action_ref))

    # Authorization check happens before any parameter processing.
    assert_request_user_has_resource_db_permission(request=pecan.request, resource_db=action_db,
                                                   permission_type=PermissionType.ACTION_EXECUTE)

    try:
        # prior to shipping off the params cast them to the right type.
        params = action_param_utils.cast_params(action_ref=action_alias_db.action_ref,
                                                params=params,
                                                cast_overrides=CAST_OVERRIDES)
        if not context:
            context = {
                'action_alias_ref': reference.get_ref_from_model(action_alias_db),
                'user': get_system_username()
            }
        liveaction = LiveActionDB(action=action_alias_db.action_ref, context=context,
                                  parameters=params, notify=notify)
        _, action_execution_db = action_service.request(liveaction)
        return ActionExecutionAPI.from_model(action_execution_db)
    except ValueError as e:
        # Bad parameter values map to a client error.
        LOG.exception('Unable to execute action.')
        pecan.abort(http_client.BAD_REQUEST, str(e))
    except jsonschema.ValidationError as e:
        # Schema validation failures also map to a client error.
        LOG.exception('Unable to execute action. Parameter validation failed.')
        pecan.abort(http_client.BAD_REQUEST, str(e))
    except Exception as e:
        LOG.exception('Unable to execute action. Unexpected error encountered.')
        pecan.abort(http_client.INTERNAL_SERVER_ERROR, str(e))
def test_adherence_to_output_schema(self):
    """Driving a workflow with an output schema to completion must still succeed."""
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential_with_schema.yaml')
    wf_input = {'who': 'Thanos'}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
    wf_ex_db = wf_ex_dbs[0]

    # Complete task1 and let the workflow engine advance.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0]
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
    tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)

    # Complete task2 and let the workflow engine advance.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task2'}
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk2_ex_db.id))[0]
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
    tk2_ex_db = wf_db_access.TaskExecution.get_by_id(tk2_ex_db.id)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)

    # Complete task3, finishing the workflow.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task3'}
    tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk3_ex_db.id))[0]
    wf_svc.handle_action_execution_completion(tk3_ac_ex_db)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)

    # Both the liveaction and the action execution must end up succeeded.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_no_retry_on_workflow_task(self):
    """A timed-out execution under a workflow context must not be retried."""
    # Verify initial state.
    self.assertSequenceEqual(LiveAction.get_all(), [])
    self.assertSequenceEqual(ActionExecution.get_all(), [])

    # Start a mock action which times out. The parent context marks this
    # execution as a task of a workflow.
    live_action_db = LiveActionDB(
        action='wolfpack.action-1',
        parameters={'actionstr': 'foo'},
        context={'parent': {'execution_id': 'abcde'}}
    )

    live_action_db, execution_db = action_service.request(live_action_db)
    live_action_db = LiveAction.get_by_id(str(live_action_db.id))
    self.assertEqual(live_action_db.status, LIVEACTION_STATUS_REQUESTED)

    # Expire the workflow instance.
    live_action_db.status = LIVEACTION_STATUS_TIMED_OUT
    live_action_db.context['policies'] = {}
    execution_db.status = LIVEACTION_STATUS_TIMED_OUT
    LiveAction.add_or_update(live_action_db)
    ActionExecution.add_or_update(execution_db)

    # Simulate policy "apply_after" run.
    self.policy.apply_after(target=live_action_db)

    # Note: There should be no new objects since live action is under the
    # context of a workflow.
    live_action_dbs = LiveAction.get_all()
    action_execution_dbs = ActionExecution.get_all()
    self.assertEqual(len(live_action_dbs), 1)
    self.assertEqual(len(action_execution_dbs), 1)
    self.assertEqual(action_execution_dbs[0].status, LIVEACTION_STATUS_TIMED_OUT)
def test_no_retry_on_non_applicable_statuses(self):
    """The retry policy must ignore executions in non-terminal/canceled states."""
    # Verify initial state.
    self.assertSequenceEqual(LiveAction.get_all(), [])
    self.assertSequenceEqual(ActionExecution.get_all(), [])

    # Start a mock action in various statuses in which we shouldn't retry.
    non_retry_statuses = [
        LIVEACTION_STATUS_REQUESTED,
        LIVEACTION_STATUS_SCHEDULED,
        LIVEACTION_STATUS_DELAYED,
        LIVEACTION_STATUS_CANCELING,
        LIVEACTION_STATUS_CANCELED,
    ]

    action_ref = 'wolfpack.action-1'

    for status in non_retry_statuses:
        liveaction = LiveActionDB(action=action_ref, parameters={'actionstr': 'foo'})
        live_action_db, execution_db = action_service.request(liveaction)

        # Force the pair into the status under test.
        live_action_db.status = status
        execution_db.status = status
        LiveAction.add_or_update(live_action_db)
        ActionExecution.add_or_update(execution_db)

        # Simulate policy "apply_after" run.
        self.policy.apply_after(target=live_action_db)

    # None of the actions should have been retried, so the counts match the
    # number of originally requested executions.
    live_action_dbs = LiveAction.get_all()
    action_execution_dbs = ActionExecution.get_all()
    self.assertEqual(len(live_action_dbs), len(non_retry_statuses))
    self.assertEqual(len(action_execution_dbs), len(non_retry_statuses))
def test_launch_workbook_with_many_workflows_no_default(self):
    """Launching a workbook with several workflows but no default must fail."""
    MistralRunner.entry_point = mock.PropertyMock(return_value=WB3_YAML_FILE_PATH)

    liveaction = LiveActionDB(action=WB3_NAME, parameters=ACTION_PARAMS)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))

    # The runner cannot decide which workflow to execute, so the run fails.
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
    self.assertIn('Default workflow cannot be determined.', liveaction.result['message'])
def test_chained_executions(self):
    """An action-chain execution mirrors its liveaction and links its children."""
    liveaction = LiveActionDB(action='core.chain')
    liveaction, _ = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)

    execution = self._get_action_execution(liveaction__id=str(liveaction.id),
                                           raise_exception=True)

    # The execution record embeds the full action and runner definitions.
    action = action_utils.get_action_by_ref('core.chain')
    self.assertDictEqual(execution.action, vars(ActionAPI.from_model(action)))
    runner = RunnerType.get_by_name(action.runner_type['name'])
    self.assertDictEqual(execution.runner, vars(RunnerTypeAPI.from_model(runner)))

    # Every mirrored field on the execution matches the liveaction.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(execution.start_timestamp, liveaction.start_timestamp)
    self.assertEqual(execution.end_timestamp, liveaction.end_timestamp)
    self.assertEqual(execution.result, liveaction.result)
    self.assertEqual(execution.status, liveaction.status)
    self.assertEqual(execution.context, liveaction.context)
    self.assertEqual(execution.liveaction['callback'], liveaction.callback)
    self.assertEqual(execution.liveaction['action'], liveaction.action)

    # Each child execution of the chain points back at the parent.
    self.assertGreater(len(execution.children), 0)
    for child in execution.children:
        record = ActionExecution.get(id=child, raise_exception=True)
        self.assertEqual(record.parent, str(execution.id))
        self.assertEqual(record.action['name'], 'local')
        self.assertEqual(record.runner['name'], 'run-local')
def test_run_workflow_with_action_less_tasks(self):
    """Action-less tasks complete inline; tasks with actions spawn executions."""
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'action-less-tasks.yaml')
    wf_input = {'name': 'Thanos'}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
    wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)

    # Assert task1 is already completed; no action execution exists for it.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))
    self.assertEqual(len(tk1_ac_ex_dbs), 0)
    self.assertEqual(tk1_ex_db.status, wf_statuses.SUCCEEDED)

    # Assert task2 is already completed.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task2'}
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk2_ex_db.id))[0]
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction['id'])
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)

    # Assert task3 is already completed.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task3'}
    tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk3_ex_db.id))[0]
    tk3_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk3_ac_ex_db.liveaction['id'])
    self.assertEqual(tk3_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk3_ac_ex_db)

    # Assert task4 is already completed; no action execution exists for it.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task4'}
    tk4_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk4_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk4_ex_db.id))
    self.assertEqual(len(tk4_ac_ex_dbs), 0)
    self.assertEqual(tk4_ex_db.status, wf_statuses.SUCCEEDED)

    # Assert task5 is already completed.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task5'}
    tk5_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk5_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk5_ex_db.id))[0]
    tk5_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk5_ac_ex_db.liveaction['id'])
    self.assertEqual(tk5_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk5_ac_ex_db)

    # Assert workflow is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check workflow output. The fixture upper-cases the greeting.
    expected_output = {
        'greeting': '%s, All your base are belong to us!' % wf_input['name']
    }
    expected_output['greeting'] = expected_output['greeting'].upper()
    self.assertDictEqual(wf_ex_db.output, expected_output)

    # Check liveaction and action execution result.
    expected_result = {'output': expected_output}
    self.assertDictEqual(lv_ac_db.result, expected_result)
    self.assertDictEqual(ac_ex_db.result, expected_result)
def test_handle_action_execution_completion(self):
    """Policies are applied by the workflow engine, never by the notifier."""
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflow.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)

    # Identify the records for the main workflow.
    wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
    tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(tk_ex_dbs), 1)

    # Identify the records for the tasks.
    t1_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk_ex_dbs[0].id))[0]
    t1_wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(t1_ac_ex_db.id))[0]
    self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(t1_wf_ex_db.status, wf_statuses.RUNNING)

    # Manually notify action execution completion for each task of the
    # subworkflow under task1. Assert policies are not applied in the
    # notifier, only when the workflow engine processes the execution.
    for index in range(3):
        subwf_tk_ex_db = wf_db_access.TaskExecution.query(
            workflow_execution=str(t1_wf_ex_db.id))[index]
        subwf_tk_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(subwf_tk_ex_db.id))[0]

        notifier.get_notifier().process(subwf_tk_ac_ex_db)
        self.assertFalse(pc_svc.apply_post_run_policies.called)

        t1_tk_ex_dbs = wf_db_access.TaskExecution.query(
            workflow_execution=str(t1_wf_ex_db.id))
        self.assertEqual(len(t1_tk_ex_dbs), index + 1)

        workflows.get_engine().process(subwf_tk_ac_ex_db)
        self.assertTrue(pc_svc.apply_post_run_policies.called)
        pc_svc.apply_post_run_policies.reset_mock()

    # Process completion of task1 of the main workflow.
    t1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(t1_ac_ex_db.id)
    notifier.get_notifier().process(t1_ac_ex_db)
    self.assertFalse(pc_svc.apply_post_run_policies.called)
    tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(tk_ex_dbs), 1)
    workflows.get_engine().process(t1_ac_ex_db)
    self.assertTrue(pc_svc.apply_post_run_policies.called)
    pc_svc.apply_post_run_policies.reset_mock()

    # Process completion of task2 of the main workflow.
    t2_ex_db_qry = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task2'}
    t2_ex_db = wf_db_access.TaskExecution.query(**t2_ex_db_qry)[0]
    t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
    self.assertEqual(t2_ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    notifier.get_notifier().process(t2_ac_ex_db)
    self.assertFalse(pc_svc.apply_post_run_policies.called)
    tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(tk_ex_dbs), 2)
    workflows.get_engine().process(t2_ac_ex_db)
    self.assertTrue(pc_svc.apply_post_run_policies.called)
    pc_svc.apply_post_run_policies.reset_mock()

    # Assert the main workflow is completed.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_action_context_api_user(self):
    """Subworkflow defaults resolve api_user from the parent action context."""
    wf_name = 'subworkflow-default-value-from-action-context'
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + '.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'],
                                         context={'api_user': '******'})
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)

    # Identify the records for the main workflow.
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]
    t1_ex_db = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id))[0]
    t1_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))[0]
    t1_wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(t1_ac_ex_db.id))[0]
    self.assertEqual(t1_ex_db.status, wf_states.RUNNING)
    self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(t1_wf_ex_db.status, wf_states.RUNNING)

    # Complete the subworkflow under task1 by finishing its tasks in order.
    for sub_task_id in ('task1', 'task2', 'task3'):
        query_filters = {
            'workflow_execution': str(t1_wf_ex_db.id),
            'task_id': sub_task_id
        }
        sub_tk_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        sub_tk_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(sub_tk_ex_db.id))[0]
        wf_svc.handle_action_execution_completion(sub_tk_ac_ex_db)

    t1_wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(t1_wf_ex_db.id))
    t1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t1_ac_ex_db.id))
    self.assertEqual(t1_wf_ex_db.status, wf_states.SUCCEEDED)
    self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Complete task1 and main workflow.
    wf_svc.handle_action_execution_completion(t1_ac_ex_db)
    t1_ex_db = wf_db_access.TaskExecution.get_by_id(str(t1_ex_db.id))
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(wf_ex_db.id))
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(t1_ex_db.status, wf_states.SUCCEEDED)
    self.assertEqual(wf_ex_db.status, wf_states.SUCCEEDED)
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check result.
    expected_result = {
        'output': {
            'msg': 'Thanos, All your base are belong to us!'
        }
    }
    self.assertDictEqual(lv_ac_db.result, expected_result)
def test_over_threshold_delay_executions(self):
    """Requests over the per-attribute threshold are delayed, then rescheduled."""
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency.attr')
    self.assertGreater(policy_db.parameters['threshold'], 0)
    self.assertIn('actionstr', policy_db.parameters['attributes'])

    # Saturate the threshold with executions sharing the same attribute value.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1',
                                  parameters={'actionstr': 'fu'})
        action_service.request(liveaction)

    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.

    # Assert the action is delayed.
    delayed = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running states.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Mark one of the execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)
    expected_num_pubs += 1  # Tally succeeded state.

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # The delayed request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running state.

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_launch_workflow_under_parent_chain_with_jinja_parameters(self):
    """Jinja expressions in chain parameters are wrapped in raw blocks for Mistral."""
    ac_ctx = {
        'chain': {
            'parameters': {
                'var1': 'foobar',
                'var2': '{{foobar}}',
                'var3': ['{{foo}}', '{{bar}}'],
                'var4': {
                    'foobar': '{{foobar}}'
                },
            }
        }
    }

    liveaction = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, context=ac_ctx)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # The mistral execution details are recorded on the liveaction context.
    mistral_context = liveaction.context.get('mistral', None)
    self.assertIsNotNone(mistral_context)
    self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
    self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))

    workflow_input = copy.deepcopy(ACTION_PARAMS)
    workflow_input.update({'count': '3'})

    # Chain parameters forwarded through the environment have their jinja
    # expressions escaped with {% raw %} so Mistral does not evaluate them.
    env = {
        'st2_execution_id': str(execution.id),
        'st2_liveaction_id': str(liveaction.id),
        'st2_action_api_url': 'http://0.0.0.0:9101/v1',
        '__actions': {
            'st2.action': {
                'st2_context': {
                    'api_url': 'http://0.0.0.0:9101/v1',
                    'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
                    'parent': {
                        'pack': 'mistral_tests',
                        'execution_id': str(execution.id),
                        'chain': {
                            'parameters': {
                                'var1': 'foobar',
                                'var2': '{% raw %}{{foobar}}{% endraw %}',
                                'var3': [
                                    '{% raw %}{{foo}}{% endraw %}',
                                    '{% raw %}{{bar}}{% endraw %}'
                                ],
                                'var4': {
                                    'foobar': '{% raw %}{{foobar}}{% endraw %}'
                                }
                            }
                        }
                    },
                    'notify': {},
                    'skip_notify_tasks': []
                }
            }
        }
    }

    executions.ExecutionManager.create.assert_called_with(
        WF1_NAME, workflow_input=workflow_input, env=env)
def test_req_override_runner_parameter_type_attribute_no_value_changed(self):
    """Overriding a runner parameter attribute without changing its value is
    accepted — the request must complete without raising."""
    parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
    liveaction = LiveActionDB(action=ACTION_OVR_PARAM_BAD_ATTR_NOOP_REF,
                              parameters=parameters)
    liveaction, _ = action_service.request(liveaction)
def test_on_cancellation(self):
    """Canceling a scheduled execution frees capacity for the delayed one."""
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters['threshold']):
        parameters = {'actionstr': 'foo-' + str(i)}
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters=parameters)
        action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Check the number of action executions in scheduled state.
    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Since states are being processed async, wait for the liveaction to go
    # into delayed state.
    liveaction = self._wait_on_status(
        liveaction, action_constants.LIVEACTION_STATUS_DELAYED)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 0  # The delayed status change should not be published.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Cancel execution.
    action_service.request_cancellation(scheduled[0], 'stanley')
    expected_num_pubs += 2  # Tally the canceling and canceled states.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 2  # Tally scheduled and running state.

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_over_threshold_cancel_executions(self):
    """With the policy's cancel action, requests over the threshold are canceled."""
    policy_db = Policy.get_by_ref("wolfpack.action-2.concurrency.attr.cancel")
    self.assertEqual(policy_db.parameters["action"], "cancel")
    self.assertGreater(policy_db.parameters["threshold"], 0)
    self.assertIn("actionstr", policy_db.parameters["attributes"])

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters["threshold"]):
        liveaction = LiveActionDB(action="wolfpack.action-2",
                                  parameters={"actionstr": "foo"})
        action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Check the number of action executions in scheduled state.
    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters["threshold"])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action="wolfpack.action-2",
                              parameters={"actionstr": "foo"})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Assert the canceling state is being published.
    calls = [call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)]
    LiveActionPublisher.publish_state.assert_has_calls(calls)
    expected_num_pubs += 2  # Tally canceling and canceled state changes.
    expected_num_exec += 0  # This request will not be scheduled for execution.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Assert the action is canceled.
    canceled = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(canceled.status, action_constants.LIVEACTION_STATUS_CANCELED)
def test_with_items_concurrency(self):
    """A with-items task runs at most `concurrency` action executions at a time."""
    num_items = 3
    concurrency = 2
    wf_input = {'concurrency': concurrency}
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'with-items-concurrency.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Process the first set of action executions from with items concurrency.
    # Only `concurrency` executions exist so far.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))
    self.assertEqual(len(t1_ac_ex_dbs), concurrency)
    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]
    self.assertTrue(all(status))

    for t1_ac_ex_db in t1_ac_ex_dbs:
        workflows.get_engine().process(t1_ac_ex_db)

    # Task and workflow stay running while items remain.
    t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
    self.assertEqual(t1_ex_db.status, wf_statuses.RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Process the second set of action executions from with items concurrency.
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))
    self.assertEqual(len(t1_ac_ex_dbs), num_items)
    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]
    self.assertTrue(all(status))

    for t1_ac_ex_db in t1_ac_ex_dbs[concurrency:]:
        workflows.get_engine().process(t1_ac_ex_db)

    t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
    self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)

    # Assert the main workflow is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
def request_task_execution(wf_ex_db, task_id, task_spec, task_ctx, st2_ctx):
    """Create a task execution record and request its action execution.

    Inserts a TaskExecutionDB in REQUESTED state. Action-less tasks are fast
    forwarded to SUCCEEDED and returned immediately. Otherwise the referenced
    action and its runner are resolved, live parameters are rendered, the task
    is marked RUNNING, and an action execution is requested. On any failure the
    task execution is marked FAILED with the error recorded and the exception
    is re-raised.
    """
    wf_ac_ex_id = wf_ex_db.action_execution
    LOG.info('[%s] Processing task execution request for "%s".', wf_ac_ex_id, task_id)

    # Create a record for task execution.
    task_ex_db = wf_db_models.TaskExecutionDB(
        workflow_execution=str(wf_ex_db.id),
        task_name=task_spec.name or task_id,
        task_id=task_id,
        task_spec=task_spec.serialize(),
        context=task_ctx,
        status=states.REQUESTED)

    # Insert new record into the database.
    task_ex_db = wf_db_access.TaskExecution.insert(task_ex_db, publish=False)
    task_ex_id = str(task_ex_db.id)
    LOG.info('[%s] Task execution "%s" created for task "%s".',
             wf_ac_ex_id, task_ex_id, task_id)

    try:
        # Return here if no action is specified in task spec.
        if task_spec.action is None:
            # Set the task execution to running.
            task_ex_db.status = states.RUNNING
            task_ex_db = wf_db_access.TaskExecution.update(task_ex_db, publish=False)

            # Fast forward task execution to completion.
            update_task_execution(str(task_ex_db.id), states.SUCCEEDED)
            update_task_flow(str(task_ex_db.id), publish=False)

            # Refresh and return the task execution.
            return wf_db_access.TaskExecution.get_by_id(str(task_ex_db.id))

        # Identify the action to execute.
        action_db = ac_db_util.get_action_by_ref(ref=task_spec.action)

        if not action_db:
            error = 'Unable to find action "%s".' % task_spec.action
            raise ac_exc.InvalidActionReferencedException(error)

        # Identify the runner for the action.
        runner_type_db = ac_db_util.get_runnertype_by_name(action_db.runner_type['name'])

        # Set context for the action execution.
        ac_ex_ctx = {
            'parent': st2_ctx,
            'orchestra': {
                'workflow_execution_id': str(wf_ex_db.id),
                'task_execution_id': str(task_ex_db.id),
                'task_name': task_spec.name or task_id,
                'task_id': task_id
            }
        }

        # Render action execution parameters and setup action execution object.
        ac_ex_params = param_utils.render_live_params(
            runner_type_db.runner_parameters or {},
            action_db.parameters or {},
            getattr(task_spec, 'input', None) or {},
            ac_ex_ctx)

        lv_ac_db = lv_db_models.LiveActionDB(
            action=task_spec.action,
            workflow_execution=str(wf_ex_db.id),
            task_execution=str(task_ex_db.id),
            context=ac_ex_ctx,
            parameters=ac_ex_params)

        # Set the task execution to running first otherwise a race can occur
        # where the action execution finishes first and the completion handler
        # conflicts with this status update.
        task_ex_db.status = states.RUNNING
        task_ex_db = wf_db_access.TaskExecution.update(task_ex_db, publish=False)

        # Request action execution.
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        LOG.info('[%s] Action execution "%s" requested for task "%s".',
                 wf_ac_ex_id, str(ac_ex_db.id), task_id)
    except Exception as e:
        LOG.exception('[%s] Failed task execution for task "%s".', wf_ac_ex_id, task_id)
        result = {
            'errors': [{
                'message': str(e),
                'task_id': task_ex_db.task_id
            }]
        }
        update_task_execution(str(task_ex_db.id), states.FAILED, result)
        raise e

    return task_ex_db
def test_on_cancellation(self):
    """Verify that canceling a running execution frees capacity so a delayed one is scheduled."""
    policy_db = Policy.get_by_ref("wolfpack.action-1.concurrency.attr")
    self.assertGreater(policy_db.parameters["threshold"], 0)
    self.assertIn("actionstr", policy_db.parameters["attributes"])

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters["threshold"]):
        liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "foo"})
        action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Check the number of action executions in scheduled state.
    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters["threshold"])

    # Assert the correct number of published states and action executions to catch
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "foo"})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into delayed state.
    liveaction = self._wait_on_status(
        liveaction, action_constants.LIVEACTION_STATUS_DELAYED)
    delayed = liveaction
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 0  # The delayed status change should not be published.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The executions with actionstr "foo" are at the threshold but actionstr "bar" is not,
    # because this policy tracks concurrency per attribute value.
    liveaction = LiveActionDB(action="wolfpack.action-1", parameters={"actionstr": "bar"})
    liveaction, _ = action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into scheduled state.
    liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES)
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running states.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Cancel execution.
    action_service.request_cancellation(scheduled[0], "stanley")
    expected_num_pubs += 2  # Tally the canceling and canceled states.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # The delayed request is expected to be executed.
    expected_num_pubs += 2  # Tally scheduled and running state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into scheduled state.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES)
def test_over_threshold_delay_executions(self):
    """Verify executions above the concurrency threshold are delayed, then rescheduled
    once one of the scheduled executions completes (eventlet-polling variant).
    """
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
        action_service.request(liveaction)

    # Since states are being processed asynchronously, wait for the
    # liveactions to go into scheduled states.
    for i in range(0, 100):
        eventlet.sleep(1)
        scheduled = [
            item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
        ]
        if len(scheduled) == policy_db.parameters['threshold']:
            break

    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 1  # Eventually executed once capacity frees up (asserted at the end).
    expected_num_pubs += 1  # Tally requested state.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into delayed state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status == action_constants.LIVEACTION_STATUS_DELAYED:
            break

    # Assert the action is delayed.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_DELAYED)

    # Mark one of the execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)
    expected_num_pubs += 1  # Tally succeeded state.

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_pubs += 3  # Tally requested, scheduled, and running state.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into scheduled state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status in SCHEDULED_STATES:
            break

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_over_threshold_cancel_executions(self):
    """Verify executions above the threshold are canceled when the policy action is 'cancel'."""
    policy_db = Policy.get_by_ref('wolfpack.action-2.concurrency.cancel')
    self.assertEqual(policy_db.parameters['action'], 'cancel')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'})
        action_service.request(liveaction)

    # Since states are being processed asynchronously, wait for the
    # liveactions to go into scheduled states.
    for i in range(0, 100):
        eventlet.sleep(1)
        scheduled = [
            item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
        ]
        if len(scheduled) == policy_db.parameters['threshold']:
            break

    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions to catch
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be canceled since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 1  # Tally requested state.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into cancel state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status in [
                action_constants.LIVEACTION_STATUS_CANCELING,
                action_constants.LIVEACTION_STATUS_CANCELED
        ]:
            break

    # Assert the canceling state is being published.
    calls = [
        call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)
    ]
    LiveActionPublisher.publish_state.assert_has_calls(calls)
    expected_num_pubs += 2  # Tally canceling and canceled state changes.

    # Assert the action is canceled.
    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_run_workflow_with_unicode_input(self):
    """Verify a sequential workflow runs end to end with unicode input rendered into output."""
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
    wf_input = {'who': '薩諾斯'}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]

    # Process task1: assert it succeeded, then manually step the engine past it.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task1'
    }
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk1_ac_ex_db.liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
    tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
    self.assertEqual(tk1_ex_db.status, wf_statuses.SUCCEEDED)

    # Process task2.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task2'
    }
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk2_ex_db.id))[0]
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk2_ac_ex_db.liveaction['id'])
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
    tk2_ex_db = wf_db_access.TaskExecution.get_by_id(tk2_ex_db.id)
    self.assertEqual(tk2_ex_db.status, wf_statuses.SUCCEEDED)

    # Process task3.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task3'
    }
    tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk3_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk3_ex_db.id))[0]
    tk3_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk3_ac_ex_db.liveaction['id'])
    self.assertEqual(tk3_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    wf_svc.handle_action_execution_completion(tk3_ac_ex_db)
    tk3_ex_db = wf_db_access.TaskExecution.get_by_id(tk3_ex_db.id)
    self.assertEqual(tk3_ex_db.status, wf_statuses.SUCCEEDED)

    # Assert workflow is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check workflow output. Under Python 2 the literal is a byte string and
    # must be decoded to unicode before interpolation into the expected message.
    wf_input_val = wf_input['who'].decode(
        'utf-8') if six.PY2 else wf_input['who']
    expected_output = {
        'msg': '%s, All your base are belong to us!' % wf_input_val
    }
    self.assertDictEqual(wf_ex_db.output, expected_output)

    # Check liveaction and action execution result.
    expected_result = {'output': expected_output}
    self.assertDictEqual(lv_ac_db.result, expected_result)
    self.assertDictEqual(ac_ex_db.result, expected_result)
def test_run_workflow(self):
    """Verify an orquesta workflow runs end to end: contexts, graph, task states, and output."""
    username = '******'
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
    wf_input = {'who': 'Thanos'}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)

    # The main action execution for this workflow is not under the context of another workflow.
    self.assertFalse(
        wf_svc.is_action_execution_under_workflow_context(ac_ex_db))

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertTrue(lv_ac_db.action_is_workflow)
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))
    wf_ex_db = wf_ex_dbs[0]

    # Check required attributes.
    self.assertEqual(len(wf_ex_dbs), 1)
    self.assertIsNotNone(wf_ex_db.id)
    self.assertGreater(wf_ex_db.rev, 0)
    self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
    self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)

    # Check context in the workflow execution.
    expected_wf_ex_ctx = {
        'st2': {
            'workflow_execution_id': str(wf_ex_db.id),
            'action_execution_id': str(ac_ex_db.id),
            'api_url': 'http://127.0.0.1/v1',
            'user': username,
            'pack': 'orquesta_tests'
        },
        'parent': {
            'pack': 'orquesta_tests'
        }
    }
    self.assertDictEqual(wf_ex_db.context, expected_wf_ex_ctx)

    # Check context in the liveaction.
    expected_lv_ac_ctx = {
        'workflow_execution': str(wf_ex_db.id),
        'pack': 'orquesta_tests'
    }
    self.assertDictEqual(lv_ac_db.context, expected_lv_ac_ctx)

    # Check graph.
    self.assertIsNotNone(wf_ex_db.graph)
    self.assertTrue(isinstance(wf_ex_db.graph, dict))
    self.assertIn('nodes', wf_ex_db.graph)
    self.assertIn('adjacency', wf_ex_db.graph)

    # Check task states.
    self.assertIsNotNone(wf_ex_db.state)
    self.assertTrue(isinstance(wf_ex_db.state, dict))
    self.assertIn('tasks', wf_ex_db.state)
    self.assertIn('sequence', wf_ex_db.state)

    # Check input.
    self.assertDictEqual(wf_ex_db.input, wf_input)

    # Assert task1 is already completed.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task1'
    }
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk1_ac_ex_db.liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.context.get('user'), username)
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    self.assertTrue(
        wf_svc.is_action_execution_under_workflow_context(tk1_ac_ex_db))

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)

    # Assert task1 succeeded and workflow is still running.
    tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
    self.assertEqual(tk1_ex_db.status, wf_statuses.SUCCEEDED)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Assert task2 is already completed.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task2'
    }
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk2_ex_db.id))[0]
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk2_ac_ex_db.liveaction['id'])
    self.assertEqual(tk2_lv_ac_db.context.get('user'), username)
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    self.assertTrue(
        wf_svc.is_action_execution_under_workflow_context(tk2_ac_ex_db))

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)

    # Assert task2 succeeded and workflow is still running.
    tk2_ex_db = wf_db_access.TaskExecution.get_by_id(tk2_ex_db.id)
    self.assertEqual(tk2_ex_db.status, wf_statuses.SUCCEEDED)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Assert task3 is already completed.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task3'
    }
    tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk3_ac_ex_db = ex_db_access.ActionExecution.query(
        task_execution=str(tk3_ex_db.id))[0]
    tk3_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk3_ac_ex_db.liveaction['id'])
    self.assertEqual(tk3_lv_ac_db.context.get('user'), username)
    self.assertEqual(tk3_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    self.assertTrue(
        wf_svc.is_action_execution_under_workflow_context(tk3_ac_ex_db))

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk3_ac_ex_db)

    # Assert workflow is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check post run is invoked for the liveaction.
    self.assertTrue(runners_utils.invoke_post_run.called)
    self.assertEqual(runners_utils.invoke_post_run.call_count, 1)

    # Check workflow output.
    expected_output = {
        'msg': '%s, All your base are belong to us!' % wf_input['who']
    }
    self.assertDictEqual(wf_ex_db.output, expected_output)

    # Check liveaction and action execution result.
    expected_result = {'output': expected_output}
    self.assertDictEqual(lv_ac_db.result, expected_result)
    self.assertDictEqual(ac_ex_db.result, expected_result)
def test_delay_for_with_items_concurrency(self):
    """Verify delay is applied only to the first batch of with-items executions
    when a concurrency limit is set (delay is in seconds on the task, milliseconds
    on the liveaction/action execution records).
    """
    num_items = 3
    concurrency = 2
    expected_delay_sec = 1
    expected_delay_msec = expected_delay_sec * 1000
    wf_input = {'concurrency': concurrency, 'delay': expected_delay_sec}
    wf_meta = base.get_wf_fixture_meta_data(
        TEST_PACK_PATH, 'with-items-concurrency-delay.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = self._wait_on_status(
        lv_ac_db, action_constants.LIVEACTION_STATUS_RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Process the first set of action executions from with items concurrency.
    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task1'
    }
    t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))
    t1_lv_ac_dbs = lv_db_access.LiveAction.query(
        task_execution=str(t1_ex_db.id))

    # Assert the number of concurrent items is correct.
    self.assertEqual(len(t1_ac_ex_dbs), concurrency)

    # Assert delay value is rendered and assigned.
    self.assertEqual(t1_ex_db.delay, expected_delay_sec)

    for t1_lv_ac_db in t1_lv_ac_dbs:
        self.assertEqual(t1_lv_ac_db.delay, expected_delay_msec)

    for t1_ac_ex_db in t1_ac_ex_dbs:
        self.assertEqual(t1_ac_ex_db.delay, expected_delay_msec)

    # All executions in the first batch should already have succeeded.
    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]
    self.assertTrue(all(status))

    # Manually step the workflow engine through each completed execution.
    for t1_ac_ex_db in t1_ac_ex_dbs:
        workflows.get_engine().process(t1_ac_ex_db)

    t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
    self.assertEqual(t1_ex_db.status, wf_states.RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_states.RUNNING)

    # Process the second set of action executions from with items concurrency.
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))
    t1_lv_ac_dbs = lv_db_access.LiveAction.query(
        task_execution=str(t1_ex_db.id))

    # Assert delay value is rendered and assigned only to the first set of action executions.
    t1_lv_ac_dbs_delays = [
        t1_lv_ac_db.delay for t1_lv_ac_db in t1_lv_ac_dbs
        if t1_lv_ac_db.delay is not None
    ]
    self.assertEqual(len(t1_lv_ac_dbs_delays), concurrency)
    t1_ac_ex_dbs_delays = [
        t1_ac_ex_db.delay for t1_ac_ex_db in t1_ac_ex_dbs
        if t1_ac_ex_db.delay is not None
    ]
    self.assertEqual(len(t1_ac_ex_dbs_delays), concurrency)

    # Assert all items are processed.
    self.assertEqual(len(t1_ac_ex_dbs), num_items)
    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]
    self.assertTrue(all(status))

    # Step the engine through the remaining (second batch) executions only.
    for t1_ac_ex_db in t1_ac_ex_dbs[concurrency:]:
        workflows.get_engine().process(t1_ac_ex_db)

    t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
    self.assertEqual(t1_ex_db.status, wf_states.SUCCEEDED)

    # Assert the main workflow is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_states.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
def test_over_threshold_delay_executions(self):
    """Verify a delayed execution is scheduled once capacity frees up
    (scheduler-queue-driven variant using _process_scheduling_queue).
    """
    # Ensure the concurrency policy is accurate.
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
    self.assertGreater(policy_db.parameters['threshold'], 0)

    # Launch action executions until the expected threshold is reached.
    for i in range(0, policy_db.parameters['threshold']):
        parameters = {'actionstr': 'foo-' + str(i)}
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters=parameters)
        action_service.request(liveaction)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Check the number of action executions in scheduled state.
    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # Assert the correct number of published states and action executions. This is to avoid
    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo-last'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Since states are being processed async, wait for the liveaction to go into delayed state.
    liveaction = self._wait_on_status(
        liveaction, action_constants.LIVEACTION_STATUS_DELAYED)
    expected_num_exec += 0  # This request will not be scheduled for execution.
    expected_num_pubs += 0  # The delayed status change should not be published.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Mark one of the scheduled/running execution as completed.
    action_service.update_status(
        scheduled[0], action_constants.LIVEACTION_STATUS_SUCCEEDED, publish=True)
    expected_num_pubs += 1  # Tally succeeded state.
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)

    # Run the scheduler to schedule action executions.
    self._process_scheduling_queue()

    # Once capacity freed up, the delayed execution is published as scheduled.
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 2  # Tally scheduled and running state.

    # Since states are being processed async, wait for the liveaction to be scheduled.
    liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_cancel_subworkflow_cascade_up_to_workflow_with_other_subworkflows(
        self):
    """Verify canceling one subworkflow cascades up to the parent workflow
    and also cancels the sibling subworkflow.
    """
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflows.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)

    # Identify the records for the subworkflow.
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))
    self.assertEqual(len(wf_ex_dbs), 1)
    tk_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_dbs[0].id))
    self.assertEqual(len(tk_ex_dbs), 2)
    tk1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(tk_ex_dbs[0].id))
    self.assertEqual(len(tk1_ac_ex_dbs), 1)
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk1_ac_ex_dbs[0].liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
    tk2_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(tk_ex_dbs[1].id))
    self.assertEqual(len(tk2_ac_ex_dbs), 1)
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(
        tk2_ac_ex_dbs[0].liveaction['id'])
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)

    # Cancel the subworkflow which should cascade up to the root.
    requester = cfg.CONF.system_user.user
    tk1_lv_ac_db, tk1_ac_ex_db = ac_svc.request_cancellation(
        tk1_lv_ac_db, requester)
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)

    # Assert the main workflow is canceling.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)

    # Assert both subworkflows are canceled.
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk1_lv_ac_db.id))
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk2_lv_ac_db.id))
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)

    # Manually handle action execution completion for one of the tasks.
    tk1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(
        str(tk1_ac_ex_db.id))
    self.assertEqual(tk1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)

    # Manually handle action execution completion for the other task.
    tk2_ac_ex_db = tk2_ac_ex_dbs[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(
        str(tk2_ac_ex_db.id))
    self.assertEqual(tk2_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)

    # Assert the main workflow is canceled once both tasks have completed.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
def request_action_execution(wf_ex_db, task_ex_db, st2_ctx, ac_ex_req, delay=None):
    """Request an action execution for a task in an orquesta workflow execution.

    :param wf_ex_db: Workflow execution record that owns the task.
    :param task_ex_db: Task execution record the action execution belongs to.
    :param st2_ctx: st2 context attached as the parent of the action execution.
    :param ac_ex_req: Dict with the action 'ref' under 'action', rendered 'input',
        and an optional 'item_id' for with-items tasks.
    :param delay: Optional delay in seconds before the execution is scheduled.
    :return: The action execution record.
    :raises Exception: If the task is itemized but no item_id is provided.
    :raises ac_exc.InvalidActionReferencedException: If the action cannot be found.
    """
    wf_ac_ex_id = wf_ex_db.action_execution
    action_ref = ac_ex_req['action']
    action_input = ac_ex_req['input']
    item_id = ac_ex_req.get('item_id')

    # If the task is with items and item_id is not provided, raise exception.
    if task_ex_db.itemized and item_id is None:
        msg = 'Unable to request action execution. Identifier for the item is not provided.'
        raise Exception(msg)

    # Identify the action to execute.
    action_db = action_utils.get_action_by_ref(ref=action_ref)

    if not action_db:
        error = 'Unable to find action "%s".' % action_ref
        raise ac_exc.InvalidActionReferencedException(error)

    # Identify the runner for the action.
    runner_type_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    # Identify action pack name from the action ref, falling back to the context.
    pack_name = action_ref.split('.')[0] if action_ref else st2_ctx.get('pack')

    # Set context for the action execution.
    ac_ex_ctx = {
        'pack': pack_name,
        'user': st2_ctx.get('user'),
        'parent': st2_ctx,
        'orquesta': {
            'workflow_execution_id': str(wf_ex_db.id),
            'task_execution_id': str(task_ex_db.id),
            'task_name': task_ex_db.task_name,
            'task_id': task_ex_db.task_id,
            'task_route': task_ex_db.task_route
        }
    }

    # Propagate optional context entries only when present.
    if st2_ctx.get('api_user'):
        ac_ex_ctx['api_user'] = st2_ctx.get('api_user')

    if st2_ctx.get('source_channel'):
        ac_ex_ctx['source_channel'] = st2_ctx.get('source_channel')

    if item_id is not None:
        ac_ex_ctx['orquesta']['item_id'] = item_id

    # Render action execution parameters and setup action execution object.
    ac_ex_params = param_utils.render_live_params(
        runner_type_db.runner_parameters or {},
        action_db.parameters or {},
        action_input or {},
        ac_ex_ctx)

    # The delay spec is in seconds and scheduler expects milliseconds.
    if delay is not None and delay > 0:
        delay = delay * 1000

    # Instantiate the live action record.
    lv_ac_db = lv_db_models.LiveActionDB(
        action=action_ref,
        workflow_execution=str(wf_ex_db.id),
        task_execution=str(task_ex_db.id),
        delay=delay,
        context=ac_ex_ctx,
        parameters=ac_ex_params)

    # Set notification if instructed (only for tasks listed in the notify config).
    if (wf_ex_db.notify and wf_ex_db.notify.get('config')
            and wf_ex_db.notify.get('tasks')
            and task_ex_db.task_name in wf_ex_db.notify['tasks']):
        lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(
            wf_ex_db.notify['config'])

    # Set the task execution to running first otherwise a race can occur
    # where the action execution finishes first and the completion handler
    # conflicts with this status update.
    task_ex_db.status = statuses.RUNNING
    task_ex_db = wf_db_access.TaskExecution.update(task_ex_db, publish=False)

    # Request action execution.
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    msg = '[%s] Action execution "%s" requested for task "%s", route "%s".'
    LOG.info(msg, wf_ac_ex_id, str(ac_ex_db.id), task_ex_db.task_id,
             str(task_ex_db.task_route))

    return ac_ex_db
def test_cascade_notify_to_tasks(self):
    """Verify notify settings cascade only to tasks listed in the workflow's notify input."""
    wf_input = {'notify': ['task2']}
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db.notify = notify_api_models.NotificationsHelper.to_model(MOCK_NOTIFY)
    lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Assert task1 notify is not set (task1 is not in the notify list).
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction['id'])
    self.assertIsNone(tk1_lv_ac_db.notify)
    self.assertEqual(tk1_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertFalse(notifier.Notifier._post_notify_triggers.called)
    notifier.Notifier._post_notify_triggers.reset_mock()

    # Handle task1 completion.
    workflow_service.handle_action_execution_completion(tk1_ac_ex_db)
    tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
    self.assertEqual(tk1_ex_db.status, wf_statuses.SUCCEEDED)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Assert task2 notify is set (task2 is in the notify list).
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task2'}
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk2_ex_db.id))[0]
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction['id'])
    notify = notify_api_models.NotificationsHelper.from_model(notify_model=tk2_lv_ac_db.notify)
    self.assertEqual(notify, MOCK_NOTIFY)
    self.assertEqual(tk2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertTrue(notifier.Notifier._post_notify_triggers.called)
    notifier.Notifier._post_notify_triggers.reset_mock()

    # Handle task2 completion.
    workflow_service.handle_action_execution_completion(tk2_ac_ex_db)
    tk2_ex_db = wf_db_access.TaskExecution.get_by_id(tk2_ex_db.id)
    self.assertEqual(tk2_ex_db.status, wf_statuses.SUCCEEDED)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Assert task3 notify is not set.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task3'}
    tk3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk3_ex_db.id))[0]
    tk3_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk3_ac_ex_db.liveaction['id'])
    self.assertIsNone(tk3_lv_ac_db.notify)
    self.assertEqual(tk3_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertFalse(notifier.Notifier._post_notify_triggers.called)
    notifier.Notifier._post_notify_triggers.reset_mock()

    # Handle task3 completion.
    workflow_service.handle_action_execution_completion(tk3_ac_ex_db)
    tk3_ex_db = wf_db_access.TaskExecution.get_by_id(tk3_ex_db.id)
    self.assertEqual(tk3_ex_db.status, wf_statuses.SUCCEEDED)

    # Assert workflow is completed and the workflow-level notify fired.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertTrue(notifier.Notifier._post_notify_triggers.called)
    notifier.Notifier._post_notify_triggers.reset_mock()
def test_include_result_to_error_log(self):
    """Verify that when a task is manually failed with a result payload, the
    result is copied into the workflow execution's error log entry.
    """
    username = '******'
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
    wf_input = {'who': 'Thanos'}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
    wf_ex_db = wf_ex_dbs[0]

    # Assert task1 is already completed.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.context.get('user'), username)
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Manually override and fail the action execution and write some result.
    # Action execution result can contain dotted notation so ensure this is tested.
    result = {"127.0.0.1": {"hostname": "foobar"}}
    ac_svc.update_status(
        tk1_lv_ac_db, ac_const.LIVEACTION_STATUS_FAILED, result=result, publish=False
    )

    # Re-read the records and confirm the manual override took effect.
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
    self.assertDictEqual(tk1_lv_ac_db.result, result)

    # Manually handle action execution completion.
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)

    # Assert task and workflow failed.
    tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
    self.assertEqual(tk1_ex_db.status, wf_statuses.FAILED)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)

    # Assert result is included in the error log.
    expected_errors = [
        {
            'message': 'Execution failed. See result for details.',
            'type': 'error',
            'task_id': 'task1',
            'result': {
                '127.0.0.1': {
                    'hostname': 'foobar'
                }
            }
        }
    ]

    self.assertListEqual(wf_ex_db.errors, expected_errors)
def test_on_cancellation(self):
    """Fill the per-attribute concurrency threshold, confirm the next matching
    request is delayed, cancel a scheduled execution, and verify the delayed
    execution gets rescheduled once capacity frees up.
    """
    policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency.attr')
    self.assertGreater(policy_db.parameters['threshold'], 0)
    self.assertIn('actionstr', policy_db.parameters['attributes'])

    # Request enough executions with the same attribute value to reach the threshold.
    for i in range(0, policy_db.parameters['threshold']):
        liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
        action_service.request(liveaction)

    # Since states are being processed asynchronously, wait for the
    # liveactions to go into scheduled states.
    MockLiveActionPublisherNonBlocking.wait_all()

    for i in range(0, 100):
        eventlet.sleep(1)
        scheduled = [
            item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
        ]
        if len(scheduled) == policy_db.parameters['threshold']:
            break

    MockLiveActionPublisherNonBlocking.wait_all()
    scheduled = [
        item for item in LiveAction.get_all() if item.status in SCHEDULED_STATES
    ]
    self.assertEqual(len(scheduled), policy_db.parameters['threshold'])

    # duplicate executions caused by accidental publishing of state in the concurrency policies.
    # num_state_changes = len(scheduled) * len(['requested', 'scheduled', 'running'])
    expected_num_exec = len(scheduled)
    expected_num_pubs = expected_num_exec * 3
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be delayed since concurrency threshold is reached.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'fu'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_pubs += 1  # Tally requested state.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into delayed state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status == action_constants.LIVEACTION_STATUS_DELAYED:
            break

    # Assert the action is delayed.
    delayed = LiveAction.get_by_id(str(liveaction.id))
    self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Execution is expected to be scheduled since concurrency threshold is not reached.
    # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not.
    liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'})
    liveaction, _ = action_service.request(liveaction)
    expected_num_exec += 1  # This request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running states.

    # Since states are being processed asynchronously, wait for the
    # liveaction to go into scheduled state.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(liveaction.id))
        if liveaction.status in SCHEDULED_STATES:
            break

    liveaction = LiveAction.get_by_id(str(liveaction.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)

    # Cancel execution.
    action_service.request_cancellation(scheduled[0], 'stanley')
    expected_num_pubs += 2  # Tally the canceling and canceled states.

    # Once capacity freed up, the delayed execution is published as requested again.
    expected_num_exec += 1  # The delayed request is expected to be executed.
    expected_num_pubs += 3  # Tally requested, scheduled, and running state.

    # Since states are being processed asynchronously, wait for the DELAYED
    # liveaction to go into scheduled state. BUGFIX: the loop previously polled
    # the "bar" execution (already scheduled above), so it exited immediately
    # without waiting for the delayed execution and the assertions below raced.
    for i in range(0, 100):
        eventlet.sleep(1)
        liveaction = LiveAction.get_by_id(str(delayed.id))
        if liveaction.status in SCHEDULED_STATES:
            break

    # Execution is expected to be rescheduled.
    liveaction = LiveAction.get_by_id(str(delayed.id))
    self.assertIn(liveaction.status, SCHEDULED_STATES)
    self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count)
    self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count)
def test_with_items_concurrency_pause_and_resume(self):
    """Pause a running with-items task constrained by concurrency, let the
    in-flight items finish, then resume and run the remaining items to
    workflow completion.
    """
    num_items = 3
    concurrency = 2
    wf_input = {'concurrency': concurrency}
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'with-items-concurrency.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

    # Assert the workflow execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task1'
    }

    t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))
    self.assertEqual(t1_ex_db.status, wf_statuses.RUNNING)
    # Only "concurrency" items should be scheduled up front.
    self.assertEqual(len(t1_ac_ex_dbs), concurrency)

    # Reset the action executions to running status.
    for ac_ex in t1_ac_ex_dbs:
        self.set_execution_status(
            ac_ex.liveaction['id'],
            action_constants.LIVEACTION_STATUS_RUNNING)

    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))

    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_RUNNING
        for ac_ex in t1_ac_ex_dbs
    ]

    self.assertTrue(all(status))

    # Pause the workflow execution.
    requester = cfg.CONF.system_user.user
    lv_ac_db, ac_ex_db = action_service.request_pause(lv_ac_db, requester)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_PAUSING)

    # Manually succeed the action executions and process completion.
    for ac_ex in t1_ac_ex_dbs:
        self.set_execution_status(
            ac_ex.liveaction['id'],
            action_constants.LIVEACTION_STATUS_SUCCEEDED)

    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))

    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]

    self.assertTrue(all(status))

    for t1_ac_ex_db in t1_ac_ex_dbs:
        workflows.get_engine().process(t1_ac_ex_db)

    # Check that the workflow execution is paused.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_PAUSED)

    # Resume the workflow execution.
    requester = cfg.CONF.system_user.user
    lv_ac_db, ac_ex_db = action_service.request_resume(lv_ac_db, requester)
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RESUMING)

    # Check that the workflow execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Check new set of action execution is scheduled.
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))
    self.assertEqual(len(t1_ac_ex_dbs), num_items)

    # Manually process the last action execution.
    workflows.get_engine().process(t1_ac_ex_dbs[2])

    # Check that the workflow execution is completed.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
def test_fail_manually_with_recovery_failure(self):
    """Run a workflow that fails manually via a fail transition and whose
    recovery task also fails; verify the recovery task is still scheduled and
    all three errors (fail, recover, task1) land in the error log.
    """
    wf_file = 'fail-manually-with-recovery-failure.yaml'
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
    wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]

    # Assert task1 and workflow execution failed due to fail in the task transition.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'}
    tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0]
    tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction['id'])
    self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
    wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)

    # Assert recover task is scheduled even though the workflow execution failed manually.
    # The recover task in the workflow is setup to fail.
    query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'recover'}
    tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    tk2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk2_ex_db.id))[0]
    tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction['id'])
    self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
    wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)

    # Check errors and output.
    expected_errors = [
        {
            'task_id': 'fail',
            'type': 'error',
            'message': 'Execution failed. See result for details.'
        },
        {
            'task_id': 'recover',
            'type': 'error',
            'message': 'Execution failed. See result for details.',
            'result': {
                'failed': True,
                'return_code': 1,
                'stderr': '',
                'stdout': '',
                'succeeded': False
            }
        },
        {
            'task_id': 'task1',
            'type': 'error',
            'message': 'Execution failed. See result for details.',
            'result': {
                'failed': True,
                'return_code': 1,
                'stderr': '',
                'stdout': '',
                'succeeded': False
            }
        }
    ]

    self.assertListEqual(self.sort_workflow_errors(wf_ex_db.errors), expected_errors)
def test_chain_pause_resume_last_task_failed_with_no_next_task(self):
    """Pause and resume an action chain whose last task fails with no next
    task, then verify the failure is recorded in the chain result.
    """
    # A temp file is created during test setup. The test action chain will
    # stall until this file is deleted, which gives the unit test a moment
    # to run any test related logic.
    tempfile_path = self.temp_file_path
    self.assertTrue(os.path.exists(tempfile_path))

    action_ref = TEST_PACK + "." + "test_pause_resume_last_task_failed_with_no_next_task"
    request = LiveActionDB(
        action=action_ref,
        parameters={"tempfile": tempfile_path, "message": "foobar"},
    )
    lv_ac, ac_ex = action_service.request(request)
    lv_ac = LiveAction.get_by_id(str(lv_ac.id))

    # The chain should transition into the running state.
    lv_ac = self._wait_for_status(lv_ac, action_constants.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(lv_ac.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Ask the chain to pause and wait for the pausing state.
    lv_ac, ac_ex = action_service.request_pause(lv_ac, USERNAME)
    lv_ac = self._wait_for_status(lv_ac, action_constants.LIVEACTION_STATUS_PAUSING)
    self.assertEqual(
        lv_ac.status, action_constants.LIVEACTION_STATUS_PAUSING, str(lv_ac)
    )

    # Unblock the chain by deleting the temp file it is waiting on.
    os.remove(tempfile_path)
    self.assertFalse(os.path.exists(tempfile_path))

    # The chain should now settle into the paused state.
    lv_ac = self._wait_for_status(lv_ac, action_constants.LIVEACTION_STATUS_PAUSED)
    self.assertEqual(
        lv_ac.status, action_constants.LIVEACTION_STATUS_PAUSED, str(lv_ac)
    )

    # Wait for non-blocking threads to complete. Ensure runner is not running.
    MockLiveActionPublisherNonBlocking.wait_all()

    # Resume the chain and wait for it to complete; the last task fails.
    lv_ac, ac_ex = action_service.request_resume(lv_ac, USERNAME)
    lv_ac = self._wait_for_status(lv_ac, action_constants.LIVEACTION_STATUS_FAILED)
    self.assertEqual(lv_ac.status, action_constants.LIVEACTION_STATUS_FAILED)

    # Wait for non-blocking threads to complete.
    MockLiveActionPublisherNonBlocking.wait_all()

    # The single failed task must be recorded in the chain result.
    self.assertIn("tasks", lv_ac.result)
    self.assertEqual(len(lv_ac.result["tasks"]), 1)
    self.assertEqual(
        lv_ac.result["tasks"][0]["state"],
        action_constants.LIVEACTION_STATUS_FAILED,
    )
def test_with_items_cancellation(self):
    """Cancel a workflow while its with-items task is running, then let all
    items finish and verify the workflow completes as canceled.
    """
    num_items = 3
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'with-items-concurrency.yaml')
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

    # Assert the workflow execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

    query_filters = {
        'workflow_execution': str(wf_ex_db.id),
        'task_id': 'task1'
    }

    t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))
    self.assertEqual(t1_ex_db.status, wf_statuses.RUNNING)
    # Without a concurrency parameter all items are scheduled at once.
    self.assertEqual(len(t1_ac_ex_dbs), num_items)

    # Reset the action executions to running status.
    for ac_ex in t1_ac_ex_dbs:
        self.set_execution_status(
            ac_ex.liveaction['id'],
            action_constants.LIVEACTION_STATUS_RUNNING)

    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))

    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_RUNNING
        for ac_ex in t1_ac_ex_dbs
    ]

    self.assertTrue(all(status))

    # Cancels the workflow execution.
    requester = cfg.CONF.system_user.user
    lv_ac_db, ac_ex_db = action_service.request_cancellation(
        lv_ac_db, requester)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_CANCELING)

    # Manually succeed the action executions and process completion.
    for ac_ex in t1_ac_ex_dbs:
        self.set_execution_status(
            ac_ex.liveaction['id'],
            action_constants.LIVEACTION_STATUS_SUCCEEDED)

    t1_ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(t1_ex_db.id))

    status = [
        ac_ex.status == action_constants.LIVEACTION_STATUS_SUCCEEDED
        for ac_ex in t1_ac_ex_dbs
    ]

    self.assertTrue(all(status))

    for t1_ac_ex_db in t1_ac_ex_dbs:
        workflows.get_engine().process(t1_ac_ex_db)

    # Check that the workflow execution is canceled.
    wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
    self.assertEqual(wf_ex_db.status, wf_statuses.CANCELED)
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_CANCELED)
def test_chain_pause_resume_cascade_to_parent_workflow(self):
    """Pause a subworkflow and verify the pause cascades to the parent chain;
    then resume the subworkflow alone (parent stays paused) and finally
    resume the parent to completion.
    """
    # A temp file is created during test setup. Ensure the temp file exists.
    # The test action chain will stall until this file is deleted. This gives
    # the unit test a moment to run any test related logic.
    path = self.temp_file_path
    self.assertTrue(os.path.exists(path))

    action = TEST_PACK + "." + "test_pause_resume_with_subworkflow"
    params = {"tempfile": path, "message": "foobar"}
    liveaction = LiveActionDB(action=action, parameters=params)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))

    # Wait until the liveaction is running.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Wait for subworkflow to register.
    execution = self._wait_for_children(execution)
    self.assertEqual(len(execution.children), 1)

    # Wait until the subworkflow is running.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction["id"])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(task1_live.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Request subworkflow to pause.
    task1_live, task1_exec = action_service.request_pause(
        task1_live, USERNAME)

    # Wait until the subworkflow is pausing.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction["id"])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_PAUSING)
    extra_info = str(task1_live)
    self.assertEqual(task1_live.status,
                     action_constants.LIVEACTION_STATUS_PAUSING, extra_info)

    # Delete the temporary file that the action chain is waiting on.
    os.remove(path)
    self.assertFalse(os.path.exists(path))

    # Wait until the subworkflow is paused.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction["id"])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_PAUSED)
    extra_info = str(task1_live)
    self.assertEqual(task1_live.status,
                     action_constants.LIVEACTION_STATUS_PAUSED, extra_info)

    # Wait until the parent liveaction is paused.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_PAUSED)
    extra_info = str(liveaction)
    self.assertEqual(liveaction.status,
                     action_constants.LIVEACTION_STATUS_PAUSED, extra_info)
    self.assertEqual(len(execution.children), 1)

    # Wait for non-blocking threads to complete. Ensure runner is not running.
    MockLiveActionPublisherNonBlocking.wait_all()

    # Check liveaction result.
    self.assertIn("tasks", liveaction.result)
    self.assertEqual(len(liveaction.result["tasks"]), 1)

    subworkflow = liveaction.result["tasks"][0]
    self.assertEqual(len(subworkflow["result"]["tasks"]), 1)
    self.assertEqual(subworkflow["state"], action_constants.LIVEACTION_STATUS_PAUSED)

    # Request subworkflow to resume.
    task1_live, task1_exec = action_service.request_resume(
        task1_live, USERNAME)

    # Wait until the subworkflow has completed successfully.
    # (The original comment said "paused" here, but the wait below is for the
    # succeeded state.)
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction["id"])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertEqual(task1_live.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)

    # The parent workflow will stay paused.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_PAUSED)
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_PAUSED)

    # Wait for non-blocking threads to complete.
    MockLiveActionPublisherNonBlocking.wait_all()

    # Check liveaction result of the parent, which should stay the same
    # because only the subworkflow was resumed.
    self.assertIn("tasks", liveaction.result)
    self.assertEqual(len(liveaction.result["tasks"]), 1)

    subworkflow = liveaction.result["tasks"][0]
    self.assertEqual(len(subworkflow["result"]["tasks"]), 1)
    self.assertEqual(subworkflow["state"], action_constants.LIVEACTION_STATUS_PAUSED)

    # Request parent workflow to resume.
    liveaction, execution = action_service.request_resume(
        liveaction, USERNAME)

    # Wait until the liveaction is completed.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_SUCCEEDED)
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)

    # Wait for non-blocking threads to complete.
    MockLiveActionPublisherNonBlocking.wait_all()

    # Check liveaction result.
    self.assertIn("tasks", liveaction.result)
    self.assertEqual(len(liveaction.result["tasks"]), 2)

    subworkflow = liveaction.result["tasks"][0]
    self.assertEqual(len(subworkflow["result"]["tasks"]), 2)
    self.assertEqual(subworkflow["state"], action_constants.LIVEACTION_STATUS_SUCCEEDED)
def assert_data_flow(self, data):
    """Run the data-flow workflow with ``data`` as input and assert the value
    flows through task1 -> task2 -> task3 unchanged into the workflow output
    and the liveaction/action execution result.

    The per-task completion/assertion sequence was duplicated verbatim three
    times; it is folded into a single loop with identical behavior.
    """
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'data-flow.yaml')
    wf_input = {'a1': data}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))[0]
    self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)

    # Process each task in order. Each task's action execution is already
    # completed; manually handle completion and assert the task succeeded.
    # The workflow remains running until the final task completes.
    task_ids = ['task1', 'task2', 'task3']

    for index, task_id in enumerate(task_ids):
        # Assert the task action execution is already completed.
        query_filters = {
            'workflow_execution': str(wf_ex_db.id),
            'task_id': task_id
        }
        tk_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        tk_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(tk_ex_db.id))[0]
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(
            tk_ac_ex_db.liveaction['id'])
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

        # Manually handle action execution completion.
        wf_svc.handle_action_execution_completion(tk_ac_ex_db)

        # Assert the task succeeded; the workflow keeps running until the
        # last task, after which it succeeds.
        tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
        self.assertEqual(tk_ex_db.status, wf_states.SUCCEEDED)
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)

        if index < len(task_ids) - 1:
            self.assertEqual(wf_ex_db.status, wf_states.RUNNING)
        else:
            self.assertEqual(wf_ex_db.status, wf_states.SUCCEEDED)

    # Assert the liveaction and action execution records succeeded.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check workflow output. Under Python 2 the bytes input is decoded to
    # unicode before comparison.
    expected_value = wf_input['a1'] if six.PY3 else wf_input['a1'].decode('utf-8')
    expected_output = {
        'a5': expected_value,
        'b5': expected_value
    }

    self.assertDictEqual(wf_ex_db.output, expected_output)

    # Check liveaction and action execution result.
    expected_result = {'output': expected_output}
    self.assertDictEqual(lv_ac_db.result, expected_result)
    self.assertDictEqual(ac_ex_db.result, expected_result)
def test_chain_cancel_cascade_to_parent_workflow(self):
    """Cancel a subworkflow and verify the cancellation cascades to the
    parent action chain, whose result records the canceled subworkflow.
    """
    # A temp file is created during test setup. Ensure the temp file exists.
    # The test action chain will stall until this file is deleted. This gives
    # the unit test a moment to run any test related logic.
    path = self.temp_file_path
    self.assertTrue(os.path.exists(path))

    action = TEST_PACK + '.' + 'test_cancel_with_subworkflow'
    params = {'tempfile': path, 'message': 'foobar'}
    liveaction = LiveActionDB(action=action, parameters=params)
    liveaction, execution = action_service.request(liveaction)
    liveaction = LiveAction.get_by_id(str(liveaction.id))

    # Wait until the liveaction is running.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Wait for subworkflow to register.
    execution = self._wait_for_children(execution)
    self.assertEqual(len(execution.children), 1)

    # Wait until the subworkflow is running.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction['id'])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_RUNNING)
    self.assertEqual(task1_live.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Request subworkflow to cancel.
    task1_live, task1_exec = action_service.request_cancellation(
        task1_live, USERNAME)

    # Wait until the subworkflow is canceling.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction['id'])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_CANCELING)
    self.assertEqual(task1_live.status, action_constants.LIVEACTION_STATUS_CANCELING)

    # Delete the temporary file that the action chain is waiting on.
    os.remove(path)
    self.assertFalse(os.path.exists(path))

    # Wait until the subworkflow is canceled.
    task1_exec = ActionExecution.get_by_id(execution.children[0])
    task1_live = LiveAction.get_by_id(task1_exec.liveaction['id'])
    task1_live = self._wait_for_status(
        task1_live, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(task1_live.status, action_constants.LIVEACTION_STATUS_CANCELED)

    # Wait until the parent liveaction is canceled.
    liveaction = self._wait_for_status(
        liveaction, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_CANCELED)
    self.assertEqual(len(execution.children), 1)

    # Wait for non-blocking threads to complete. Ensure runner is not running.
    MockLiveActionPublisherNonBlocking.wait_all()

    # Check liveaction result.
    self.assertIn('tasks', liveaction.result)
    self.assertEqual(len(liveaction.result['tasks']), 1)

    subworkflow = liveaction.result['tasks'][0]
    self.assertEqual(len(subworkflow['result']['tasks']), 1)
    self.assertEqual(subworkflow['state'], action_constants.LIVEACTION_STATUS_CANCELED)
def assert_data_flow(self, data):
    """Run the data-flow workflow with ``data`` as input and verify propagation.

    Manually drives task1-task4 to completion, asserts the workflow succeeds,
    and checks that the input value flows through to the workflow output and
    the liveaction/execution results. Also asserts that the secret output
    ("b7") is masked when the execution is converted to its API model.
    """
    wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "data-flow.yaml")
    wf_input = {"a1": data}
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"], parameters=wf_input)
    lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)

    # Assert action execution is running.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(
        lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
    )
    wf_ex_db = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id)
    )[0]
    self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)

    # Drive each task to completion. The underlying action executions are
    # already succeeded; manually handle their completion and assert the
    # workflow keeps running until the final task completes it.
    task_ids = ("task1", "task2", "task3", "task4")
    for task_id in task_ids:
        # Assert the task's action execution is already completed.
        query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": task_id}
        tk_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        tk_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(tk_ex_db.id)
        )[0]
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

        # Manually handle action execution completion.
        wf_svc.handle_action_execution_completion(tk_ac_ex_db)

        # Assert the task succeeded. The workflow is still running until the
        # last task has been handled, at which point it should be succeeded.
        tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
        self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        expected_wf_status = (
            wf_statuses.SUCCEEDED if task_id == task_ids[-1] else wf_statuses.RUNNING
        )
        self.assertEqual(wf_ex_db.status, expected_wf_status)

    # Assert the liveaction and action execution succeeded.
    lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
    self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
    ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
    self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

    # Check workflow output. On Python 2 the input is bytes and must be
    # decoded for comparison against the (unicode) workflow output.
    expected_value = wf_input["a1"] if six.PY3 else wf_input["a1"].decode("utf-8")
    expected_output = {
        "a6": expected_value,
        "b6": expected_value,
        "a7": expected_value,
        "b7": expected_value,
    }
    self.assertDictEqual(wf_ex_db.output, expected_output)

    # Check liveaction and action execution result.
    expected_result = {"output": expected_output}
    self.assertDictEqual(lv_ac_db.result, expected_result)
    self.assertDictEqual(ac_ex_db.result, expected_result)

    # Assert expected output on conversion to API model; the secret "b7"
    # value must be masked.
    ac_ex_api = ex_api_models.ActionExecutionAPI.from_model(
        ac_ex_db, mask_secrets=True
    )
    expected_masked_output = {
        "a6": expected_value,
        "b6": expected_value,
        "a7": expected_value,
        "b7": secrets_const.MASKED_ATTRIBUTE_VALUE,
    }
    expected_masked_result = {"output": expected_masked_output}
    self.assertDictEqual(ac_ex_api.result, expected_masked_result)