def test_request_rerun_while_original_is_still_running(self):
    """Rerun of a workflow that is still running must be rejected."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')

    # Manually create the liveaction and action execution objects without publishing.
    lv_ac_db1 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db1, ac_ex_db1 = action_service.create_request(lv_ac_db1)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db1)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db1, st2_ctx)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Check workflow status — the original execution is still running.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun and expect rejection.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx['workflow_execution_id'] = str(wf_ex_db.id)
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task1']}
    expected_error = ('^Unable to rerun workflow execution \".*\" '
                      'because it is not in a completed state.$')

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(wf_exc.WorkflowExecutionRerunException,
                           expected_error,
                           workflow_service.request_rerun,
                           ac_ex_db2, st2_ctx, rerun_options)
def test_request_rerun_again_while_prev_rerun_is_still_running(self):
    """A second rerun must be rejected while the first rerun is running."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
    rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
    wf_ex_db = workflow_service.request_rerun(ac_ex_db2, st2_ctx, rerun_options)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Check workflow status.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Complete task1.
    self.run_workflow_step(wf_ex_db, "task1", 0)

    # Check workflow status and make sure it is still running.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    lv_ac_db2 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db2.id))
    self.assertEqual(lv_ac_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
    ac_ex_db2 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db2.id))
    self.assertEqual(ac_ex_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db3 = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db3, ac_ex_db3 = action_service.create_request(lv_ac_db3)

    # Request workflow execution rerun again and expect rejection.
    st2_ctx = self.mock_st2_context(ac_ex_db3, ac_ex_db1.context)
    st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
    rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
    expected_error = (
        '^Unable to rerun workflow execution ".*" '
        "because it is not in a completed state.$"
    )

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(
        wf_exc.WorkflowExecutionRerunException,
        expected_error,
        workflow_service.request_rerun,
        ac_ex_db3,
        st2_ctx,
        rerun_options,
    )
def prep_wf_ex_for_rerun(self):
    """Create a workflow execution, fail task1, and return the failed records."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction and execution records directly, without publishing.
    lv_ac_db1 = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db1, ac_ex_db1 = action_service.create_request(lv_ac_db1)

    # Request and prepare the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db1)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db1, st2_ctx)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Drive task1 to failure so the workflow ends up in a failed state.
    self.run_workflow_step(
        wf_ex_db,
        "task1",
        0,
        expected_ac_ex_db_status=action_constants.LIVEACTION_STATUS_FAILED,
        expected_tk_ex_db_status=wf_statuses.FAILED,
    )

    # Both the conductor and the persisted record should report failure.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.FAILED)
    self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)

    # The liveaction and action execution records should be failed as well.
    lv_ac_db1 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db1.id))
    self.assertEqual(lv_ac_db1.status, action_constants.LIVEACTION_STATUS_FAILED)
    ac_ex_db1 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db1.id))
    self.assertEqual(ac_ex_db1.status, action_constants.LIVEACTION_STATUS_FAILED)

    return wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db
def test_request_with_input(self):
    """A workflow request should persist the caller-supplied input."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(
        action=wf_meta["name"], parameters={"who": "stan"}
    )
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)

    # Exactly one workflow execution record should be saved.
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id)
    )
    self.assertEqual(len(wf_ex_dbs), 1)

    # Verify the required attributes on the persisted record.
    wf_ex_db = wf_ex_dbs[0]
    self.assertIsNotNone(wf_ex_db.id)
    self.assertGreater(wf_ex_db.rev, 0)
    self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
    self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)

    # Verify the recorded input matches what was requested.
    expected_input = {"who": "stan"}
    self.assertDictEqual(wf_ex_db.input, expected_input)
def test_request_task_execution_bad_action(self):
    """Requesting a task whose action reference does not exist should raise."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES["workflows"][0]
    )

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db, st2_ctx)

    # Build the task execution request by hand.
    task_id = "task1"
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}

    # Point the task at a non-existent action and expect the failure.
    task_spec.action = "mock.foobar"
    self.assertRaises(
        ac_exc.InvalidActionReferencedException,
        wf_svc.request_task_execution,
        wf_ex_db,
        task_id,
        task_spec,
        task_ctx,
        st2_ctx,
    )
def test_cancellation(self):
    """Canceling mid-run should wait for active tasks, then mark canceled."""
    # Create the liveaction/execution pair directly, without publishing.
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES['workflows'][0]
    )
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request and pre-process the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db, st2_ctx)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Run task1 manually so that task2 becomes the active task.
    task_route = 0
    self.run_workflow_step(wf_ex_db, 'task1', task_route)
    self.assert_task_running('task2', task_route)

    # Cancel while task2 is still active; workflow should be canceling.
    wf_ex_db = wf_svc.request_cancellation(ac_ex_db)
    conductor, wf_ex_db = wf_svc.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.CANCELING)
    self.assertEqual(wf_ex_db.status, wf_statuses.CANCELING)

    # Finish task2; the workflow should transition to canceled and stop.
    self.run_workflow_step(wf_ex_db, 'task2', task_route)
    self.assert_task_not_started('task3', task_route)
    conductor, wf_ex_db = wf_svc.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.CANCELED)
    self.assertEqual(wf_ex_db.status, wf_statuses.CANCELED)
def test_cancellation(self):
    """Cancel while a task is active; verify canceling then canceled."""
    # Create the liveaction and action execution records without publishing.
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES["workflows"][0]
    )
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request and pre-process the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = self.prep_wf_ex(wf_svc.request(wf_def, ac_ex_db, st2_ctx))

    # Complete task1 so task2 is the running task.
    task_route = 0
    self.run_workflow_step(wf_ex_db, "task1", task_route)
    self.assert_task_running("task2", task_route)

    # Request cancellation while task2 is still active.
    wf_ex_db = wf_svc.request_cancellation(ac_ex_db)
    conductor, wf_ex_db = wf_svc.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.CANCELING)
    self.assertEqual(wf_ex_db.status, wf_statuses.CANCELING)

    # Completing the active task should finalize the cancellation.
    self.run_workflow_step(wf_ex_db, "task2", task_route)
    self.assert_task_not_started("task3", task_route)
    conductor, wf_ex_db = wf_svc.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.CANCELED)
    self.assertEqual(wf_ex_db.status, wf_statuses.CANCELED)
def test_request_rerun_with_conductor_status_not_abended(self):
    """Rerun must be rejected when the conductor state is not abended."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually set workflow conductor state to paused.
    wf_ex_db.state["status"] = wf_statuses.PAUSED
    wf_ex_db = wf_db_access.WorkflowExecution.add_or_update(wf_ex_db)

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun and expect rejection.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
    rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
    expected_error = (
        "Unable to rerun workflow because it is not in a completed state.")

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(
        wf_exc.WorkflowExecutionRerunException,
        expected_error,
        workflow_service.request_rerun,
        ac_ex_db2,
        st2_ctx,
        rerun_options,
    )
def test_request_rerun_with_conductor_status_not_resuming(self):
    """Rerun must fail when the conductor does not transition to resuming."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
    rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
    expected_error = ('^Unable to rerun workflow execution ".*" '
                      "due to an unknown cause.")

    # Force the conductor to keep reporting FAILED so the rerun request
    # cannot transition the workflow to a resuming state.
    with mock.patch.object(
        conducting.WorkflowConductor,
        "get_workflow_status",
        mock.MagicMock(return_value=wf_statuses.FAILED),
    ):
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # use assertRaisesRegex instead.
        self.assertRaisesRegex(
            wf_exc.WorkflowExecutionRerunException,
            expected_error,
            workflow_service.request_rerun,
            ac_ex_db2,
            st2_ctx,
            rerun_options,
        )
def test_request_task_execution_bad_action(self):
    """A task pointing at a non-existent action reference should raise."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES["workflows"][0]
    )

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db)

    # Assemble the task execution request manually.
    task_id = "task1"
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}

    # Point the task spec at a bogus action and expect the failure.
    task_spec.action = "mock.foobar"
    self.assertRaises(
        ac_exc.InvalidActionReferencedException,
        wf_svc.request_task_execution,
        wf_ex_db,
        task_id,
        task_spec,
        task_ctx,
        st2_ctx,
    )
def _schedule_execution(self, liveaction):
    """Validate, persist, and publish an action execution request.

    :param liveaction: Incoming liveaction API object to schedule.
    :rtype: :class:`ActionExecutionAPI`
    :raises ValueError: If the st2-context header is not a JSON object.
    :raises ValueValidationException: If parameter rendering fails.
    """
    # Initialize execution context if it does not exist.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = get_requester()
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Retrieve other st2 context from request header.
    if 'st2-context' in pecan.request.headers and pecan.request.headers['st2-context']:
        context = jsonify.try_loads(pecan.request.headers['st2-context'])
        if not isinstance(context, dict):
            raise ValueError('Unable to convert st2-context from the headers into JSON.')
        liveaction.context.update(context)

    # Schedule the action execution. create_request persists the records
    # but intentionally does not publish them yet.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    liveaction_db, actionexecution_db = action_service.create_request(liveaction_db)

    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters, liveaction_db.parameters,
            liveaction_db.context)
    except ParamException as e:
        # NOTE(review): the execution is already persisted at this point but
        # is not transitioned to a failed status before re-raising — confirm
        # whether it should be marked failed like the newer handler variant.
        raise ValueValidationException(str(e))

    # Save the rendered parameters, then publish so consumers pick it up.
    liveaction_db = LiveAction.add_or_update(liveaction_db, publish=False)
    _, actionexecution_db = action_service.publish_request(liveaction_db, actionexecution_db)
    from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)
    return ActionExecutionAPI.from_model(actionexecution_db, from_model_kwargs)
def test_request_with_input(self):
    """Workflow request should save the execution with the provided input."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction and execution records directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(
        action=wf_meta["name"], parameters={"who": "stan"}
    )
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)

    # A single workflow execution record should now exist in the database.
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id)
    )
    self.assertEqual(len(wf_ex_dbs), 1)

    # Check the required attributes.
    wf_ex_db = wf_ex_dbs[0]
    self.assertIsNotNone(wf_ex_db.id)
    self.assertGreater(wf_ex_db.rev, 0)
    self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
    self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)

    # Check the recorded input.
    self.assertDictEqual(wf_ex_db.input, {"who": "stan"})
def test_request_task_execution(self):
    """A manual task execution request should persist and launch the task."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Assemble and submit the task execution request by hand.
    task_id = "task1"
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}
    task_ex_req = {
        "id": task_id,
        "spec": task_spec,
        "ctx": task_ctx,
        "actions": [
            {"action": "core.echo", "input": {"message": "Veni, vidi, vici."}}
        ],
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # Exactly one task execution record should be saved.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(task_ex_dbs), 1)

    # Check the required attributes on the task execution.
    task_ex_db = task_ex_dbs[0]
    self.assertIsNotNone(task_ex_db.id)
    self.assertGreater(task_ex_db.rev, 0)
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    self.assertEqual(task_ex_db.status, wf_states.RUNNING)

    # The task's action execution should be queryable by task execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(task_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)

    # And also by workflow execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)
def _schedule_execution(self, liveaction, user=None, context_string=None, show_secrets=False):
    """Validate, persist, and publish an action execution request.

    :param liveaction: Incoming liveaction API object to schedule.
    :param user: Requesting user recorded in the execution context.
    :param context_string: Optional JSON string with extra st2 context
        (taken from the request headers by the caller).
    :param show_secrets: When True, secrets are not masked in the response.
    :rtype: :class:`Response` with HTTP 201 and the execution payload.
    :raises ValueError: If ``context_string`` is not a JSON object.
    :raises ValueValidationException: If parameter rendering fails.
    """
    # Initialize execution context if it does not exist.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = user
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Retrieve other st2 context from request header.
    if context_string:
        context = try_loads(context_string)
        if not isinstance(context, dict):
            raise ValueError(
                'Unable to convert st2-context from the headers into JSON.'
            )
        liveaction.context.update(context)

    # Schedule the action execution. create_request persists the records
    # but intentionally does not publish them yet.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    liveaction_db, actionexecution_db = action_service.create_request(
        liveaction_db)

    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters,
            liveaction_db.parameters,
            liveaction_db.context)
    except ParamException:
        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(liveaction=liveaction_db,
                                     new_status=LIVEACTION_STATUS_FAILED,
                                     result={
                                         'error': str(e),
                                         'traceback': ''.join(
                                             traceback.format_tb(tb, 20))
                                     })

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the execption.
        raise ValueValidationException(str(e))

    # Save the rendered parameters, then publish so consumers pick it up.
    liveaction_db = LiveAction.add_or_update(liveaction_db, publish=False)
    _, actionexecution_db = action_service.publish_request(
        liveaction_db, actionexecution_db)
    execution_api = ActionExecutionAPI.from_model(
        actionexecution_db, mask_secrets=(not show_secrets))

    return Response(json=execution_api, status=http_client.CREATED)
def _invoke_action(self, action_db, runnertype_db, params, context=None,
                   additional_contexts=None):
    """
    Schedule an action execution.

    :type action_exec_spec: :class:`ActionExecutionSpecDB`

    :param params: Partially rendered parameters to execute the action with.
    :type params: ``dict``

    :param context: Optional execution context recorded on the liveaction.
    :param additional_contexts: Extra contexts used during parameter rendering.

    :rtype: :class:`LiveActionDB` on successful scheduling, None otherwise.

    :raises validation_exc.ValueValidationException: If parameter rendering
        fails; the execution is first persisted and marked failed.
    """
    action_ref = action_db.ref
    # NOTE(review): the runnertype_db parameter is shadowed by this lookup —
    # the passed-in value is never used; confirm whether that is intentional.
    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    liveaction_db = LiveActionDB(action=action_ref, context=context, parameters=params)

    try:
        liveaction_db.parameters = self.get_resolved_parameters(
            runnertype_db=runnertype_db,
            action_db=action_db,
            params=liveaction_db.parameters,
            context=liveaction_db.context,
            additional_contexts=additional_contexts)
    except param_exc.ParamException as e:
        # We still need to create a request, so liveaction_db is assigned an ID
        liveaction_db, execution_db = action_service.create_request(
            liveaction_db)

        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(
            liveaction=liveaction_db,
            new_status=action_constants.LIVEACTION_STATUS_FAILED,
            result={
                'error': str(e),
                'traceback': ''.join(traceback.format_tb(tb, 20))
            })

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the exception.
        raise validation_exc.ValueValidationException(str(e))

    # Request (persist and publish) the execution with rendered parameters.
    liveaction_db, execution_db = action_service.request(liveaction_db)

    return execution_db
def test_request_wf_def_with_unregistered_action(self):
    """A workflow referencing an unregistered action fails inspection."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, "fail-inspection-action-db.yaml"
    )

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # The workflow request itself should fail inspection.
    self.assertRaises(
        orquesta_exc.WorkflowInspectionError,
        wf_svc.request,
        self.get_wf_def(TEST_PACK_PATH, wf_meta),
        ac_ex_db,
        self.mock_st2_context(ac_ex_db),
    )
def test_request_task_execution(self):
    """A manual task execution request should persist and launch the task."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Assemble and submit the task execution request by hand.
    task_route = 0
    task_id = "task1"
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}
    task_ex_req = {
        "id": task_id,
        "route": task_route,
        "spec": task_spec,
        "ctx": task_ctx,
        "actions": [
            {"action": "core.echo", "input": {"message": "Veni, vidi, vici."}}
        ],
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # Exactly one task execution record should be saved.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(task_ex_dbs), 1)

    # Check the required attributes on the task execution.
    task_ex_db = task_ex_dbs[0]
    self.assertIsNotNone(task_ex_db.id)
    self.assertGreater(task_ex_db.rev, 0)
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    self.assertEqual(task_ex_db.status, wf_statuses.RUNNING)

    # The task's action execution should be queryable by task execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(task_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)

    # And also by workflow execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)
def test_invalid_json_request_skipvalidate(self):
    """create_request must succeed when parameter validation is skipped."""
    request_params = {
        "hosts": "127.0.0.1",
        "cmd": "uname -a",
        "arg_default_value": 123,
    }
    liveaction = LiveActionDB(action=ACTION_REF, parameters=request_params)

    # With validation disabled, no exception should be raised and both
    # records should be created.
    action, execution = action_service.create_request(
        liveaction, validate_params=False
    )
    self.assertTrue(action)
    self.assertTrue(execution)
def test_request_wf_def_with_unregistered_action(self):
    """Requesting a workflow with an unregistered action fails inspection."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, "fail-inspection-action-db.yaml"
    )

    # Create the liveaction and execution records directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Inspection of the workflow definition is expected to fail.
    self.assertRaises(
        orquesta_exc.WorkflowInspectionError,
        workflow_service.request,
        self.get_wf_def(TEST_PACK_PATH, wf_meta),
        ac_ex_db,
        self.mock_st2_context(ac_ex_db),
    )
def test_request_wf_def_with_bad_action_ref(self):
    """A workflow with a malformed action reference fails inspection."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, "fail-inspection-action-ref.yaml"
    )

    # Create the liveaction and execution records directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Inspection of the workflow definition is expected to fail.
    self.assertRaises(
        orquesta_exc.WorkflowInspectionError,
        workflow_service.request,
        self.get_wf_def(TEST_PACK_PATH, wf_meta),
        ac_ex_db,
        self.mock_st2_context(ac_ex_db),
    )
def test_request_rerun_with_missing_workflow_execution_id(self):
    """Rerun must be rejected when workflow_execution_id is absent."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun without workflow_execution_id.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task1']}
    expected_error = ('Unable to rerun workflow execution because '
                      'workflow_execution_id is not provided.')

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(wf_exc.WorkflowExecutionRerunException,
                           expected_error,
                           workflow_service.request_rerun,
                           ac_ex_db2, st2_ctx, rerun_options)
def test_request_task_execution_bad_action(self):
    """A task request naming a non-existent action should raise."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Build a task execution request that references an unknown action.
    task_route = 0
    task_id = "task1"
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}
    task_ex_req = {
        "id": task_id,
        "route": task_route,
        "spec": task_spec,
        "ctx": task_ctx,
        "actions": [
            {"action": "mock.echo", "input": {"message": "Veni, vidi, vici."}}
        ],
    }

    # The service should reject the invalid action reference.
    self.assertRaises(
        action_exc.InvalidActionReferencedException,
        workflow_service.request_task_execution,
        wf_ex_db,
        st2_ctx,
        task_ex_req,
    )
def test_handle_action_execution_completion(self):
    """Completing a task's action execution should start the next task."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES["workflows"][0]
    )

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request and pre-process the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    wf_ex_db = self.prep_wf_ex(wf_svc.request(wf_def, ac_ex_db))

    # Run task1 manually to completion.
    self.run_workflow_step(wf_ex_db, "task1", ctx={"foo": "bar"})

    # The conductor should have moved on to task2.
    self.assert_task_running("task2")
def test_request_rerun_with_bad_task_name(self):
    """Rerun must be rejected when a named task does not exist."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun with a bogus task name.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx['workflow_execution_id'] = str(wf_ex_db.id)
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task5354']}
    expected_error = '^Unable to rerun workflow because one or more tasks is not found: .*$'

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(wf_exc.WorkflowExecutionRerunException,
                           expected_error,
                           workflow_service.request_rerun,
                           ac_ex_db2, st2_ctx, rerun_options)
def test_handle_action_execution_completion(self):
    """Completing a task's action execution should advance the workflow."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction and execution records directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request and pre-process the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Run task1 manually to completion on route 0.
    self.run_workflow_step(wf_ex_db, "task1", 0, ctx={"foo": "bar"})

    # The conductor should have started task2 next.
    self.assert_task_running("task2", 0)
def test_request_rerun_with_nonexistent_workflow_execution(self):
    """Rerun must be rejected when the workflow execution does not exist."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request workflow execution rerun with bogus workflow_execution_id.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    # 24 hex chars mimics a valid-looking but nonexistent ObjectId.
    st2_ctx['workflow_execution_id'] = uuid.uuid4().hex[0:24]
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task1']}
    expected_error = ('^Unable to rerun workflow execution \".*\" '
                      'because it does not exist.$')

    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use assertRaisesRegex instead.
    self.assertRaisesRegex(wf_exc.WorkflowExecutionRerunException,
                           expected_error,
                           workflow_service.request_rerun,
                           ac_ex_db2, st2_ctx, rerun_options)
def test_request_task_execution(self):
    """A manual task execution request should persist and launch the task."""
    wf_meta = self.get_wf_fixture_meta_data(
        TEST_PACK_PATH, TEST_FIXTURES["workflows"][0]
    )

    # Create the liveaction/execution pair directly, without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db, st2_ctx)

    # Assemble and submit the task execution request manually.
    task_id = "task1"
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}
    wf_svc.request_task_execution(wf_ex_db, task_id, task_spec, task_ctx, st2_ctx)

    # Exactly one task execution record should be saved.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(task_ex_dbs), 1)

    # Check the required attributes on the task execution.
    task_ex_db = task_ex_dbs[0]
    self.assertIsNotNone(task_ex_db.id)
    self.assertGreater(task_ex_db.rev, 0)
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    self.assertEqual(task_ex_db.status, wf_lib_states.RUNNING)

    # The task's action execution should be queryable by task execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(task_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)

    # And also by workflow execution ID.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        workflow_execution=str(wf_ex_db.id)
    )
    self.assertEqual(len(ac_ex_dbs), 1)
def _invoke_action(self, action_db, runnertype_db, params, context=None,
                   additional_contexts=None):
    """
    Schedule an action execution.

    :type action_exec_spec: :class:`ActionExecutionSpecDB`

    :param params: Partially rendered parameters to execute the action with.
    :type params: ``dict``

    :param context: Optional execution context recorded on the liveaction.
    :param additional_contexts: Extra contexts used during parameter rendering.

    :rtype: :class:`LiveActionDB` on successful scheduling, None otherwise.

    :raises validation_exc.ValueValidationException: If parameter rendering
        fails; the execution is first persisted and marked failed.
    """
    action_ref = action_db.ref
    # NOTE(review): the runnertype_db parameter is shadowed by this lookup —
    # the passed-in value is never used; confirm whether that is intentional.
    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    liveaction_db = LiveActionDB(action=action_ref, context=context, parameters=params)

    try:
        liveaction_db.parameters = self.get_resolved_parameters(
            runnertype_db=runnertype_db,
            action_db=action_db,
            params=liveaction_db.parameters,
            context=liveaction_db.context,
            additional_contexts=additional_contexts)
    except param_exc.ParamException as e:
        # We still need to create a request, so liveaction_db is assigned an ID
        liveaction_db, execution_db = action_service.create_request(liveaction_db)

        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(
            liveaction=liveaction_db,
            new_status=action_constants.LIVEACTION_STATUS_FAILED,
            result={'error': six.text_type(e),
                    'traceback': ''.join(traceback.format_tb(tb, 20))})

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the exception.
        raise validation_exc.ValueValidationException(six.text_type(e))

    # Request (persist and publish) the execution with rendered parameters.
    liveaction_db, execution_db = action_service.request(liveaction_db)

    return execution_db
def _schedule_execution(self, liveaction, user=None):
    """
    Create, validate, and publish an action execution request for the given
    liveaction, returning the resulting execution as an API model.

    :param liveaction: Liveaction API object to schedule.
    :param user: Username recorded in the execution context.
    :rtype: :class:`ActionExecutionAPI`
    :raises ValueValidationException: If live parameter rendering fails; the
        execution is marked failed in the DB before raising.
    """
    # Initialize execution context if it does not exist.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = user
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Retrieve other st2 context from request header.
    if 'st2-context' in pecan.request.headers and pecan.request.headers['st2-context']:
        context = jsonify.try_loads(pecan.request.headers['st2-context'])
        if not isinstance(context, dict):
            raise ValueError('Unable to convert st2-context from the headers into JSON.')
        liveaction.context.update(context)

    # Schedule the action execution.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    liveaction_db, actionexecution_db = action_service.create_request(liveaction_db)

    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters,
            liveaction_db.parameters, liveaction_db.context)
    except ParamException:
        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(
            liveaction=liveaction_db,
            new_status=LIVEACTION_STATUS_FAILED,
            result={'error': str(e),
                    'traceback': ''.join(traceback.format_tb(tb, 20))})

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the exception.
        raise ValueValidationException(str(e))

    liveaction_db = LiveAction.add_or_update(liveaction_db, publish=False)
    _, actionexecution_db = action_service.publish_request(liveaction_db, actionexecution_db)

    from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)

    return ActionExecutionAPI.from_model(actionexecution_db, from_model_kwargs)
def test_request_task_execution_bad_action(self):
    """A task execution request with an invalid action reference raises."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')

    # Build the liveaction/action execution pair by hand so nothing is published.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)

    spec_module = specs_loader.get_spec_module(wf_ex_db.spec['catalog'])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Assemble a task execution request whose action reference is expected
    # to be rejected as invalid.
    task_route = 0
    task_id = 'task1'
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {'foo': 'bar'}
    st2_ctx = {'execution_id': wf_ex_db.action_execution}
    task_ex_req = {
        'id': task_id,
        'route': task_route,
        'spec': task_spec,
        'ctx': task_ctx,
        'actions': [
            {'action': 'mock.echo', 'input': {'message': 'Veni, vidi, vici.'}}
        ]
    }

    self.assertRaises(
        action_exc.InvalidActionReferencedException,
        workflow_service.request_task_execution,
        wf_ex_db,
        st2_ctx,
        task_ex_req
    )
def _schedule_execution(self, liveaction):
    """
    Create, validate, and publish an action execution request for the given
    liveaction, returning the resulting execution as an API model.
    """
    # Make sure a context dict exists before writing into it.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = get_requester()
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Merge any st2 context supplied via the request header.
    headers = pecan.request.headers
    if 'st2-context' in headers and headers['st2-context']:
        context = jsonify.try_loads(headers['st2-context'])
        if not isinstance(context, dict):
            raise ValueError(
                'Unable to convert st2-context from the headers into JSON.'
            )
        liveaction.context.update(context)

    # Persist the liveaction/execution pair without publishing it yet.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    liveaction_db, actionexecution_db = action_service.create_request(
        liveaction_db)

    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters,
            liveaction_db.parameters, liveaction_db.context)
    except ParamException as e:
        # NOTE(review): at this point the execution already exists in the DB
        # but is not transitioned to a failed status before raising, unlike
        # sibling handlers that call action_service.update_status here --
        # confirm whether that is intentional.
        raise ValueValidationException(str(e))

    liveaction_db = LiveAction.add_or_update(liveaction_db, publish=False)
    _, actionexecution_db = action_service.publish_request(
        liveaction_db, actionexecution_db)

    from_model_kwargs = self._get_from_model_kwargs_for_request(
        request=pecan.request)

    return ActionExecutionAPI.from_model(actionexecution_db, from_model_kwargs)
def test_retry_on_write_conflict(self):
    """Workflow completes despite a simulated DB write conflict on task4."""
    # A marker file signals which task(s) should hit the mocked DB write
    # conflict; its later absence is used as proof that the retry path ran.
    temp_file_path = TEMP_DIR_PATH + '/task4'
    if not os.path.exists(temp_file_path):
        with open(temp_file_path, 'w'):
            pass

    # Build the liveaction/action execution pair by hand so nothing is published.
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, TEST_FIXTURES['workflows'][0])
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request and pre-process the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db, st2_ctx)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # Drive the workflow one task at a time, checking the expected states.
    task_route = 0
    self.run_workflow_step(wf_ex_db, 'task1', task_route)
    self.assert_task_running('task2', task_route)
    self.assert_task_running('task4', task_route)
    self.run_workflow_step(wf_ex_db, 'task2', task_route)
    self.assert_task_running('task3', task_route)
    self.run_workflow_step(wf_ex_db, 'task4', task_route)
    self.assert_task_running('task5', task_route)
    self.run_workflow_step(wf_ex_db, 'task3', task_route)
    self.assert_task_not_started('task6', task_route)
    self.run_workflow_step(wf_ex_db, 'task5', task_route)
    self.assert_task_running('task6', task_route)
    self.run_workflow_step(wf_ex_db, 'task6', task_route)
    self.assert_task_running('task7', task_route)
    self.run_workflow_step(wf_ex_db, 'task7', task_route)
    self.assert_workflow_completed(str(wf_ex_db.id), status=wf_statuses.SUCCEEDED)

    # The marker file is gone, so the conflict was hit and retried.
    self.assertFalse(os.path.exists(temp_file_path))
def test_request(self):
    """Requesting a workflow creates a workflow execution record."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, TEST_FIXTURES['workflows'][0])

    # Build the liveaction/action execution pair by hand so nothing is published.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db)

    # Exactly one workflow execution should be tied to the action execution.
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
    self.assertEqual(len(wf_ex_dbs), 1)

    # Verify the required attributes.
    wf_ex_db = wf_ex_dbs[0]
    self.assertIsNotNone(wf_ex_db.id)
    self.assertGreater(wf_ex_db.rev, 0)
    self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
    self.assertEqual(wf_ex_db.status, wf_lib_states.REQUESTED)
def test_request_rerun_with_workflow_execution_not_abended(self):
    """Rerunning a workflow execution that is paused (not completed) is rejected."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Manually set workflow execution status to paused.
    wf_ex_db.status = wf_statuses.PAUSED
    wf_ex_db = wf_db_access.WorkflowExecution.add_or_update(wf_ex_db)

    # Manually create the liveaction and action execution objects for the rerun.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request the rerun while the original execution is paused. (A previous
    # comment claimed a "bogus workflow_execution_id" was used here, but the
    # id is valid -- the rejection is caused by the paused, non-completed
    # status set above.)
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx['workflow_execution_id'] = str(wf_ex_db.id)
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task1']}

    expected_error = ('^Unable to rerun workflow execution \".*\" '
                      'because it is not in a completed state.$')

    self.assertRaisesRegexp(wf_exc.WorkflowExecutionRerunException,
                            expected_error,
                            workflow_service.request_rerun,
                            ac_ex_db2, st2_ctx, rerun_options)
def test_request_task_execution_bad_action(self):
    """A task execution request with an invalid action reference raises."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')

    # Create the liveaction and action execution records without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution and deserialize its spec.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec['catalog'])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Build a task execution request whose action reference is expected to
    # be rejected as invalid.
    task_route = 0
    task_id = 'task1'
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {'foo': 'bar'}
    st2_ctx = {'execution_id': wf_ex_db.action_execution}
    task_ex_req = {
        'id': task_id,
        'route': task_route,
        'spec': task_spec,
        'ctx': task_ctx,
        'actions': [
            {'action': 'mock.echo', 'input': {'message': 'Veni, vidi, vici.'}}
        ]
    }

    self.assertRaises(
        action_exc.InvalidActionReferencedException,
        workflow_service.request_task_execution,
        wf_ex_db,
        st2_ctx,
        task_ex_req
    )
def test_request_task_execution(self):
    """Requesting a task execution creates the task and its action execution."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, TEST_FIXTURES['workflows'][0])

    # Create the liveaction and action execution records without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db)

    # Deserialize the workflow spec and submit the task execution request.
    task_id = 'task1'
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec['catalog'])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {'foo': 'bar'}
    st2_ctx = {'execution_id': wf_ex_db.action_execution}
    wf_svc.request_task_execution(wf_ex_db, task_id, task_spec, task_ctx, st2_ctx)

    # A single task execution record should now exist.
    task_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(task_ex_dbs), 1)

    # Verify the required attributes on the task execution.
    task_ex_db = task_ex_dbs[0]
    self.assertIsNotNone(task_ex_db.id)
    self.assertGreater(task_ex_db.rev, 0)
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    self.assertEqual(task_ex_db.status, wf_lib_states.RUNNING)

    # The task's action execution is queryable by task execution id ...
    ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(task_ex_db.id))
    self.assertEqual(len(ac_ex_dbs), 1)

    # ... and by workflow execution id.
    ac_ex_dbs = ex_db_access.ActionExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(ac_ex_dbs), 1)
def test_request(self):
    """Requesting a workflow creates a workflow execution record."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, TEST_FIXTURES['workflows'][0])

    # Create the liveaction and action execution records without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = ac_svc.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    wf_ex_db = wf_svc.request(wf_def, ac_ex_db)

    # Exactly one workflow execution should be tied to the action execution.
    wf_ex_dbs = wf_db_access.WorkflowExecution.query(
        action_execution=str(ac_ex_db.id))
    self.assertEqual(len(wf_ex_dbs), 1)

    # Verify the required attributes.
    wf_ex_db = wf_ex_dbs[0]
    self.assertIsNotNone(wf_ex_db.id)
    self.assertGreater(wf_ex_db.rev, 0)
    self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
    self.assertEqual(wf_ex_db.status, wf_lib_states.REQUESTED)
def test_request_rerun(self):
    """A failed workflow can be rerun from a given task and resumes running."""
    # Create and return a failed workflow execution.
    wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()

    # Build the liveaction/action execution pair for the rerun by hand.
    lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)

    # Request the workflow execution rerun starting at task1.
    st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
    st2_ctx['workflow_execution_id'] = str(wf_ex_db.id)
    rerun_options = {'ref': str(ac_ex_db1.id), 'tasks': ['task1']}
    wf_ex_db = workflow_service.request_rerun(ac_ex_db2, st2_ctx, rerun_options)
    wf_ex_db = self.prep_wf_ex(wf_ex_db)

    # The rerun workflow should be running.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

    # Complete task1.
    self.run_workflow_step(wf_ex_db, 'task1', 0)

    # Workflow, liveaction, and action execution should all still be running.
    conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
    self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
    self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
    lv_ac_db2 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db2.id))
    self.assertEqual(lv_ac_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
    ac_ex_db2 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db2.id))
    self.assertEqual(ac_ex_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
def _schedule_execution(self, liveaction, requester_user, user=None,
                        context_string=None, show_secrets=False, pack=None):
    """
    Validate, persist, and publish an execution request for the liveaction,
    returning an HTTP 201 response carrying the execution API model.
    """
    # Make sure a context dict exists before writing into it.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = user
    liveaction.context['pack'] = pack
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Merge any st2 context passed in by the caller.
    if context_string:
        context = try_loads(context_string)
        if not isinstance(context, dict):
            raise ValueError(
                'Unable to convert st2-context from the headers into JSON.'
            )
        liveaction.context.update(context)

    # Include RBAC context (if RBAC is available and enabled)
    if cfg.CONF.rbac.enable:
        user_db = UserDB(name=user)
        role_dbs = rbac_service.get_roles_for_user(user_db=user_db, include_remote=True)
        roles = [role_db.name for role_db in role_dbs]
        liveaction.context['rbac'] = {'user': user, 'roles': roles}

    # Resolve the action and runner type before rendering live parameters.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters,
            liveaction_db.parameters, liveaction_db.context)
    except param_exc.ParamException:
        # We still need to create a request, so liveaction_db is assigned an ID
        liveaction_db, actionexecution_db = action_service.create_request(
            liveaction_db)

        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(
            liveaction=liveaction_db,
            new_status=action_constants.LIVEACTION_STATUS_FAILED,
            result={
                'error': str(e),
                'traceback': ''.join(traceback.format_tb(tb, 20))
            })

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the exception.
        raise validation_exc.ValueValidationException(str(e))

    # The request should be created after the above call to render_live_params
    # so any templates in live parameters have a chance to render.
    liveaction_db, actionexecution_db = action_service.create_request(
        liveaction_db)
    liveaction_db = LiveAction.add_or_update(liveaction_db, publish=False)
    _, actionexecution_db = action_service.publish_request(
        liveaction_db, actionexecution_db)

    mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    execution_api = ActionExecutionAPI.from_model(
        actionexecution_db, mask_secrets=mask_secrets)

    return Response(json=execution_api, status=http_client.CREATED)
def _schedule_execution(self, liveaction, requester_user, action_db, user=None,
                        context_string=None, show_secrets=False):
    """
    Validate, persist, and publish an execution request for the liveaction,
    returning an HTTP 201 response carrying the execution API model.
    """
    # Make sure a context dict exists before writing into it.
    if not hasattr(liveaction, 'context'):
        liveaction.context = dict()

    liveaction.context['user'] = user
    liveaction.context['pack'] = action_db.pack
    LOG.debug('User is: %s' % liveaction.context['user'])

    # Merge any st2 context passed in by the caller.
    if context_string:
        context = try_loads(context_string)
        if not isinstance(context, dict):
            raise ValueError('Unable to convert st2-context from the headers into JSON.')
        liveaction.context.update(context)

    # Include RBAC context (if RBAC is available and enabled)
    if cfg.CONF.rbac.enable:
        user_db = UserDB(name=user)
        role_dbs = rbac_service.get_roles_for_user(user_db=user_db, include_remote=True)
        roles = [role_db.name for role_db in role_dbs]
        liveaction.context['rbac'] = {'user': user, 'roles': roles}

    # Resolve the runner type before rendering live parameters.
    liveaction_db = LiveActionAPI.to_model(liveaction)
    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    try:
        liveaction_db.parameters = param_utils.render_live_params(
            runnertype_db.runner_parameters, action_db.parameters,
            liveaction_db.parameters, liveaction_db.context)
    except param_exc.ParamException:
        # We still need to create a request, so liveaction_db is assigned an ID
        liveaction_db, actionexecution_db = action_service.create_request(
            liveaction=liveaction_db, action_db=action_db,
            runnertype_db=runnertype_db)

        # By this point the execution is already in the DB therefore need to mark it failed.
        _, e, tb = sys.exc_info()
        action_service.update_status(
            liveaction=liveaction_db,
            new_status=action_constants.LIVEACTION_STATUS_FAILED,
            result={'error': six.text_type(e),
                    'traceback': ''.join(traceback.format_tb(tb, 20))})

        # Might be a good idea to return the actual ActionExecution rather than bubble up
        # the exception.
        raise validation_exc.ValueValidationException(six.text_type(e))

    # The request should be created after the above call to render_live_params
    # so any templates in live parameters have a chance to render.
    liveaction_db, actionexecution_db = action_service.create_request(
        liveaction=liveaction_db, action_db=action_db, runnertype_db=runnertype_db)

    _, actionexecution_db = action_service.publish_request(liveaction_db, actionexecution_db)

    mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    execution_api = ActionExecutionAPI.from_model(actionexecution_db, mask_secrets=mask_secrets)

    return Response(json=execution_api, status=http_client.CREATED)
def test_request_action_execution_render(self):
    """Action parameters are rendered from the pack config context."""
    # Manually create the ConfigDB entry the render step will read from.
    output = 'Testing'
    value = {"config_item_one": output}
    config_db = pk_db_models.ConfigDB(pack=PACK_7, values=value)
    config = pk_db_access.Config.add_or_update(config_db)
    # NOTE(review): len() of the persisted Config document is asserted to be
    # 3 -- presumably counting document fields; confirm against the model.
    self.assertEqual(len(config), 3)

    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, 'render_config_context.yaml')

    # Manually create the liveaction and action execution objects without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec['catalog'])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Pass down appropriate st2 context to the task and action execution(s).
    root_st2_ctx = wf_ex_db.context.get('st2', {})
    st2_ctx = {
        'execution_id': wf_ex_db.action_execution,
        'user': root_st2_ctx.get('user'),
        'pack': root_st2_ctx.get('pack')
    }

    # Manually request task execution. (A redundant second call to
    # request_task_execution that followed the assertion below was removed.)
    task_route = 0
    task_id = 'task1'
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {'foo': 'bar'}
    task_ex_req = {
        'id': task_id,
        'route': task_route,
        'spec': task_spec,
        'ctx': task_ctx,
        'actions': [
            {'action': 'dummy_pack_7.render_config_context', 'input': None}
        ]
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # Check task execution is saved to the database.
    task_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(task_ex_dbs), 1)

    # Manually request action execution.
    task_ex_db = task_ex_dbs[0]
    action_ex_db = workflow_service.request_action_execution(
        wf_ex_db, task_ex_db, st2_ctx, task_ex_req['actions'][0])

    # Check required attributes and the rendered parameter value.
    self.assertIsNotNone(str(action_ex_db.id))
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    expected_parameters = {'value1': output}
    self.assertEqual(expected_parameters, action_ex_db.parameters)
def test_request_action_execution_render(self):
    """Action parameters are rendered from the pack config context."""
    # Manually create the ConfigDB entry the render step will read from.
    output = 'Testing'
    value = {"config_item_one": output}
    config_db = pk_db_models.ConfigDB(pack=PACK_7, values=value)
    config = pk_db_access.Config.add_or_update(config_db)
    # NOTE(review): len() of the persisted Config document is asserted to be
    # 3 -- presumably counting document fields; confirm against the model.
    self.assertEqual(len(config), 3)

    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, 'render_config_context.yaml')

    # Manually create the liveaction and action execution objects without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec['catalog'])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Pass down appropriate st2 context to the task and action execution(s).
    root_st2_ctx = wf_ex_db.context.get('st2', {})
    st2_ctx = {
        'execution_id': wf_ex_db.action_execution,
        'user': root_st2_ctx.get('user'),
        'pack': root_st2_ctx.get('pack')
    }

    # Manually request task execution. (A redundant second call to
    # request_task_execution that followed the assertion below was removed.)
    task_route = 0
    task_id = 'task1'
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {'foo': 'bar'}
    task_ex_req = {
        'id': task_id,
        'route': task_route,
        'spec': task_spec,
        'ctx': task_ctx,
        'actions': [{
            'action': 'dummy_pack_7.render_config_context',
            'input': None
        }]
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # Check task execution is saved to the database.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(task_ex_dbs), 1)

    # Manually request action execution.
    task_ex_db = task_ex_dbs[0]
    action_ex_db = workflow_service.request_action_execution(
        wf_ex_db, task_ex_db, st2_ctx, task_ex_req['actions'][0])

    # Check required attributes and the rendered parameter value.
    self.assertIsNotNone(str(action_ex_db.id))
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    expected_parameters = {'value1': output}
    self.assertEqual(expected_parameters, action_ex_db.parameters)
def test_request_task_execution(self):
    """Requesting a task execution creates the task and its action execution."""
    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

    # Create the liveaction and action execution records without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution and deserialize its spec.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Assemble and submit the task execution request.
    task_route = 0
    task_id = "task1"
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    st2_ctx = {"execution_id": wf_ex_db.action_execution}
    task_ex_req = {
        "id": task_id,
        "route": task_route,
        "spec": task_spec,
        "ctx": task_ctx,
        "actions": [
            {"action": "core.echo", "input": {"message": "Veni, vidi, vici."}}
        ],
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # A single task execution record should now exist.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(task_ex_dbs), 1)

    # Verify the required attributes on the task execution.
    task_ex_db = task_ex_dbs[0]
    self.assertIsNotNone(task_ex_db.id)
    self.assertGreater(task_ex_db.rev, 0)
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    self.assertEqual(task_ex_db.status, wf_statuses.RUNNING)

    # The task's action execution is queryable by task execution id ...
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        task_execution=str(task_ex_db.id))
    self.assertEqual(len(ac_ex_dbs), 1)

    # ... and by workflow execution id.
    ac_ex_dbs = ex_db_access.ActionExecution.query(
        workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(ac_ex_dbs), 1)
def test_request_action_execution_render(self):
    """Action parameters are rendered from the pack config context."""
    # Manually create the ConfigDB entry the render step will read from.
    output = "Testing"
    value = {"config_item_one": output}
    config_db = pk_db_models.ConfigDB(pack=PACK_7, values=value)
    config = pk_db_access.Config.add_or_update(config_db)
    # NOTE(review): len() of the persisted Config document is asserted to be
    # 3 -- presumably counting document fields; confirm against the model.
    self.assertEqual(len(config), 3)

    wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "render_config_context.yaml")

    # Manually create the liveaction and action execution objects without publishing.
    lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
    lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

    # Request the workflow execution.
    wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
    st2_ctx = self.mock_st2_context(ac_ex_db)
    wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
    spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
    wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

    # Pass down appropriate st2 context to the task and action execution(s).
    root_st2_ctx = wf_ex_db.context.get("st2", {})
    st2_ctx = {
        "execution_id": wf_ex_db.action_execution,
        "user": root_st2_ctx.get("user"),
        "pack": root_st2_ctx.get("pack"),
    }

    # Manually request task execution. (A redundant second call to
    # request_task_execution that followed the assertion below was removed.)
    task_route = 0
    task_id = "task1"
    task_spec = wf_spec.tasks.get_task(task_id)
    task_ctx = {"foo": "bar"}
    task_ex_req = {
        "id": task_id,
        "route": task_route,
        "spec": task_spec,
        "ctx": task_ctx,
        "actions": [{
            "action": "dummy_pack_7.render_config_context",
            "input": None
        }],
    }
    workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

    # Check task execution is saved to the database.
    task_ex_dbs = wf_db_access.TaskExecution.query(
        workflow_execution=str(wf_ex_db.id))
    self.assertEqual(len(task_ex_dbs), 1)

    # Manually request action execution.
    task_ex_db = task_ex_dbs[0]
    action_ex_db = workflow_service.request_action_execution(
        wf_ex_db, task_ex_db, st2_ctx, task_ex_req["actions"][0])

    # Check required attributes and the rendered parameter value.
    self.assertIsNotNone(str(action_ex_db.id))
    self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
    expected_parameters = {"value1": output}
    self.assertEqual(expected_parameters, action_ex_db.parameters)