def test_get_trigger_in_correct_orders(self):
    """Triggers must be returned ordered by next execution time."""
    def _make_trigger(pattern, start_time):
        name = 'trigger-%s' % utils.generate_unicode_uuid()

        t_s.create_cron_trigger(
            name,
            self.wf.name,
            {},
            pattern=pattern,
            start_time=start_time
        )

        return name

    t1_name = _make_trigger('*/5 * * * *', datetime.datetime(2010, 8, 25))
    t2_name = _make_trigger('*/1 * * * *', datetime.datetime(2010, 8, 22))
    t3_name = _make_trigger('*/2 * * * *', datetime.datetime(2010, 9, 21))

    # This trigger is not due yet, so it must not show up in the result.
    _make_trigger(
        '*/3 * * * *',
        datetime.datetime.utcnow() + datetime.timedelta(0, 50)
    )

    trigger_names = [t.name for t in t_s.get_next_cron_triggers()]

    self.assertEqual([t2_name, t1_name, t3_name], trigger_names)
def test_trigger_create_the_same_first_time_or_count(self):
    """Duplicate (first_time, count) pairs must be rejected.

    Triggers differing in either the execution count or the first
    execution time may coexist; a trigger duplicating both at once must
    raise a DB duplicate-entry error.
    """
    t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        "4242-12-25 13:37",
        2,
        datetime.datetime(2010, 8, 25)
    )

    t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        "4242-12-25 13:37",
        4,
        datetime.datetime(2010, 8, 25)
    )

    t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        "5353-12-25 13:37",
        2,
        datetime.datetime(2010, 8, 25)
    )

    # Creations above should be ok.
    # But creation with the same count and first time
    # simultaneously leads to error.
    # Fixed: the exception class is DBDuplicateEntryError (as used by the
    # other version of this test in this file), not
    # DBDuplicateEntryException.
    self.assertRaises(
        exc.DBDuplicateEntryError,
        t_s.create_cron_trigger,
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        "4242-12-25 13:37",
        2,
        None
    )
def test_get_trigger_in_correct_orders(self):
    """Triggers must be returned ordered by next execution time."""
    t1_name = 'trigger-%s' % utils.generate_unicode_uuid()

    t_s.create_cron_trigger(
        t1_name, self.wf.name, {}, {}, '*/5 * * * *', None, None,
        datetime.datetime(2010, 8, 25)
    )

    t2_name = 'trigger-%s' % utils.generate_unicode_uuid()

    t_s.create_cron_trigger(
        t2_name, self.wf.name, {}, {}, '*/1 * * * *', None, None,
        datetime.datetime(2010, 8, 22)
    )

    t3_name = 'trigger-%s' % utils.generate_unicode_uuid()

    t_s.create_cron_trigger(
        t3_name, self.wf.name, {}, {}, '*/2 * * * *', None, None,
        datetime.datetime(2010, 9, 21)
    )

    t4_name = 'trigger-%s' % utils.generate_unicode_uuid()

    # Fixed: use utcnow() instead of now(). The other variant of this
    # test in this file builds the "not yet due" trigger from utcnow();
    # naive local time would skew the result in non-UTC time zones.
    t_s.create_cron_trigger(
        t4_name, self.wf.name, {}, {}, '*/3 * * * *', None, None,
        datetime.datetime.utcnow() + datetime.timedelta(0, 50)
    )

    trigger_names = [t.name for t in t_s.get_next_cron_triggers()]

    self.assertEqual([t2_name, t1_name, t3_name], trigger_names)
def test_remove_listener_correlation_id_not_in_results(self):
    """Removing an unknown id must not disturb registered listeners."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    # Fixed: use assertIsInstance rather than comparing type() objects
    # with assertEqual.
    self.assertIsInstance(
        self.listener._results.get(correlation_id),
        moves.queue.Queue
    )

    self.listener.remove_listener(utils.generate_unicode_uuid())

    # The original listener's queue must still be registered.
    self.assertIsInstance(
        self.listener._results.get(correlation_id),
        moves.queue.Queue
    )
def test_trigger_create_the_same_first_time_or_count(self):
    """Only the exact (first_time, count) duplicate must be rejected."""
    def _create(first_time, count):
        t_s.create_cron_trigger(
            'trigger-%s' % utils.generate_unicode_uuid(),
            self.wf.name,
            {},
            {},
            '*/5 * * * *',
            first_time,
            count,
            datetime.datetime(2010, 8, 25)
        )

    _create("4242-12-25 13:37", 2)
    _create("4242-12-25 13:37", 4)
    _create("5353-12-25 13:37", 2)

    # Creations above should be ok.
    # But creation with the same count and first time
    # simultaneously leads to error.
    self.assertRaises(
        exc.DBDuplicateEntryError,
        t_s.create_cron_trigger,
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        "4242-12-25 13:37",
        2,
        None
    )
def _create_action_execution(self, input_dict, runtime_ctx, desc='',
                             action_ex_id=None):
    """Persist a new action execution in RUNNING state.

    If a parent task execution exists, the new object inherits its
    workflow/project ownership and is attached to its collection;
    otherwise the current security project id is used.
    """
    action_ex_id = action_ex_id or utils.generate_unicode_uuid()

    if self.task_ex:
        ownership = {
            'task_execution_id': self.task_ex.id,
            'workflow_name': self.task_ex.workflow_name,
            'workflow_id': self.task_ex.workflow_id,
            'project_id': self.task_ex.project_id,
        }
    else:
        ownership = {'project_id': security.get_project_id()}

    values = {
        'id': action_ex_id,
        'name': self.action_def.name,
        'spec': self.action_def.spec,
        'state': states.RUNNING,
        'input': input_dict,
        'runtime_context': runtime_ctx,
        'description': desc
    }
    values.update(ownership)

    self.action_ex = db_api.create_action_execution(values)

    if self.task_ex:
        # Add to collection explicitly so that it's in a proper
        # state within the current session.
        self.task_ex.executions.append(self.action_ex)
def schedule(self, input_dict, target, index=0, desc=''):
    """Persist the action execution and schedule it for async run."""
    assert not self.action_ex

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    self._insert_action_context(action_ex_id, input_dict)

    prepared_input = self._prepare_input(input_dict)
    runtime_ctx = self._prepare_runtime_context(index)

    self._create_action_execution(
        prepared_input,
        runtime_ctx,
        desc=desc,
        action_ex_id=action_ex_id
    )

    # Delegate the actual run to the delayed-call scheduler.
    scheduler.schedule_call(
        None,
        _RUN_EXISTING_ACTION_PATH,
        0,
        action_ex_id=self.action_ex.id,
        target=target
    )
def _create_task_execution(self, state=states.RUNNING, state_info=None):
    """Persist the task execution and expose it in the data context."""
    task_id = utils.generate_unicode_uuid()
    task_name = self.task_spec.get_name()

    # Make the task visible to expressions evaluated in this context.
    data_flow.add_current_task_to_context(self.ctx, task_id, task_name)

    runtime_context = {}

    if self.triggered_by:
        runtime_context['triggered_by'] = self.triggered_by

    values = {
        'id': task_id,
        'name': task_name,
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_namespace': self.wf_ex.workflow_namespace,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'state_info': state_info,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': runtime_context,
        'project_id': self.wf_ex.project_id,
        'type': self.task_spec.get_type()
    }

    self.task_ex = db_api.create_task_execution(values)

    # Add to collection explicitly so that it's in a proper
    # state within the current session.
    self.wf_ex.task_executions.append(self.task_ex)

    self.created = True
def run(self, input_dict, target, index=0, desc='', save=True):
    """Run the action via the executor and wait for its result.

    :param input_dict: raw action input.
    :param target: executor target host.
    :param index: with-items index recorded in the runtime context.
    :param desc: human-readable description of the action execution.
    :param save: if True, persist the action execution before running.
    :return: prepared action output.
    """
    assert not self.action_ex

    input_dict = self._prepare_input(input_dict)
    runtime_ctx = self._prepare_runtime_context(index)

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    self._insert_action_context(action_ex_id, input_dict, save=save)

    if save:
        self._create_action_execution(
            input_dict,
            runtime_ctx,
            desc=desc,
            action_ex_id=action_ex_id
        )

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
    # this keyword argument is a syntax error on modern interpreters; the
    # executor client parameter needs renaming (e.g. 'async_') - TODO.
    result = rpc.get_executor_client().run_action(
        self.action_ex.id if self.action_ex else None,
        self.action_def.action_class,
        self.action_def.attributes or {},
        input_dict,
        target,
        async=False
    )

    return self._prepare_output(result)
def test_workflow_without_auth(self):
    """Delay-tolerant workload is scheduled and executed without auth."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')
    cfg.CONF.set_default('dtw_scheduler_last_minute', False, group='engine')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    d = dtw.create_delay_tolerant_workload(
        'dtw-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        (datetime.datetime.now() + datetime.timedelta(hours=2))
        .strftime('%Y-%m-%dT%H:%M:%S'),
        None,
        None
    )

    unscheduled_workload = dtw.get_unscheduled_delay_tolerant_workload()

    self.assertEqual(1, len(unscheduled_workload))
    self.assertEqual(d.deadline, unscheduled_workload[0].deadline)

    periodic.MistralPeriodicTasks(
        cfg.CONF).process_delay_tolerant_workload(None)

    unscheduled_workload = dtw.get_unscheduled_delay_tolerant_workload()

    self.assertEqual(0, len(unscheduled_workload))

    executed_workload = db_api.get_delay_tolerant_workload(d.name)

    # Fixed: assertTrue instead of assertEqual(x, True).
    self.assertTrue(executed_workload.executed)
def _create_action_execution(self, input_dict, runtime_ctx, desc='',
                             action_ex_id=None):
    """Persist a new action execution in RUNNING state.

    Ownership fields come from the parent task execution when there is
    one, otherwise from the current security project.
    """
    action_ex_id = action_ex_id or utils.generate_unicode_uuid()

    values = {
        'id': action_ex_id,
        'name': self.action_def.name,
        'spec': self.action_def.spec,
        'state': states.RUNNING,
        'input': input_dict,
        'runtime_context': runtime_ctx,
        'description': desc
    }

    if self.task_ex:
        values['task_execution_id'] = self.task_ex.id
        values['workflow_name'] = self.task_ex.workflow_name
        values['workflow_id'] = self.task_ex.workflow_id
        values['project_id'] = self.task_ex.project_id
    else:
        values['project_id'] = security.get_project_id()

    self.action_ex = db_api.create_action_execution(values)

    if self.task_ex:
        # Add to collection explicitly so that it's in a proper
        # state within the current session.
        self.task_ex.action_executions.append(self.action_ex)
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
             timeout=None):
    """Persist the action execution and queue it for execution."""
    assert not self.action_ex

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    prepared_input = self._prepare_input(input_dict)
    runtime_ctx = self._prepare_runtime_context(index, safe_rerun)

    self._create_action_execution(
        prepared_input,
        runtime_ctx,
        self.is_sync(input_dict),
        desc=desc,
        action_ex_id=action_ex_id
    )

    execution_context = self._prepare_execution_context()

    # Hand the persisted execution over to the action queue.
    action_queue.schedule_run_action(
        self.action_ex,
        self.action_def,
        target,
        execution_context,
        timeout=timeout
    )
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False):
    """Persist the action execution and schedule it for async run."""
    assert not self.action_ex

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    self._insert_action_context(action_ex_id, input_dict)

    prepared_input = self._prepare_input(input_dict)
    runtime_ctx = self._prepare_runtime_context(index, safe_rerun)

    self._create_action_execution(
        prepared_input,
        runtime_ctx,
        desc=desc,
        action_ex_id=action_ex_id
    )

    # Delegate the actual run to the delayed-call scheduler.
    scheduler.schedule_call(
        None,
        _RUN_EXISTING_ACTION_PATH,
        0,
        action_ex_id=self.action_ex.id,
        target=target
    )
def run(self, input_dict, target, index=0, desc='', save=True,
        safe_rerun=False):
    """Run the action via the executor and wait for its result.

    :param input_dict: raw action input.
    :param target: executor target host.
    :param index: with-items index recorded in the runtime context.
    :param desc: human-readable description of the action execution.
    :param save: if True, persist the action execution before running.
    :param safe_rerun: whether the action can safely be re-run.
    :return: prepared action output.
    """
    assert not self.action_ex

    input_dict = self._prepare_input(input_dict)
    runtime_ctx = self._prepare_runtime_context(index, safe_rerun)

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    self._insert_action_context(action_ex_id, input_dict, save=save)

    if save:
        self._create_action_execution(
            input_dict,
            runtime_ctx,
            desc=desc,
            action_ex_id=action_ex_id
        )

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
    # this keyword argument is a syntax error on modern interpreters; the
    # executor client parameter needs renaming (e.g. 'async_') - TODO.
    result = rpc.get_executor_client().run_action(
        self.action_ex.id if self.action_ex else None,
        self.action_def.action_class,
        self.action_def.attributes or {},
        input_dict,
        target,
        async=False,
        safe_rerun=safe_rerun
    )

    return self._prepare_output(result)
def test__on_response_message_ack_ok(self):
    """A well-formed reply is logged, stored and later retrievable."""
    correlation_id = utils.generate_unicode_uuid()

    message = mock.MagicMock()
    message.properties = {
        'type': None,
        'correlation_id': correlation_id
    }

    response = 'response'

    kombu_listener.LOG = mock.MagicMock()

    self.listener.add_listener(correlation_id)
    self.listener.on_message(response, message)

    self.assertEqual(2, kombu_listener.LOG.debug.call_count)
    self.assertEqual(0, kombu_listener.LOG.exception.call_count)

    result = self.listener.get_result(correlation_id, 5)

    self.assertDictEqual(
        {
            kombu_base.TYPE: None,
            kombu_base.RESULT: response
        },
        result
    )
def test_trigger_create_wrong_workflow_input(self):
    """Creating a trigger with invalid workflow input must fail."""
    wf_with_input = """---
version: '2.0'

some_wf:
  input:
    - some_var

  tasks:
    some_task:
      action: std.echo output=<% $.some_var %>
"""
    workflows.create_workflows(wf_with_input)

    exception = self.assertRaises(
        exc.InputException,
        t_s.create_cron_trigger,
        'trigger-%s' % utils.generate_unicode_uuid(),
        'some_wf',
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25)
    )

    self.assertIn('Invalid input', exception.message)
    self.assertIn('some_wf', exception.message)
def test_trigger_create_wrong_workflow_input(self):
    """Trigger creation must reject input missing a required variable."""
    wf_with_input = """---
version: '2.0'

some_wf:
  input:
    - some_var

  tasks:
    some_task:
      action: std.echo output=<% $.some_var %>
"""
    workflows.create_workflows(wf_with_input)

    e = self.assertRaises(
        exc.InputException,
        t_s.create_cron_trigger,
        'trigger-%s' % utils.generate_unicode_uuid(),
        'some_wf',
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25)
    )

    for expected_fragment in ('Invalid input', 'some_wf'):
        self.assertIn(expected_fragment, e.message)
def test__on_response_message_ack_ok(self):
    """on_message stores the reply and logs without exceptions."""
    correlation_id = utils.generate_unicode_uuid()
    response = 'response'

    message = mock.MagicMock()
    message.properties = dict()
    message.properties['type'] = None
    message.properties['correlation_id'] = correlation_id

    kombu_listener.LOG = mock.MagicMock()

    self.listener.add_listener(correlation_id)

    self.listener.on_message(response, message)

    # Two debug messages and no exception logged.
    self.assertEqual(kombu_listener.LOG.debug.call_count, 2)
    self.assertEqual(kombu_listener.LOG.exception.call_count, 0)

    expected = {
        kombu_base.TYPE: None,
        kombu_base.RESULT: response
    }

    self.assertDictEqual(self.listener.get_result(correlation_id, 5),
                         expected)
def test_create_delete_trust_in_trigger(self, create_ctx, delete_trust):
    """Trust of a one-shot cron trigger is deleted after it fires.

    NOTE(review): assumes the surrounding fixtures make the trust id
    'my_trust_id' - confirm against the test class setup.
    """
    create_ctx.return_value = self.ctx

    cfg.CONF.set_default('auth_enable', True, group='pecan')

    trigger_thread = periodic.setup()

    self.addCleanup(trigger_thread.stop)
    self.addCleanup(
        cfg.CONF.set_default,
        'auth_enable',
        False,
        group='pecan'
    )

    # A trigger firing every second with an execution count of 1.
    t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '* * * * * *',
        None,
        1,
        datetime.datetime(2010, 8, 25)
    )

    # Wait until the periodic task has called the (mocked) trust removal.
    self._await(
        lambda: delete_trust.call_count == 1,
        timeout=10
    )

    # The first positional argument of the first call is the trust id.
    self.assertEqual('my_trust_id', delete_trust.mock_calls[0][1][0])
def _create_task_execution(self, state=states.RUNNING):
    """Create the task execution with insert-or-ignore semantics.

    :param state: initial task state.
    :return: True if this call actually created the object, False if a
        row with the same unique key already existed.
    """
    values = {
        'id': utils.generate_unicode_uuid(),
        'name': self.task_spec.get_name(),
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': {},
        'project_id': self.wf_ex.project_id
    }

    db_api.insert_or_ignore_task_execution(values)

    # Since 'insert_or_ignore' cannot return a valid count of updated
    # rows the only reliable way to check if insert operation has created
    # an object is try to load this object by just generated uuid.
    task_ex = db_api.load_task_execution(values['id'])

    if not task_ex:
        return False

    self.task_ex = task_ex

    # Add to collection explicitly so that it's in a proper
    # state within the current session.
    self.wf_ex.task_executions.append(self.task_ex)

    return True
def _create_task_execution(self, state=states.RUNNING, state_info=None):
    """Create and persist the DB object for this task execution."""
    runtime_context = {}

    # Record what caused the task to run, if known.
    if self.triggered_by:
        runtime_context['triggered_by'] = self.triggered_by

    values = {
        'id': utils.generate_unicode_uuid(),
        'name': self.task_spec.get_name(),
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_namespace': self.wf_ex.workflow_namespace,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'state_info': state_info,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': runtime_context,
        'project_id': self.wf_ex.project_id,
        'type': self.task_spec.get_type()
    }

    self.task_ex = db_api.create_task_execution(values)

    self.created = True
def test_workflow_without_auth(self):
    """Processing a due cron trigger advances its execution time."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    triggers.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        '* * * * * */1',
        None,
        None,
        None
    )

    def _single_next_trigger():
        found = triggers.get_next_cron_triggers()

        self.assertEqual(1, len(found))

        return found[0]

    next_execution_time_before = \
        _single_next_trigger().next_execution_time

    periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)

    next_execution_time_after = \
        _single_next_trigger().next_execution_time

    self.assertNotEqual(next_execution_time_before,
                        next_execution_time_after)
def test_oneshot_trigger_create(self):
    """A trigger without a pattern fires exactly at its first time."""
    trigger = t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        None,
        "4242-12-25 13:37",
        None,
        datetime.datetime(2010, 8, 25)
    )

    expected = datetime.datetime(4242, 12, 25, 13, 37)

    self.assertEqual(expected, trigger.next_execution_time)
def _create_task_execution(self, state=states.RUNNING, state_info=None):
    """Create and persist the DB object for this task execution.

    :param state: initial task state.
    :param state_info: optional human-readable state details.
    """
    task_id = utils.generate_unicode_uuid()
    task_name = self.task_spec.get_name()
    task_type = self.task_spec.get_type()

    values = {
        'id': task_id,
        'name': task_name,
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_namespace': self.wf_ex.workflow_namespace,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'state_info': state_info,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': {},
        'project_id': self.wf_ex.project_id,
        'type': task_type
    }

    # Record what caused the task to run, if known.
    if self.triggered_by:
        values['runtime_context']['triggered_by'] = self.triggered_by

    self.task_ex = db_api.create_task_execution(values)

    self.created = True
def test_create_cron_trigger_with_pattern_and_first_time(
        self, validate_mock):
    """A trigger with both pattern and first_time keeps recurring."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    # Make the first_time 1 sec later than current time, in order to make
    # it executed by next cron-trigger task.
    first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1)

    # Creates a cron-trigger with pattern and first time, ensure the
    # cron-trigger can be executed more than once, and cron-trigger will
    # not be deleted.
    trigger_name = 'trigger-%s' % utils.generate_unicode_uuid()

    cron_trigger = triggers.create_cron_trigger(
        trigger_name,
        wf.name,
        {},
        {},
        '*/1 * * * *',
        first_time,
        None,
        None
    )

    self.assertEqual(first_time, cron_trigger.next_execution_time)

    periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)

    next_time = triggers.get_next_execution_time(
        cron_trigger.pattern,
        cron_trigger.next_execution_time
    )

    cron_trigger_db = db_api.get_cron_trigger(trigger_name)

    # The trigger must still exist and must have been rescheduled
    # according to its pattern.
    self.assertIsNotNone(cron_trigger_db)
    self.assertEqual(next_time, cron_trigger_db.next_execution_time)
def test_start_workflow(self, rpc_mock):
    """A due cron trigger starts its workflow via the engine RPC."""
    cfg.CONF.set_default('auth_enable', True, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    t = triggers.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        '* * * * * */1',
        None,
        None,
        None
    )

    # With auth enabled a trust is associated with the trigger.
    self.assertEqual('my_trust_id', t.trust_id)

    cfg.CONF.set_default('auth_enable', False, group='pecan')

    next_trigger = triggers.get_next_cron_triggers()[0]
    next_execution_time_before = next_trigger.next_execution_time

    periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)

    start_workflow_mock = rpc_mock.return_value.start_workflow

    start_workflow_mock.assert_called_once()

    # The trigger id must appear in the execution description.
    self.assertIn(t.id,
                  start_workflow_mock.mock_calls[0][2]['description'])

    next_triggers = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(next_triggers))

    next_trigger = next_triggers[0]
    next_execution_time_after = next_trigger.next_execution_time

    # Checking the workflow was executed, by
    # verifying that the next execution time changed.
    self.assertNotEqual(next_execution_time_before,
                        next_execution_time_after)
def _create_task_execution(self, state=states.RUNNING, state_info=None):
    """Persist this task execution and register it with its workflow."""
    task_id = utils.generate_unicode_uuid()
    task_name = self.task_spec.get_name()

    # Make the task visible to expressions evaluated in this context.
    data_flow.add_current_task_to_context(self.ctx, task_id, task_name)

    self.task_ex = db_api.create_task_execution({
        'id': task_id,
        'name': task_name,
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'state_info': state_info,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': {},
        'project_id': self.wf_ex.project_id,
        'type': self.task_spec.get_type()
    })

    # Add to collection explicitly so that it's in a proper
    # state within the current session.
    self.wf_ex.task_executions.append(self.task_ex)

    self.created = True
def test_workflow_without_auth(self):
    """Cron trigger processing works with authentication disabled."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    triggers.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        '* * * * * */1',
        None,
        None,
        None
    )

    next_triggers = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(next_triggers))

    next_trigger = next_triggers[0]
    next_execution_time_before = next_trigger.next_execution_time

    ts_before = datetime.datetime.utcnow()

    periodic.process_cron_triggers_v2(None, None)

    # Processing may complete asynchronously - wait until the trigger
    # shows up again.
    self._await(
        lambda: triggers.get_next_cron_triggers(),
        fail_message="No triggers were found"
    )

    next_triggers = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(next_triggers))

    next_trigger = next_triggers[0]
    next_execution_time_after = next_trigger.next_execution_time

    # The next execution time must have advanced past the moment
    # processing started.
    self.assertGreater(next_execution_time_after, ts_before)

    self.assertNotEqual(next_execution_time_before,
                        next_execution_time_after)
def test_remove_listener_correlation_id_not_in_results(self):
    """Removing an unknown id must not disturb registered listeners."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    # Fixed: use assertIsInstance rather than comparing type() objects
    # with assertEqual.
    self.assertIsInstance(
        self.listener._results.get(correlation_id),
        moves.queue.Queue
    )

    self.listener.remove_listener(utils.generate_unicode_uuid())

    # The original listener's queue must still be registered.
    self.assertIsInstance(
        self.listener._results.get(correlation_id),
        moves.queue.Queue
    )
def __init__(self, conf):
    """Set up the Kombu-based RPC client.

    Builds a round-robin cycle of broker connections, declares the
    control exchange and an exclusive auto-delete reply queue, then
    starts the listener consuming responses from that queue.

    :param conf: RPC configuration object providing 'topic' and 'host'.
    """
    super(KombuRPCClient, self).__init__(conf)

    kombu_base.set_transport_options()

    self._register_mistral_serialization()

    self.topic = conf.topic
    self.server_id = conf.host

    self._hosts = kombu_hosts.KombuHosts(CONF)

    self.exchange = CONF.control_exchange
    self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host
    self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues
    self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete
    self._timeout = CONF.rpc_response_timeout
    self.routing_key = self.topic

    hosts = self._hosts.get_hosts()

    connections = []

    for host in hosts:
        conn = self._make_connection(
            host.hostname,
            host.port,
            host.username,
            host.password,
            self.virtual_host
        )
        connections.append(conn)

    # Cycle over all configured broker hosts.
    self._connections = itertools.cycle(connections)

    # Create exchange.
    exchange = self._make_exchange(
        self.exchange,
        durable=self.durable_queue,
        auto_delete=self.auto_delete
    )

    # Create queue. A fresh uuid keeps the reply queue unique per client.
    self.queue_name = utils.generate_unicode_uuid()
    self.callback_queue = kombu.Queue(
        self.queue_name,
        exchange=exchange,
        routing_key=self.queue_name,
        durable=False,
        exclusive=True,
        auto_delete=True
    )

    self._listener = kombu_listener.KombuRPCListener(
        connections=self._connections,
        callback_queue=self.callback_queue
    )

    self._listener.start()
def test_get_result_lack_of_queue(self):
    """Fetching a result for an unknown correlation id raises KeyError."""
    unknown_id = utils.generate_unicode_uuid()

    self.assertRaises(
        KeyError,
        self.listener.get_result,
        unknown_id,
        1  # timeout
    )
def test_add_listener(self):
    """add_listener must register an empty result queue for the id."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    # Fixed: use assertIsInstance rather than comparing type() objects
    # with assertEqual.
    self.assertIsInstance(
        self.listener._results.get(correlation_id),
        moves.queue.Queue
    )

    # Expected value first, per testtools/unittest convention.
    self.assertEqual(0, self.listener._results[correlation_id].qsize())
def test_get_result_lack_of_queue(self):
    """get_result must raise KeyError for an id never registered."""
    self.assertRaises(
        KeyError,
        self.listener.get_result,
        utils.generate_unicode_uuid(),  # never added as a listener
        1  # timeout
    )
def create_action_execution(action_def, action_input, task_ex=None, index=0,
                            description=''):
    """Create a persistent action execution in RUNNING state.

    :param action_def: action definition providing name/spec/attributes.
    :param action_input: action input dict; updated in place with the
        action context when the action class declares one.
    :param task_ex: optional parent task execution.
    :param index: with-items index stored in the runtime context.
    :param description: human-readable description.
    :return: the created action execution DB object.
    """
    # TODO(rakhmerov): We can avoid hitting DB at all when calling something
    # create_action_execution(), these operations can be just done using
    # SQLAlchemy session (1-level cache) and session flush (on TX commit) would
    # send necessary SQL queries to DB. Currently, session flush happens
    # on every operation which may not be optimal. The problem with using just
    # session level cache is in generating ids. Ids are generated only on
    # session flush. And now we have a lot places where we need to have ids
    # before TX completion.

    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    if a_m.has_action_context(
            action_def.action_class, action_def.attributes or {}) and task_ex:
        action_input.update(a_m.get_action_context(task_ex, action_ex_id))

    values = {
        'id': action_ex_id,
        'name': action_def.name,
        'spec': action_def.spec,
        'state': states.RUNNING,
        'input': action_input,
        'runtime_context': {'with_items_index': index},
        'description': description
    }

    if task_ex:
        # Inherit ownership from the parent task execution.
        values.update({
            'task_execution_id': task_ex.id,
            'workflow_name': task_ex.workflow_name,
            'workflow_id': task_ex.workflow_id,
            'project_id': task_ex.project_id,
        })
    else:
        values.update({
            'project_id': security.get_project_id(),
        })

    action_ex = db_api.create_action_execution(values)

    if task_ex:
        # Add to collection explicitly so that it's in a proper
        # state within the current session.
        task_ex.executions.append(action_ex)

    return action_ex
def start_action(self, action_name, action_input, description=None, **params):
    """Run an ad-hoc action either asynchronously or synchronously.

    :param action_name: name of the action to run.
    :param action_input: input dict for the action.
    :param description: optional description stored with the execution.
    :param params: run options: 'run_sync', 'save_result', 'target'
        and 'timeout'.
    :return: action execution DB object; when the result is not saved, a
        transient object with the same fields is returned instead.
    :raises InputException: if synchronous execution is requested for an
        action that does not support it.
    """
    with db_api.transaction():
        action = action_handler.build_action_by_name(action_name)

        action.validate_input(action_input)

        sync = params.get('run_sync')
        save = params.get('save_result')
        target = params.get('target')
        timeout = params.get('timeout')

        is_action_sync = action.is_sync(action_input)

        if sync and not is_action_sync:
            raise exceptions.InputException(
                "Action does not support synchronous execution.")

        if not sync and (save or not is_action_sync):
            # Schedule for background execution and return right away.
            action.schedule(action_input, target, timeout=timeout)

            return action.action_ex.get_clone()

        # Run in-line and wait for the result.
        output = action.run(
            action_input,
            target,
            save=False,
            timeout=timeout
        )

        state = states.SUCCESS if output.is_success() else states.ERROR

        if not save:
            # Action execution is not created but we need to return similar
            # object to the client anyway.
            return db_models.ActionExecution(
                name=action_name,
                description=description,
                input=action_input,
                output=output.to_dict(),
                state=state
            )

        action_ex_id = u.generate_unicode_uuid()

        values = {
            'id': action_ex_id,
            'name': action_name,
            'description': description,
            'input': action_input,
            'output': output.to_dict(),
            'state': state,
            'is_sync': is_action_sync
        }

        return db_api.create_action_execution(values)
def __init__(self, conf):
    """Set up the Kombu-based RPC client.

    Builds a round-robin cycle of broker connections, declares the
    control exchange and an exclusive auto-delete reply queue, then
    starts the listener consuming responses from that queue.

    :param conf: RPC configuration object providing 'topic' and 'host'.
    """
    super(KombuRPCClient, self).__init__(conf)

    kombu_base.set_transport_options()

    self._register_mistral_serialization()

    self.topic = conf.topic
    self.server_id = conf.host

    hosts = kombu_hosts.KombuHosts(CONF)

    self.exchange = CONF.control_exchange
    self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues
    self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete
    self._timeout = CONF.rpc_response_timeout
    self.routing_key = self.topic

    connections = []

    for host in hosts.hosts:
        conn = self._make_connection(
            host.hostname,
            host.port,
            host.username,
            host.password,
            hosts.virtual_host
        )
        connections.append(conn)

    # Cycle over all configured broker hosts.
    self._connections = itertools.cycle(connections)

    # Create exchange.
    exchange = self._make_exchange(
        self.exchange,
        durable=self.durable_queue,
        auto_delete=self.auto_delete
    )

    # Create queue. A fresh uuid keeps the reply queue unique per client.
    self.queue_name = utils.generate_unicode_uuid()
    self.callback_queue = kombu.Queue(
        self.queue_name,
        exchange=exchange,
        routing_key=self.queue_name,
        durable=False,
        exclusive=True,
        auto_delete=True
    )

    self._listener = kombu_listener.KombuRPCListener(
        connections=self._connections,
        callback_queue=self.callback_queue
    )

    self._listener.start()
def test_get_result_results_in_queue(self):
    """get_result must return a value previously put on the queue."""
    correlation_id = utils.generate_unicode_uuid()
    expected_result = 'abcd'

    self.listener.add_listener(correlation_id)
    self.listener._results[correlation_id].put(expected_result)

    self.assertEqual(expected_result,
                     self.listener.get_result(correlation_id, 5))
def test_get_result_results_in_queue(self):
    """A queued value is handed back by get_result."""
    expected_result = 'abcd'

    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    # Simulate a reply arriving for this correlation id.
    self.listener._results.get(correlation_id).put(expected_result)

    result = self.listener.get_result(correlation_id, 5)

    self.assertEqual(result, expected_result)
def test_get_result_not_in_queue(self):
    """An empty queue must raise queue.Empty after the timeout."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    fetch = self.listener.get_result

    self.assertRaises(
        moves.queue.Empty,
        fetch,
        correlation_id,
        1  # timeout
    )
def test_get_result_not_in_queue(self):
    """Waiting on a registered but empty queue times out."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    # No result was put on the queue, so the 1-second wait must expire.
    self.assertRaises(
        moves.queue.Empty,
        self.listener.get_result,
        correlation_id,
        1
    )
def test_start_workflow(self, get_engine_client_mock):
    """A due cron trigger starts its workflow with expected arguments."""
    cfg.CONF.set_default('auth_enable', True, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    t = triggers.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        '* * * * * */1',
        None,
        None,
        None
    )

    # With auth enabled a trust is associated with the trigger.
    self.assertEqual('my_trust_id', t.trust_id)

    cfg.CONF.set_default('auth_enable', False, group='pecan')

    next_trigger = triggers.get_next_cron_triggers()[0]
    next_execution_time_before = next_trigger.next_execution_time

    periodic.process_cron_triggers_v2(None, None)

    start_wf_mock = get_engine_client_mock.return_value.start_workflow

    start_wf_mock.assert_called_once()

    # Check actual parameters of the call.
    self.assertEqual(
        ('my_wf', '', None, {}),
        start_wf_mock.mock_calls[0][1]
    )
    self.assertIn(
        t.id,
        start_wf_mock.mock_calls[0][2]['description']
    )

    # Processing may complete asynchronously - wait until the trigger
    # shows up again.
    self._await(
        lambda: triggers.get_next_cron_triggers(),
        fail_message="No triggers were found"
    )

    next_triggers = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(next_triggers))

    next_trigger = next_triggers[0]
    next_execution_time_after = next_trigger.next_execution_time

    # Checking the workflow was executed, by
    # verifying that the next execution time changed.
    self.assertNotEqual(
        next_execution_time_before,
        next_execution_time_after
    )
def _call(self, ctx, method, target, async_=False, **kwargs):
    """Performs a remote call for the given method.

    :param ctx: authentication context associated with mistral
    :param method: name of the method that should be executed
    :param kwargs: keyword parameters for the remote-method
    :param target: Server name
    :param async_: bool value means whether the request is asynchronous
        or not.
    :return: result of the method or None if async.
    """
    correlation_id = utils.generate_unicode_uuid()

    body = {
        'rpc_ctx': ctx.to_dict(),
        'rpc_method': method,
        'arguments': self._serialize_message(kwargs),
        'async': async_
    }

    LOG.debug("Publish request: %s", body)

    try:
        if not async_:
            self._listener.add_listener(correlation_id)

        # Publish request.
        # Retried a bounded number of times to work around broken-pipe
        # errors on stale broker connections.
        for retry_round in six.moves.range(EPIPE_RETRIES):
            if self._publish_request(body, correlation_id):
                break

        # Start waiting for response.
        if async_:
            return

        LOG.debug(
            "Waiting a reply for sync call [reply_to = %s]",
            self.queue_name
        )

        result = self._wait_for_result(correlation_id)
        res_type = result[kombu_base.TYPE]
        res_object = result[kombu_base.RESULT]

        if res_type == 'error':
            # The server shipped an exception object back - re-raise it.
            raise res_object
        else:
            res_object = self._deserialize_message(res_object)['body']
    finally:
        if not async_:
            self._listener.remove_listener(correlation_id)

    return res_object
def test_add_listener(self):
    """add_listener registers an empty result queue for the id."""
    correlation_id = utils.generate_unicode_uuid()

    self.listener.add_listener(correlation_id)

    registered = self.listener._results.get(correlation_id)

    self.assertEqual(moves.queue.Queue, type(registered))
    self.assertEqual(0, registered.qsize())
def test_trigger_create_with_wf_id(self):
    """Creating a trigger by workflow id resolves the workflow name."""
    trigger_name = 'trigger-%s' % utils.generate_unicode_uuid()

    trigger = t_s.create_cron_trigger(
        trigger_name,
        None,
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25),
        workflow_id=self.wf.id
    )

    self.assertEqual(self.wf.name, trigger.workflow_name)
def test_create_trust_in_trigger(self):
    """With auth enabled, a trigger stores the created trust id."""
    cfg.CONF.set_default('auth_enable', True, group='pecan')
    self.addCleanup(
        cfg.CONF.set_default,
        'auth_enable',
        False,
        group='pecan'
    )

    trigger = t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/2 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25)
    )

    self.assertEqual('my_trust_id', trigger.trust_id)
def test_trigger_create(self):
    """Next execution times follow the cron pattern from the start time."""
    trigger = t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25)
    )

    # First run: 5 minutes after the start time.
    self.assertEqual(
        datetime.datetime(2010, 8, 25, 0, 5),
        trigger.next_execution_time
    )

    # Second run: another 5-minute step.
    second_run = t_s.get_next_execution_time(
        trigger['pattern'],
        trigger.next_execution_time
    )

    self.assertEqual(datetime.datetime(2010, 8, 25, 0, 10), second_run)
def _call(self, ctx, method, target, async_=False, **kwargs):
    """Performs a remote call for the given method.

    :param ctx: authentication context associated with mistral
    :param method: name of the method that should be executed
    :param kwargs: keyword parameters for the remote-method
    :param target: Server name
    :param async_: bool value means whether the request is asynchronous
        or not.
    :return: result of the method or None if async.
    """
    # Correlation id ties the eventual reply back to this request.
    correlation_id = utils.generate_unicode_uuid()

    body = {
        'rpc_ctx': ctx.convert_to_dict(),
        'rpc_method': method,
        'arguments': self._serialize_message(kwargs),
        'async': async_
    }

    # Lazy %-style args: the body is only rendered when debug logging is
    # actually enabled (consistent with the other RPC client variant).
    LOG.debug("Publish request: %s", body)

    try:
        # For sync calls register the listener *before* publishing so a
        # fast reply cannot arrive while nobody is waiting for it.
        if not async_:
            self._listener.add_listener(correlation_id)

        # Publish request.
        with kombu.producers[self.conn].acquire(block=True) as producer:
            producer.publish(
                body=body,
                exchange=self.exchange,
                routing_key=self.topic,
                reply_to=self.queue_name,
                correlation_id=correlation_id,
                delivery_mode=2  # persistent delivery
            )

        # Start waiting for response.
        if async_:
            return

        result = self._wait_for_result(correlation_id)
        res_type = result[kombu_base.TYPE]
        res_object = result[kombu_base.RESULT]

        # A remote error is re-raised locally; otherwise the payload is
        # deserialized and handed back to the caller.
        if res_type == 'error':
            raise res_object
        else:
            res_object = self._deserialize_message(res_object)['body']
    finally:
        # Always deregister the listener for sync calls, even on error.
        if not async_:
            self._listener.remove_listener(correlation_id)

    return res_object
def create_action_execution(action_def, action_input, task_ex=None, index=0,
                            description=''):
    """Create and persist an action execution in RUNNING state.

    :param action_def: Action definition providing name, spec and class.
    :param action_input: Input dict for the action; updated in place with
        the action context when the action class declares one.
    :param task_ex: Optional parent task execution.
    :param index: with-items index stored in the runtime context.
    :param description: Human-readable description of the execution.
    :return: The created action execution DB object.
    """
    # TODO(rakhmerov): We could avoid hitting the DB here entirely by doing
    # these operations through the SQLAlchemy session (1-level cache) and
    # letting the flush on TX commit issue the SQL. The blocker is that ids
    # are generated only on session flush, while many places need ids
    # before TX completion.

    # Generate the id up front so the input can reference it without a
    # second DB update after the row is created.
    action_ex_id = utils.generate_unicode_uuid()

    needs_context = a_m.has_action_context(
        action_def.action_class,
        action_def.attributes or {}
    )

    if needs_context and task_ex:
        action_input.update(a_m.get_action_context(task_ex, action_ex_id))

    values = {
        'id': action_ex_id,
        'name': action_def.name,
        'spec': action_def.spec,
        'state': states.RUNNING,
        'input': action_input,
        'runtime_context': {'with_items_index': index},
        'description': description
    }

    if task_ex:
        # Inherit workflow/project identity from the parent task execution.
        values['task_execution_id'] = task_ex.id
        values['workflow_name'] = task_ex.workflow_name
        values['workflow_id'] = task_ex.workflow_id
        values['project_id'] = task_ex.project_id
    else:
        # Ad-hoc execution: take the project from the security context.
        values['project_id'] = security.get_project_id()

    action_ex = db_api.create_action_execution(values)

    if task_ex:
        # Add to the collection explicitly so it's in a proper state
        # within the current session.
        task_ex.executions.append(action_ex)

    return action_ex
def test_trigger_create_with_wf_id(self):
    """Passing workflow_id instead of a name still resolves the workflow."""
    trigger = t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        None,  # no workflow name on purpose
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25),
        workflow_id=self.wf.id
    )

    self.assertEqual(self.wf.name, trigger.workflow_name)
def __init__(self, conf):
    """Set up the connection, reply queue and reply consumer from *conf*."""
    super(KombuRPCClient, self).__init__(conf)

    # Broker/transport settings; every key is optional and has a default.
    self.exchange = conf.get('exchange', '')
    self.user_id = conf.get('user_id', 'guest')
    self.password = conf.get('password', 'guest')
    self.topic = conf.get('topic', 'mistral')
    self.server_id = conf.get('server_id', '')
    self.host = conf.get('host', 'localhost')
    self.port = conf.get('port', 5672)
    self.virtual_host = conf.get('virtual_host', '/')
    self.durable_queue = conf.get('durable_queues', False)
    self.auto_delete = conf.get('auto_delete', False)
    self._timeout = conf.get('timeout', 60)

    self.conn = self._make_connection(
        self.host,
        self.port,
        self.user_id,
        self.password,
        self.virtual_host
    )

    # Exchange the reply queue is bound to.
    reply_exchange = self._make_exchange(
        self.exchange,
        durable=self.durable_queue,
        auto_delete=self.auto_delete
    )

    # Exclusive, auto-deleted, uniquely-named queue for receiving replies.
    reply_queue_name = utils.generate_unicode_uuid()

    self.callback_queue = kombu.Queue(
        reply_queue_name,
        exchange=reply_exchange,
        routing_key=reply_queue_name,
        durable=False,
        exclusive=True,
        auto_delete=True
    )

    # Consumer dispatching replies to _on_response, one message at a time.
    self.consumer = kombu.Consumer(
        channel=self.conn.channel(),
        queues=self.callback_queue,
        callbacks=[self._on_response],
        accept=['pickle', 'json']
    )
    self.consumer.qos(prefetch_count=1)
def test_create_cron_trigger_with_pattern_and_first_time(self,
                                                         validate_mock):
    """A trigger with both pattern and first time keeps recurring."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    # Make the first_time 1 sec later than current time, in order to make
    # it executed by the next cron-trigger task.
    first_time = datetime.datetime.now() + datetime.timedelta(0, 1)

    trigger_name = 'trigger-%s' % utils.generate_unicode_uuid()

    cron_trigger = triggers.create_cron_trigger(
        trigger_name,
        wf.name,
        {},
        {},
        '*/1 * * * *',
        first_time,
        None,
        None
    )

    # The stored next execution time is the UTC equivalent of first_time.
    epoch_seconds = time.mktime(first_time.timetuple())
    expected_utc = datetime.datetime.utcfromtimestamp(epoch_seconds)

    self.assertEqual(expected_utc, cron_trigger.next_execution_time)

    periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)

    next_time = triggers.get_next_execution_time(
        cron_trigger.pattern,
        cron_trigger.next_execution_time
    )

    # The trigger must survive its first run and be rescheduled,
    # not deleted.
    cron_trigger_db = db_api.get_cron_trigger(trigger_name)

    self.assertIsNotNone(cron_trigger_db)
    self.assertEqual(next_time, cron_trigger_db.next_execution_time)
def __init__(self, conf):
    """Set up the connection, reply queue and reply listener from *conf*."""
    super(KombuRPCClient, self).__init__(conf)

    self._register_mistral_serialization()

    # Broker/transport settings; every key is optional and has a default.
    self.exchange = conf.get('exchange', '')
    self.user_id = conf.get('user_id', 'guest')
    self.password = conf.get('password', 'guest')
    self.topic = conf.get('topic', 'mistral')
    self.server_id = conf.get('server_id', '')
    self.host = conf.get('host', 'localhost')
    self.port = conf.get('port', 5672)
    self.virtual_host = conf.get('virtual_host', '/')
    self.durable_queue = conf.get('durable_queues', False)
    self.auto_delete = conf.get('auto_delete', False)
    self._timeout = conf.get('timeout', 60)

    self.conn = self._make_connection(
        self.host,
        self.port,
        self.user_id,
        self.password,
        self.virtual_host
    )

    # Exchange the reply queue is bound to.
    reply_exchange = self._make_exchange(
        self.exchange,
        durable=self.durable_queue,
        auto_delete=self.auto_delete
    )

    # Exclusive, auto-deleted, uniquely-named queue for receiving replies;
    # its name doubles as the reply-to routing key.
    self.queue_name = utils.generate_unicode_uuid()

    self.callback_queue = kombu.Queue(
        self.queue_name,
        exchange=reply_exchange,
        routing_key=self.queue_name,
        durable=False,
        exclusive=True,
        auto_delete=True
    )

    # Background listener that collects replies per correlation id.
    self._listener = kombu_listener.KombuRPCListener(
        connection=self.conn,
        callback_queue=self.callback_queue
    )
    self._listener.start()
def test_oneshot_trigger_create(self):
    """A trigger with only a first execution time fires exactly then."""
    trigger = t_s.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        self.wf.name,
        {},
        {},
        None,
        "4242-12-25 13:37",
        None,
        datetime.datetime(2010, 8, 25)
    )

    expected = datetime.datetime(4242, 12, 25, 13, 37)

    self.assertEqual(expected, trigger.next_execution_time)
def create_named_lock(name, session=None):
    """Insert a NamedLock row immediately and return its id.

    This deliberately bypasses the normal SQLAlchemy unit-of-work: the
    session might otherwise postpone the INSERT, whereas a lock only works
    if the query hits the database right away.

    NOTE(review): ``session`` defaults to None yet is used unconditionally;
    presumably it is injected by a decorator outside this view — confirm.

    :param name: Lock name.
    :param session: Active SQLAlchemy session.
    :return: The generated lock id.
    """
    # Push any pending work out first so our INSERT runs in a known state.
    session.flush()

    lock_id = utils.generate_unicode_uuid()

    session.execute(
        models.NamedLock.__table__.insert().values(id=lock_id, name=name)
    )

    session.flush()

    return lock_id
def test_create_cron_trigger_with_pattern_and_first_time(self,
                                                         validate_mock):
    """A trigger with both pattern and first time keeps recurring."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    # One second in the future so the next periodic run picks it up.
    first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1)

    trigger_name = 'trigger-%s' % utils.generate_unicode_uuid()

    cron_trigger = triggers.create_cron_trigger(
        trigger_name,
        wf.name,
        {},
        {},
        '*/1 * * * *',
        first_time,
        None,
        None
    )

    # Allow a little slack since "now" keeps moving while the test runs.
    delta = cron_trigger.next_execution_time - first_time

    self.assertLessEqual(delta.total_seconds(), 3.0)

    periodic.process_cron_triggers_v2(None, None)

    # Processing the triggers resets the auth context to None, so restore
    # it before touching the DB again.
    auth_ctx.set_ctx(self.ctx)

    next_time = triggers.get_next_execution_time(
        cron_trigger.pattern,
        cron_trigger.next_execution_time
    )

    # The trigger must survive its first run and be rescheduled.
    cron_trigger_db = db_api.get_cron_trigger(trigger_name)

    self.assertIsNotNone(cron_trigger_db)

    delta = cron_trigger_db.next_execution_time - next_time

    self.assertLessEqual(delta.total_seconds(), 3.0)
def test_workflow_without_auth(self):
    """Cron triggers still fire when keystone auth is disabled."""
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    triggers.create_cron_trigger(
        'trigger-%s' % utils.generate_unicode_uuid(),
        wf.name,
        {},
        {},
        '* * * * * */1',
        None,
        None,
        None
    )

    pending = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(pending))

    time_before = pending[0].next_execution_time
    started_at = datetime.datetime.utcnow()

    periodic.process_cron_triggers_v2(None, None)

    self._await(
        lambda: triggers.get_next_cron_triggers(),
        fail_message="No triggers were found"
    )

    pending = triggers.get_next_cron_triggers()

    self.assertEqual(1, len(pending))

    time_after = pending[0].next_execution_time

    # The trigger was rescheduled past the moment processing started,
    # which proves the workflow actually ran.
    self.assertGreater(time_after, started_at)
    self.assertNotEqual(time_before, time_after)
def _create_action_execution(self, input_dict, runtime_ctx, desc=''):
    """Creates the DB object for this action execution in RUNNING state.

    Stores the result on ``self.action_ex`` as a side effect.

    :param input_dict: Action input dict; updated in place with the action
        context when the action class declares one.
    :param runtime_ctx: Runtime context stored with the execution.
    :param desc: Optional human-readable description.
    """
    # Assign the action execution ID here to minimize database calls.
    # Otherwise, the input property of the action execution DB object needs
    # to be updated with the action execution ID after the action execution
    # DB object is created.
    action_ex_id = utils.generate_unicode_uuid()

    # TODO(rakhmerov): Bad place, we probably need to push action context
    # to all actions. It's related to
    # https://blueprints.launchpad.net/mistral/+spec/mistral-custom-actions-api
    if a_m.has_action_context(
            self.action_def.action_class,
            self.action_def.attributes or {}) and self.task_ex:
        input_dict.update(
            a_m.get_action_context(self.task_ex, action_ex_id)
        )

    values = {
        'id': action_ex_id,
        'name': self.action_def.name,
        'spec': self.action_def.spec,
        'state': states.RUNNING,
        'input': input_dict,
        'runtime_context': runtime_ctx,
        'description': desc
    }

    if self.task_ex:
        # Inherit workflow/project identity from the parent task execution.
        values.update({
            'task_execution_id': self.task_ex.id,
            'workflow_name': self.task_ex.workflow_name,
            'workflow_id': self.task_ex.workflow_id,
            'project_id': self.task_ex.project_id,
        })
    else:
        # Ad-hoc execution: take the project from the security context.
        values.update({
            'project_id': security.get_project_id(),
        })

    self.action_ex = db_api.create_action_execution(values)

    if self.task_ex:
        # Add to collection explicitly so that it's in a proper
        # state within the current session.
        self.task_ex.executions.append(self.action_ex)
def test_last_minute_scheduled_workload(self):
    """Last-minute scheduling keeps the workload and creates its trigger."""
    cfg.CONF.set_default('auth_enable', True, group='pecan')
    cfg.CONF.set_default('dtw_scheduler_last_minute', True, group='engine')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    name = 'dtw-%s' % utils.generate_unicode_uuid()
    deadline = datetime.datetime.now() + datetime.timedelta(hours=2)

    workload = dtw.create_delay_tolerant_workload(
        name,
        wf.name,
        {},
        {},
        deadline.strftime('%Y-%m-%dT%H:%M:%S'),
        3600,
        None
    )

    # Auth was enabled, so the workload carries a trust id.
    self.assertEqual('my_trust_id', workload.trust_id)

    cfg.CONF.set_default('auth_enable', False, group='pecan')

    unscheduled = dtw.get_unscheduled_delay_tolerant_workload()

    self.assertEqual(1, len(unscheduled))
    self.assertEqual(workload.name, unscheduled[0].name)
    self.assertEqual(workload.deadline, unscheduled[0].deadline)

    periodic.MistralPeriodicTasks(
        cfg.CONF).process_delay_tolerant_workload(None)

    still_unscheduled = dtw.get_unscheduled_delay_tolerant_workload()

    self.assertEqual(1, len(still_unscheduled))

    # The last-minute scheduler should have created a cron trigger named
    # after this workload.
    cron_trigger_db = db_api.get_cron_trigger(workload.name)

    self.assertIsNotNone(cron_trigger_db)
    self.assertEqual(name, cron_trigger_db.name)