def _with_auth_context(auth_ctx, func, *args, **kw):
    """Runs the given function with the specified auth context.

    :param auth_ctx: Authentication context.
    :param func: Function to run with the specified auth context.
    :param args: Function positional arguments.
    :param kw: Function keyword arguments.
    :return: Function result.
    """
    old_auth_ctx = context.ctx() if context.has_ctx() else None

    context.set_ctx(auth_ctx)

    try:
        return func(*args, **kw)
    except Exception as e:
        # Note (rakhmerov): In case of "Too many connections" error from the
        # database it doesn't get wrapped with a SQLAlchemy exception for some
        # reason so we have to check the exception message explicitly.
        if isinstance(e, _RETRY_ERRORS) or 'Too many connections' in str(e):
            LOG.exception(
                "DB error detected, operation will be retried: %s", func)

        raise
    finally:
        context.set_ctx(old_auth_ctx)
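A minimal usage sketch of _with_auth_context, assuming the same module-level context object; the helper names (run_as_admin, _get_workflow_count) and the MistralContext arguments are illustrative, not part of the original module:

# Hypothetical usage sketch: run a callable under a temporary admin
# context, letting the helper save and restore the caller's context.
def _get_workflow_count():
    # Any callable that reads context.ctx() internally.
    return len(db_api.get_workflow_definitions())

def run_as_admin():
    # Assumed constructor arguments; real code would build a full context.
    admin_ctx = context.MistralContext(user_id=None, is_admin=True)

    # The previous context is restored in the helper's finally block, so
    # the surrounding code keeps its own security context afterwards.
    return _with_auth_context(admin_ctx, _get_workflow_count)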
def _invoke_calls(delayed_calls):
    """Invokes prepared delayed calls.

    :param delayed_calls: Prepared delayed calls represented as tuples
        (target_auth_context, target_method, method_args).
    """
    ctx_serializer = context.RpcContextSerializer()

    for (target_auth_context, target_method, method_args) in delayed_calls:
        try:
            # Set the correct context for the method.
            ctx_serializer.deserialize_context(target_auth_context)

            # Invoke the method.
            target_method(**method_args)
        except Exception as e:
            LOG.exception(
                "Delayed call failed, method: %s, exception: %s",
                target_method,
                e
            )
        finally:
            # Remove context.
            context.set_ctx(None)
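For illustration, a hedged sketch of the producer side of _invoke_calls: how a delayed-call tuple might be prepared. It assumes RpcContextSerializer offers serialize_context() as the counterpart of the deserialize_context() call above; _prepare_delayed_call is a hypothetical helper:

# Capture the current auth context together with the method to call,
# matching the (target_auth_context, target_method, method_args) tuple
# shape that _invoke_calls consumes.
def _prepare_delayed_call(target_method, method_args):
    ctx_serializer = context.RpcContextSerializer()

    # Assumed API: serialize_context() mirrors deserialize_context().
    target_auth_context = ctx_serializer.serialize_context(context.ctx())

    return (target_auth_context, target_method, method_args)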
def test_scheduler_call_target_method_with_correct_auth(self, method):
    method.side_effect = self.target_check_context_method

    default_context = base.get_context(default=True)

    auth_context.set_ctx(default_context)

    default_project_id = default_context.project_id

    scheduler.schedule_call(
        None,
        TARGET_METHOD_PATH,
        DELAY,
        **{'expected_project_id': default_project_id}
    )

    second_context = base.get_context(default=False)

    auth_context.set_ctx(second_context)

    second_project_id = second_context.project_id

    scheduler.schedule_call(
        None,
        TARGET_METHOD_PATH,
        DELAY,
        **{'expected_project_id': second_project_id}
    )

    self.assertNotEqual(default_project_id, second_project_id)

    for _ in range(2):
        self.assertTrue(self.queue.get())
def delete_():
    context.set_ctx(unit_base.get_context())

    db_api.delete_workflow_execution(self.wf_ex_id)

    # Unlocking the "list" operation.
    list_lock.release()
def test_create_cron_trigger_with_pattern_and_first_time(
        self, validate_mock):
    cfg.CONF.set_default('auth_enable', False, group='pecan')

    wf = workflows.create_workflows(WORKFLOW_LIST)[0]

    # Make first_time 1 sec later than the current time so that it is
    # picked up by the next cron-trigger task.
    first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1)

    # Create a cron trigger with both a pattern and a first time, and
    # ensure the cron trigger can be executed more than once and is not
    # deleted.
    trigger_name = 'trigger-%s' % utils.generate_unicode_uuid()

    cron_trigger = triggers.create_cron_trigger(
        trigger_name,
        wf.name,
        {},
        {},
        '*/1 * * * *',
        first_time,
        None,
        None
    )

    self.assertEqual(first_time, cron_trigger.next_execution_time)

    periodic.process_cron_triggers_v2(None, None)

    # process_cron_triggers_v2 sets the context to None, so we need to
    # reset it.
    auth_ctx.set_ctx(self.ctx)

    next_time = triggers.get_next_execution_time(
        cron_trigger.pattern,
        cron_trigger.next_execution_time
    )

    cron_trigger_db = db_api.get_cron_trigger(trigger_name)

    self.assertIsNotNone(cron_trigger_db)
    self.assertEqual(next_time, cron_trigger_db.next_execution_time)
def _clean_db(self):
    lookup_utils.clear_caches()

    contexts = [
        get_context(default=False),
        get_context(default=True)
    ]

    for ctx in contexts:
        auth_context.set_ctx(ctx)

        with mock.patch('mistral.services.security.get_project_id',
                        new=mock.MagicMock(return_value=ctx.project_id)):
            with db_api.transaction():
                db_api.delete_event_triggers()
                db_api.delete_cron_triggers()
                db_api.delete_workflow_executions()
                db_api.delete_task_executions()
                db_api.delete_action_executions()
                db_api.delete_workbooks()
                db_api.delete_workflow_definitions()
                db_api.delete_environments()
                db_api.delete_resource_members()
                db_api.delete_delayed_calls()
                db_api.delete_scheduled_jobs()

    sqlite_lock.cleanup()

    if not cfg.CONF.database.connection.startswith('sqlite'):
        db_sa_base.get_engine().dispose()
def test_scheduler_call_target_method_with_correct_auth(self, method):
    default_context = base.get_context(default=True)

    auth_context.set_ctx(default_context)

    default_project_id = default_context._BaseContext__values['project_id']

    method_args1 = {'expected_project_id': default_project_id}

    scheduler.schedule_call(
        None,
        CHECK_CONTEXT_METHOD_PATH,
        DELAY,
        **method_args1
    )

    second_context = base.get_context(default=False)

    auth_context.set_ctx(second_context)

    second_project_id = second_context._BaseContext__values['project_id']

    method_args2 = {'expected_project_id': second_project_id}

    scheduler.schedule_call(
        None,
        CHECK_CONTEXT_METHOD_PATH,
        DELAY,
        **method_args2
    )

    eventlet.sleep(WAIT)

    method.assert_any_call(default_project_id, default_project_id)
    method.assert_any_call(second_project_id, second_project_id)

    self.assertNotEqual(default_project_id, second_project_id)
def test_workflow_definition_public(self):
    # Create a workflow (scope=public) under one project, then make sure
    # it's visible from other projects.
    created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])

    # Assert that the stored project_id is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0.project_id, auth_context.ctx().project_id)
    self.assertNotEqual(
        WF_DEFINITIONS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    ctx = auth_context.MistralContext(
        user_id='9-0-44-5',
        project_id='99-88-33',
        user_name='test-user',
        project_name='test-another',
        is_admin=False
    )

    auth_context.set_ctx(ctx)

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])
    self.assertEqual('public', created0.scope)
def _delete(executions):
    for execution in executions:
        try:
            # Set up project_id for _secure_query to delete the execution.
            # TODO(tuan_luong): Manipulation with auth_ctx should be
            # out of db transaction scope.
            ctx = auth_ctx.MistralContext(
                user_id=None,
                project_id=execution.project_id,
                auth_token=None,
                is_admin=True
            )

            auth_ctx.set_ctx(ctx)

            LOG.debug(
                'DELETE execution id : %s from date : %s '
                'according to expiration policy',
                execution.id,
                execution.updated_at
            )

            db_api.delete_workflow_execution(execution.id)
        except Exception:
            msg = ("Failed to delete [execution_id=%s]\n %s"
                   % (execution.id, traceback.format_exc()))
            LOG.warning(msg)
        finally:
            auth_ctx.set_ctx(None)
def _switch_context(project_id, is_admin):
    _ctx = ctx.MistralContext(
        user_id=None,
        project_id=project_id,
        auth_token=None,
        is_admin=is_admin
    )

    ctx.set_ctx(_ctx)
def run_delayed_calls(self, ctx=None):
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in a transaction to guarantee that
    # the calls will be processed just once. Do the delete query to the
    # DB first to force hanging up all parallel transactions. It should
    # work on isolation levels 'READ-COMMITTED', 'REPEATABLE-READ' and
    # above ('REPEATABLE-READ' is the default in MySQL and
    # 'READ-COMMITTED' is the default in PostgreSQL).
    delayed_calls = []

    with db_api.transaction():
        for call in db_api.get_delayed_calls_to_start(time_filter):
            # Delete this delayed call from the DB before making the call
            # in order to prevent it from being called from a parallel
            # transaction.
            db_api.delete_delayed_call(call.id)

            LOG.debug('Processing next delayed call: %s', call)

            context.set_ctx(context.MistralContext(call.auth_context))

            if call.factory_method_path:
                factory = importutils.import_class(
                    call.factory_method_path
                )

                target_method = getattr(factory(), call.target_method_name)
            else:
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.copy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments.
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name]
                    )

                    method_args[arg_name] = deserialized

            delayed_calls.append((target_method, method_args))

    # TODO(m4dcoder): Troubleshoot deadlocks with PostgreSQL and MySQL.
    # The queries in the target method, such as
    # mistral.engine.task_handler.run_action, can deadlock with
    # delete_delayed_call. Please keep the scope of the transaction short.
    for (target_method, method_args) in delayed_calls:
        with db_api.transaction():
            try:
                # Call the method.
                target_method(**method_args)
            except Exception as e:
                LOG.debug(
                    "Delayed call failed [method=%s, exception=%s]",
                    target_method,
                    e
                )
def _start_workflow(self, triggers, event_params):
    """Start workflows defined in event triggers."""
    for t in triggers:
        LOG.info('Start to process event trigger: %s', t['id'])

        workflow_params = t.get('workflow_params', {})
        workflow_params.update({'event_params': event_params})

        # Set up the context before scheduling triggers.
        ctx = security.create_context(t['trust_id'], t['project_id'])
        auth_ctx.set_ctx(ctx)

        description = {
            "description": (
                "Workflow execution created by event"
                " trigger '(%s)'." % t['id']
            ),
            "triggered_by": {
                "type": "event_trigger",
                "id": t['id'],
                "name": t['name']
            }
        }

        try:
            self.engine_client.start_workflow(
                t['workflow_id'],
                t['workflow_namespace'],
                t['workflow_input'],
                description=json.dumps(description),
                **workflow_params
            )
        except Exception as e:
            LOG.exception(
                "Failed to process event trigger %s, error: %s",
                t['id'],
                str(e)
            )
        finally:
            auth_ctx.set_ctx(None)
def test_scheduler_call_target_method_with_correct_auth(self, method):
    method.side_effect = self.target_check_context_method

    default_context = base.get_context(default=True)

    auth_context.set_ctx(default_context)

    default_project_id = default_context.project_id

    job = sched_base.SchedulerJob(
        run_after=DELAY,
        func_name=TARGET_METHOD_PATH,
        func_args={'expected_project_id': default_project_id}
    )

    self.scheduler.schedule(job)

    second_context = base.get_context(default=False)

    auth_context.set_ctx(second_context)

    second_project_id = second_context.project_id

    job = sched_base.SchedulerJob(
        run_after=DELAY,
        func_name=TARGET_METHOD_PATH,
        func_args={'expected_project_id': second_project_id}
    )

    self.scheduler.schedule(job)

    self.assertNotEqual(default_project_id, second_project_id)

    for _ in range(2):
        self.assertTrue(self.queue.get())
def test_get_endpoint_for_project_noauth(self):
    # service_catalog is not set by default.
    auth_context.set_ctx(base.get_context())
    self.addCleanup(auth_context.set_ctx, None)

    self.assertRaises(
        exceptions.UnauthorizedException,
        keystone.get_endpoint_for_project,
        'keystone'
    )
def _start_workflow(self, triggers, event_params):
    """Start workflows defined in event triggers."""
    for t in triggers:
        LOG.info('Start to process event trigger: %s', t['id'])

        workflow_params = t.get('workflow_params', {})
        workflow_params.update({'event_params': event_params})

        # Set up the context before scheduling triggers.
        ctx = security.create_context(t['trust_id'], t['project_id'])
        auth_ctx.set_ctx(ctx)

        try:
            self.engine_client.start_workflow(
                t['workflow_id'],
                t['workflow_input'],
                description="Workflow execution created by event "
                            "trigger %s." % t['id'],
                **workflow_params
            )
        except Exception as e:
            LOG.exception(
                "Failed to process event trigger %s, error: %s",
                t['id'],
                str(e)
            )
        finally:
            auth_ctx.set_ctx(None)
def _delete(executions):
    for execution in executions:
        try:
            # Set up project_id for _secure_query to delete the execution.
            # TODO(tuan_luong): Manipulation with auth_ctx should be
            # out of db transaction scope.
            ctx = auth_ctx.MistralContext(
                user=None,
                tenant=execution.project_id,
                auth_token=None,
                is_admin=True
            )

            auth_ctx.set_ctx(ctx)

            LOG.debug(
                'Delete execution id : %s from date : %s '
                'according to expiration policy',
                execution.id,
                execution.updated_at
            )

            db_api.delete_workflow_execution(execution.id)
        except Exception:
            msg = ("Failed to delete [execution_id=%s]\n %s"
                   % (execution.id, traceback.format_exc()))
            LOG.warning(msg)
        finally:
            auth_ctx.set_ctx(None)
def _clean_db(self):
    contexts = [
        get_context(default=False),
        get_context(default=True)
    ]

    for ctx in contexts:
        auth_context.set_ctx(ctx)

        with mock.patch('mistral.services.security.get_project_id',
                        new=mock.MagicMock(return_value=ctx.project_id)):
            with db_api.transaction():
                db_api.delete_event_triggers()
                db_api.delete_cron_triggers()
                db_api.delete_workflow_executions()
                db_api.delete_task_executions()
                db_api.delete_action_executions()
                db_api.delete_workbooks()
                db_api.delete_workflow_definitions()
                db_api.delete_environments()
                db_api.delete_resource_members()

    sqlite_lock.cleanup()

    if not cfg.CONF.database.connection.startswith('sqlite'):
        db_sa_base.get_engine().dispose()
def run_delayed_task(context):
    """Runs the delayed task.

    Performs all the steps required to set up a task to run which are
    not already done. This is mostly code copied over from
    convey_task_result.

    :param context: Mistral authentication context inherited from a
        caller thread.
    """
    auth_context.set_ctx(context)

    db_api.start_tx()

    try:
        execution_id = task['execution_id']
        execution = db_api.execution_get(execution_id)

        # Change state from DELAYED to RUNNING.
        WORKFLOW_TRACE.info(
            "Task '%s' [%s -> %s]"
            % (task['name'], task['state'], states.RUNNING)
        )

        executables = data_flow.prepare_tasks(
            [task],
            outbound_context,
            workbook
        )

        db_api.commit_tx()
    finally:
        db_api.end_tx()

    if states.is_stopped_or_finished(execution['state']):
        return

    for task_id, action_name, action_params in executables:
        self._run_task(task_id, action_name, action_params)
def _start_workflow(self, triggers, payload, metadata):
    """Start workflows defined in event triggers."""
    for t in triggers:
        LOG.info('Start to process event trigger: %s', t['id'])

        workflow_params = t.get('workflow_params', {})
        workflow_params.update(
            {'event_payload': payload, 'event_metadata': metadata}
        )

        # Set up the context before scheduling triggers.
        ctx = security.create_context(t['trust_id'], t['project_id'])
        auth_ctx.set_ctx(ctx)

        try:
            self.engine_client.start_workflow(
                t['workflow_id'],
                t['workflow_input'],
                description="Workflow execution created by event "
                            "trigger %s." % t['id'],
                **workflow_params
            )
        except Exception as e:
            LOG.exception(
                "Failed to process event trigger %s, error: %s",
                t['id'],
                str(e)
            )
        finally:
            auth_ctx.set_ctx(None)
def _loop():
    global _stopped

    # This is an administrative thread so we need to set an admin
    # security context.
    auth_ctx.set_ctx(
        auth_ctx.MistralContext(
            user=None,
            tenant=None,
            auth_token=None,
            is_admin=True
        )
    )

    while not _stopped:
        try:
            handle_expired_actions()
        except Exception:
            LOG.exception(
                'Action execution checker iteration failed'
                ' due to unexpected exception.'
            )

            # For some mysterious reason (probably eventlet related)
            # the exception is not cleared from the context automatically.
            # This results in subsequent log.warning calls showing invalid
            # info.
            if sys.version_info < (3,):
                sys.exc_clear()

        eventlet.sleep(CONF.action_heartbeat.check_interval)
def process_cron_triggers_v2(self, ctx):
    for t in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s" % t)

        # Set up the admin context before scheduling triggers.
        ctx = security.create_context(t.trust_id, t.project_id)

        auth_ctx.set_ctx(ctx)

        LOG.debug("Cron trigger security context: %s" % ctx)

        try:
            rpc.get_engine_client().start_workflow(
                t.workflow.name,
                t.workflow_input,
                description="workflow execution by cron trigger.",
                **t.workflow_params
            )
        finally:
            # Guard against None: remaining_executions may be unlimited.
            if (t.remaining_executions is not None
                    and t.remaining_executions > 0):
                t.remaining_executions -= 1

            if t.remaining_executions == 0:
                db_api_v2.delete_cron_trigger(t.name)
            else:  # If remaining executions are None or > 0.
                next_time = triggers.get_next_execution_time(
                    t.pattern,
                    t.next_execution_time
                )

                db_api_v2.update_cron_trigger(
                    t.name,
                    {
                        'next_execution_time': next_time,
                        'remaining_executions': t.remaining_executions
                    }
                )

            auth_ctx.set_ctx(None)
def test_workflow_definition_public(self):
    # Create a workflow (scope=public) under one project, then make sure
    # it's visible from other projects.
    created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])

    # Assert that the stored project_id is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0.project_id, auth_context.ctx().project_id)
    self.assertNotEqual(
        WF_DEFINITIONS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    auth_context.set_ctx(test_base.get_context(default=False))

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])
    self.assertEqual('public', created0.scope)
def process_cron_triggers_v2(self, ctx):
    for t in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s" % t)

        # Set up the admin context before scheduling triggers.
        ctx = security.create_context(t.trust_id, t.project_id)

        auth_ctx.set_ctx(ctx)

        LOG.debug("Cron trigger security context: %s" % ctx)

        try:
            rpc.get_engine_client().start_workflow(
                t.workflow.name,
                t.workflow_input,
                description="Workflow execution created by cron trigger.",
                **t.workflow_params
            )
        finally:
            if (t.remaining_executions is not None
                    and t.remaining_executions > 0):
                t.remaining_executions -= 1

            if t.remaining_executions == 0:
                db_api_v2.delete_cron_trigger(t.name)
            else:  # If remaining executions are None or > 0.
                next_time = triggers.get_next_execution_time(
                    t.pattern,
                    t.next_execution_time
                )

                db_api_v2.update_cron_trigger(
                    t.name,
                    {
                        'next_execution_time': next_time,
                        'remaining_executions': t.remaining_executions
                    }
                )

            auth_ctx.set_ctx(None)
def _dtw_last_minute_scheduling(self, ctx):
    for d in dtw.get_unscheduled_delay_tolerant_workload():
        LOG.debug("Processing delay tolerant workload: %s" % d)

        # Set up the admin context before scheduling triggers.
        ctx = security.create_context(d.trust_id, d.project_id)

        auth_ctx.set_ctx(ctx)

        LOG.debug("Delay tolerant workload security context: %s" % ctx)

        # Calculate the latest time this can run: the deadline less the
        # duration of the work.
        # TODO(murp): check the status of the security context on this
        # TODO(murp): convert job_duration to timedelta
        start_time = d.deadline - datetime.timedelta(seconds=d.job_duration)

        triggers.create_cron_trigger(
            d.name,
            d.workflow_name,
            d.workflow_input,
            workflow_params=d.workflow_params,
            count=1,
            first_time=start_time,
            start_time=start_time,
            workflow_id=d.workflow_id
        )
def _dtw_schedule_immediately(self, ctx):
    for d in dtw.get_unscheduled_delay_tolerant_workload():
        LOG.debug("Processing delay tolerant workload: %s" % d)

        # Set up the admin context before scheduling triggers.
        ctx = security.create_context(d.trust_id, d.project_id)

        auth_ctx.set_ctx(ctx)

        LOG.debug("Delay tolerant workload security context: %s" % ctx)

        try:
            # Execute the workload.
            db_api_v2.update_delay_tolerant_workload(
                d.name,
                {'executed': True}
            )

            rpc.get_engine_client().start_workflow(
                d.workflow.name,
                d.workflow_input,
                description="DTW Workflow execution created.",
                **d.workflow_params
            )
        except Exception:
            # Log and continue to the next delay tolerant workload.
            LOG.exception(
                "Failed to process delay tolerant workload %s" % str(d)
            )
        finally:
            auth_ctx.set_ctx(None)
def run_execution_expiration_policy(self, ctx):
    LOG.debug("Starting expiration policy task.")

    older_than = CONF.execution_expiration_policy.older_than
    exp_time = (datetime.datetime.now()
                - datetime.timedelta(minutes=older_than))

    with db_api.transaction():
        # TODO(gpaz): In the future we should use a generic method with
        # filter params instead of a specific method that filters by time.
        for execution in db_api.get_expired_executions(exp_time):
            try:
                # Set up project_id for _secure_query to delete the
                # execution.
                ctx = auth_ctx.MistralContext(
                    user_id=None,
                    project_id=execution.project_id,
                    auth_token=None,
                    is_admin=True
                )

                auth_ctx.set_ctx(ctx)

                LOG.debug(
                    'DELETE execution id : %s from date : %s '
                    'according to expiration policy',
                    execution.id,
                    execution.updated_at
                )

                db_api.delete_workflow_execution(execution.id)
            except Exception:
                msg = ("Failed to delete [execution_id=%s]\n %s"
                       % (execution.id, traceback.format_exc()))
                LOG.warning(msg)
            finally:
                auth_ctx.set_ctx(None)
def test_workbook_public(self):
    # Create a workbook (scope=public) under one project, then make sure
    # it's visible from other projects.
    created0 = db_api.workbook_create(WORKBOOKS[0])

    fetched = db_api.workbooks_get_all()

    self.assertEqual(1, len(fetched))
    self.assertDictEqual(created0, fetched[0])

    # Assert that the stored project_id is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0['project_id'], auth_context.ctx().project_id)
    self.assertNotEqual(
        WORKBOOKS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    ctx = auth_context.MistralContext(
        user_id='9-0-44-5',
        project_id='99-88-33',
        user_name='test-user',
        project_name='test-another',
        is_admin=False
    )

    auth_context.set_ctx(ctx)

    fetched = db_api.workbooks_get_all()

    self.assertEqual(1, len(fetched))
    self.assertDictEqual(created0, fetched[0])
    self.assertEqual('public', created0['scope'])
def _loop():
    global _stopped

    # This is an administrative thread so we need to set an admin
    # security context.
    auth_ctx.set_ctx(
        auth_ctx.MistralContext(
            user=None,
            tenant=None,
            auth_token=None,
            is_admin=True
        )
    )

    while not _stopped:
        try:
            handle_expired_actions()
        except Exception:
            LOG.exception(
                'Action execution checker iteration failed'
                ' due to unexpected exception.'
            )

            # For some mysterious reason (probably eventlet related)
            # the exception is not cleared from the context automatically.
            # This results in subsequent log.warning calls showing invalid
            # info.
            if sys.version_info < (3,):
                sys.exc_clear()

        eventlet.sleep(CONF.action_heartbeat.check_interval)
def process_cron_triggers_v2(self, ctx):
    for t in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s" % t)

        # Set up the admin context before scheduling triggers.
        ctx = security.create_context(t.trust_id, t.project_id)

        auth_ctx.set_ctx(ctx)

        LOG.debug("Cron trigger security context: %s" % ctx)

        try:
            # Try to advance the cron trigger next_execution_time and
            # remaining_executions if relevant.
            modified = advance_cron_trigger(t)

            # If the cron trigger was not already modified by another
            # engine.
            if modified:
                LOG.debug(
                    "Starting workflow '%s' by cron trigger '%s'",
                    t.workflow.name,
                    t.name
                )

                rpc.get_engine_client().start_workflow(
                    t.workflow.name,
                    t.workflow_input,
                    description="Workflow execution created "
                                "by cron trigger.",
                    **t.workflow_params
                )
        except Exception:
            # Log and continue to the next cron trigger.
            LOG.exception("Failed to process cron trigger %s" % str(t))
        finally:
            auth_ctx.set_ctx(None)
def _set_auth_ctx(ctx):
    if not isinstance(ctx, dict):
        return

    context = auth_ctx.MistralContext.from_dict(ctx)

    auth_ctx.set_ctx(context)

    return context
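An illustrative round trip for _set_auth_ctx, assuming MistralContext inherits oslo.context's to_dict()/from_dict() pair; _send_and_restore is a hypothetical helper, not part of the original module:

# Flatten a context to a plain dict (e.g. for a queue or RPC payload)
# and re-install it on the receiving side via _set_auth_ctx. Non-dict
# payloads are silently ignored by the guard above.
def _send_and_restore(ctx):
    ctx_dict = ctx.to_dict()

    return _set_auth_ctx(ctx_dict)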
def report(reporter, ctx):
    LOG.debug("Running heartbeat reporter...")

    if not reporter._running_actions:
        return

    auth_ctx.set_ctx(ctx)

    reporter._engine_client.report_running_actions(
        reporter._running_actions
    )
def _set_auth_ctx(ctx):
    if not isinstance(ctx, dict):
        return

    context = auth_context.MistralContext(**ctx)

    auth_context.set_ctx(context)

    return context
def tearDown(self):
    """Restores the expiration policy config to default."""
    super(ExpirationPolicyTest, self).tearDown()

    cfg.CONF.set_default('auth_enable', False, group='pecan')

    ctx.set_ctx(None)

    _set_expiration_policy_config(None, None)
def process_cron_triggers_v2(self, ctx):
    LOG.debug("Processing cron triggers...")

    for trigger in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s", trigger)

        try:
            # Set up the admin context before scheduling triggers.
            ctx = security.create_context(
                trigger.trust_id,
                trigger.project_id
            )

            auth_ctx.set_ctx(ctx)

            LOG.debug("Cron trigger security context: %s", ctx)

            # Try to advance the cron trigger next_execution_time and
            # remaining_executions if relevant.
            modified = advance_cron_trigger(trigger)

            # If the cron trigger was not already modified by another
            # engine.
            if modified:
                LOG.debug(
                    "Starting workflow '%s' by cron trigger '%s'",
                    trigger.workflow.name,
                    trigger.name
                )

                description = {
                    "description": (
                        "Workflow execution created by cron"
                        " trigger '(%s)'." % trigger.id
                    ),
                    "triggered_by": {
                        "type": "cron_trigger",
                        "id": trigger.id,
                        "name": trigger.name,
                    }
                }

                rpc.get_engine_client().start_workflow(
                    trigger.workflow.name,
                    trigger.workflow.namespace,
                    None,
                    trigger.workflow_input,
                    description=json.dumps(description),
                    **trigger.workflow_params
                )
        except Exception:
            # Log and continue to the next cron trigger.
            LOG.exception("Failed to process cron trigger %s", str(trigger))
        finally:
            auth_ctx.set_ctx(None)
def test_event_engine_public_trigger(self, mock_start):
    t = copy.deepcopy(EVENT_TRIGGER)

    # Create a public trigger as an admin.
    self.ctx = base.get_context(default=False, admin=True)
    auth_context.set_ctx(self.ctx)

    t['scope'] = 'public'
    t['project_id'] = self.ctx.tenant

    trigger = db_api.create_event_trigger(t)

    # Switch to the user.
    self.ctx = base.get_context(default=True)
    auth_context.set_ctx(self.ctx)

    e_engine = evt_eng.DefaultEventEngine()

    self.addCleanup(e_engine.handler_tg.stop)

    event = {
        'event_type': EVENT_TYPE,
        'payload': {},
        'publisher': 'fake_publisher',
        'timestamp': '',
        'context': {
            'project_id': '%s' % self.ctx.project_id,
            'user_id': 'fake_user'
        },
    }

    # Assert that trigger.project_id != event.project_id.
    self.assertNotEqual(
        trigger.project_id,
        event['context']['project_id']
    )

    with mock.patch.object(e_engine, 'engine_client') as client_mock:
        e_engine.event_queue.put(event)

        time.sleep(1)

        self.assertEqual(1, client_mock.start_workflow.call_count)

        args, kwargs = client_mock.start_workflow.call_args

        self.assertEqual(
            (EVENT_TRIGGER['workflow_id'], '', None, {}),
            args
        )
        self.assertDictEqual(
            {
                'service': 'fake_publisher',
                'project_id': '%s' % self.ctx.project_id,
                'user_id': 'fake_user',
                'timestamp': ''
            },
            kwargs['event_params']
        )
def _within_new_thread():
    old_auth_ctx = context.ctx() if context.has_ctx() else None

    context.set_ctx(auth_ctx)

    try:
        _process_queue(queue)
    finally:
        context.set_ctx(old_auth_ctx)
def _switch_context(project_id, is_admin):
    _ctx = ctx.MistralContext(
        user_id=None,
        project_id=project_id,
        auth_token=None,
        is_admin=is_admin
    )

    ctx.set_ctx(_ctx)
def tearDown(self):
    """Restores the expiration policy config to default."""
    super(ExpirationPolicyTest, self).tearDown()

    cfg.CONF.set_default('auth_enable', False, group='pecan')

    ctx.set_ctx(None)

    _set_expiration_policy_config(None, None, None, None)
def test_get_endpoint_for_project_noauth(self, client):
    client().tokens.get_token_data.return_value = {'token': None}

    # service_catalog is not set by default.
    auth_context.set_ctx(base.get_context())
    self.addCleanup(auth_context.set_ctx, None)

    self.assertRaises(
        exceptions.UnauthorizedException,
        keystone.get_endpoint_for_project,
        'keystone'
    )
def setUp(self):
    super(DbTestCase, self).setUp()

    self.__heavy_init()

    self.ctx = get_context()

    auth_context.set_ctx(self.ctx)

    self.addCleanup(auth_context.set_ctx, None)
    self.addCleanup(self._clean_db)
def test_event_engine_public_trigger(self, mock_start):
    t = copy.deepcopy(EVENT_TRIGGER)

    # Create a public trigger as an admin.
    self.ctx = base.get_context(default=False, admin=True)
    auth_context.set_ctx(self.ctx)

    t['scope'] = 'public'
    t['project_id'] = self.ctx.tenant

    trigger = db_api.create_event_trigger(t)

    # Switch to the user.
    self.ctx = base.get_context(default=True)
    auth_context.set_ctx(self.ctx)

    e_engine = evt_eng.DefaultEventEngine()

    self.addCleanup(e_engine.handler_tg.stop)

    event = {
        'event_type': EVENT_TYPE,
        'payload': {},
        'publisher': 'fake_publisher',
        'timestamp': '',
        'context': {
            'project_id': '%s' % self.ctx.project_id,
            'user_id': 'fake_user'
        },
    }

    # Assert that trigger.project_id != event.project_id.
    self.assertNotEqual(
        trigger.project_id,
        event['context']['project_id']
    )

    with mock.patch.object(e_engine, 'engine_client') as client_mock:
        e_engine.event_queue.put(event)

        time.sleep(1)

        self.assertEqual(1, client_mock.start_workflow.call_count)

        args, kwargs = client_mock.start_workflow.call_args

        self.assertEqual((EVENT_TRIGGER['workflow_id'], '', {}), args)
        self.assertDictEqual(
            {
                'service': 'fake_publisher',
                'project_id': '%s' % self.ctx.project_id,
                'user_id': 'fake_user',
                'timestamp': ''
            },
            kwargs['event_params']
        )
def test_delete_other_tenant_action_execution(self):
    created = db_api.create_action_execution(ACTION_EXECS[0])

    # Create a new user.
    auth_context.set_ctx(test_base.get_context(default=False))

    self.assertRaises(
        exc.NotFoundException,
        db_api.delete_action_execution,
        created.id
    )
def run_delayed_calls(self, ctx=None):
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in a transaction to guarantee that
    # the calls will be processed just once. Do the delete query to the
    # DB first to force hanging up all parallel transactions. It should
    # work on isolation levels 'READ-COMMITTED', 'REPEATABLE-READ' and
    # above ('REPEATABLE-READ' is the default in MySQL and
    # 'READ-COMMITTED' is the default in PostgreSQL).
    with db_api.transaction():
        delayed_calls = db_api.get_delayed_calls_to_start(time_filter)

        for call in delayed_calls:
            # Delete this delayed call from the DB before making the call
            # in order to prevent it from being called from a parallel
            # transaction.
            db_api.delete_delayed_call(call.id)

            LOG.debug('Processing next delayed call: %s', call)

            context.set_ctx(context.MistralContext(call.auth_context))

            if call.factory_method_path:
                factory = importutils.import_class(
                    call.factory_method_path
                )

                target_method = getattr(factory(), call.target_method_name)
            else:
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.copy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments.
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name]
                    )

                    method_args[arg_name] = deserialized

            try:
                # Call the method.
                target_method(**method_args)
            except Exception as e:
                LOG.debug(
                    "Delayed call failed [call=%s, exception=%s]",
                    call,
                    e
                )
def test_get_endpoint_for_project_noauth(self, client):
    client().tokens.get_token_data.return_value = {'token': None}

    # service_catalog is not set by default.
    auth_context.set_ctx(base.get_context())
    self.addCleanup(auth_context.set_ctx, None)

    self.assertRaises(
        exceptions.UnauthorizedException,
        keystone.get_endpoint_for_project,
        'keystone'
    )
def _within_new_thread():
    old_auth_ctx = context.ctx() if context.has_ctx() else None

    context.set_ctx(auth_ctx)

    try:
        if tx_queue:
            _process_tx_queue(tx_queue)

        if non_tx_queue:
            _process_non_tx_queue(non_tx_queue)
    finally:
        context.set_ctx(old_auth_ctx)
def scheduler_triggers(self, ctx):
    LOG.debug('Processing next Scheduler triggers.')

    for trigger in sched.get_next_triggers():
        wb = db_api.workbook_get(trigger['workbook_name'])

        context.set_ctx(trusts.create_context(wb))

        try:
            task = parser.get_workbook(
                wb['definition']).get_trigger_task_name(trigger['name'])

            self.engine.start_workflow_execution(wb['name'], task)
        finally:
            sched.set_next_execution_time(trigger)
            context.set_ctx(None)
def test_workbooks_in_two_projects(self):
    created = db_api.create_workbook(WORKBOOKS[1])

    fetched = db_api.get_workbooks()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created, fetched[0])

    # Create a new user.
    auth_context.set_ctx(test_base.get_context(default=False))

    created = db_api.create_workbook(WORKBOOKS[1])

    fetched = db_api.get_workbooks()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created, fetched[0])
def _invoke_job(auth_ctx, func, args):
    ctx_serializer = context.RpcContextSerializer()

    try:
        # Set the correct context for the function.
        ctx_serializer.deserialize_context(auth_ctx)

        # Invoke the function.
        func(**args)
    except Exception as e:
        LOG.exception(
            "Scheduled job failed, method: %s, exception: %s",
            func,
            e
        )
    finally:
        # Remove context.
        context.set_ctx(None)
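A hedged sketch of the producer side matching _invoke_job: the job's auth context is captured at schedule time so deserialize_context() can re-install it when the job runs. It assumes RpcContextSerializer exposes serialize_context(); _capture_job is a hypothetical helper:

# Capture the caller's context together with the function and its
# keyword arguments, in the (auth_ctx, func, args) shape that
# _invoke_job expects.
def _capture_job(func, args):
    serialized_ctx = context.RpcContextSerializer().serialize_context(
        context.ctx()
    )

    return (serialized_ctx, func, args)

# Later, typically on a scheduler thread:
# auth_ctx, func, args = _capture_job(my_func, {'x': 1})
# _invoke_job(auth_ctx, func, args)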
def setUp(self):
    super(DbTestCase, self).setUp()

    _db_fd, self.db_path = tempfile.mkstemp()

    cfg.CONF.set_default(
        'connection',
        'sqlite:///' + self.db_path,
        group='database'
    )

    db_api.setup_db()

    self.addCleanup(db_api.drop_db)

    self.ctx = auth_context.MistralContext(
        user_id='1-2-3-4',
        project_id='5-6-7-8',
        user_name='test-user',
        project_name='test-project',
        is_admin=False
    )

    auth_context.set_ctx(self.ctx)

    self.addCleanup(auth_context.set_ctx, None)
def _run_correct_locking(self, wf_ex):
    # Set context info for the thread.
    auth_context.set_ctx(test_base.get_context())

    self._random_sleep()

    with db_api.transaction():
        # Lock the workflow execution and get the most up-to-date object.
        wf_ex = db_api.acquire_lock(db_models.WorkflowExecution, wf_ex.id)

        # Refresh the object.
        db_api.get_workflow_execution(wf_ex.id)

        wf_ex.name = str(int(wf_ex.name) + 1)

        return wf_ex.name