def create_context(trust_id, project_id):
    """Creates Mistral security context.

    :param trust_id: Trust Id.
    :param project_id: Project Id.
    :return: Mistral security context.
    """
    if CONF.pecan.auth_enable:
        client = keystone.client_for_trusts(trust_id)

        return auth_ctx.MistralContext(
            user_id=client.user_id,
            project_id=project_id,
            auth_token=client.auth_token,
            is_trust_scoped=True,
            trust_id=trust_id,
        )

    return auth_ctx.MistralContext(
        user_id=None,
        project_id=None,
        auth_token=None,
        is_admin=True
    )

def create_context(trust_id, project_id):
    """Creates Mistral security context.

    :param trust_id: Trust Id.
    :param project_id: Project Id.
    :return: Mistral security context.
    """
    if CONF.pecan.auth_enable:
        client = keystone.client_for_trusts(trust_id)

        if client.session:
            # Method get_token is deprecated, using get_auth_headers.
            token = client.session.get_auth_headers().get('X-Auth-Token')
            user_id = client.session.get_user_id()
        else:
            token = client.auth_token
            user_id = client.user_id

        return auth_ctx.MistralContext(
            user=user_id,
            tenant=project_id,
            auth_token=token,
            is_trust_scoped=True,
            trust_id=trust_id,
        )

    return auth_ctx.MistralContext(
        user=None,
        tenant=None,
        auth_token=None,
        is_admin=True
    )

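# A minimal usage sketch, not taken from the source above: assuming
# CONF.pecan.auth_enable is on and that trust_id/project_id come from some
# stored object (both arguments below are hypothetical placeholders), the
# trust-scoped context returned by create_context() would typically be
# installed with auth_ctx.set_ctx() and always cleared afterwards, mirroring
# the try/finally pattern used by _delete() later in this section.
def _run_with_trust_context(trust_id, project_id, func):
    ctx = create_context(trust_id, project_id)

    auth_ctx.set_ctx(ctx)

    try:
        return func()
    finally:
        # Clear the thread-local context so it does not leak to other work.
        auth_ctx.set_ctx(None)
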
def get_context(default=True, admin=False):
    if default:
        return auth_context.MistralContext(
            user_id='1-2-3-4',
            project_id=security.DEFAULT_PROJECT_ID,
            user_name='test-user',
            project_name='test-project',
            is_admin=admin
        )
    else:
        return auth_context.MistralContext(
            user_id='9-0-44-5',
            project_id='99-88-33',
            user_name='test-user',
            project_name='test-another',
            is_admin=admin
        )

def create_context(workbook):
    if 'trust_id' not in workbook:
        return

    if CONF.pecan.auth_enable:
        client = keystone.client_for_trusts(workbook['trust_id'])

        return context.MistralContext(
            user_id=client.user_id,
            project_id=workbook['project_id'],
            auth_token=client.auth_token
        )
    else:
        return context.MistralContext(
            user_id=None,
            project_id=None,
            auth_token=None
        )

def test__on_message_is_async(self, mock_get_context, get_rpc_method,
                              publish_message):
    result = 'result'
    request = {
        'async': True,
        'rpc_ctx': {},
        'rpc_method': 'found_method',
        'arguments': self.server._serialize_message({
            'a': 1,
            'b': 2
        })
    }

    message = mock.MagicMock()
    message.properties = {
        'reply_to': None,
        'correlation_id': None
    }
    message.delivery_info.get.return_value = False

    rpc_method = mock.MagicMock(return_value=result)
    get_rpc_method.return_value = rpc_method

    ctx = context.MistralContext()
    mock_get_context.return_value = ctx

    self.server._on_message(request, message)

    rpc_method.assert_called_once_with(rpc_ctx=ctx, a=1, b=2)

    self.assertEqual(publish_message.call_count, 0)

def _loop():
    global _stopped

    # This is an administrative thread so we need to set an admin
    # security context.
    auth_ctx.set_ctx(
        auth_ctx.MistralContext(
            user=None,
            tenant=None,
            auth_token=None,
            is_admin=True
        )
    )

    while not _stopped:
        try:
            handle_expired_actions()
        except Exception:
            LOG.exception(
                'Action execution checker iteration failed'
                ' due to unexpected exception.'
            )

            # For some mysterious reason (probably eventlet related)
            # the exception is not cleared from the context automatically.
            # This results in subsequent log.warning calls to show invalid
            # info.
            if sys.version_info < (3,):
                sys.exc_clear()

        eventlet.sleep(CONF.action_heartbeat.check_interval)

def _delete(executions):
    for execution in executions:
        try:
            # Set up project_id so that _secure_query can delete the
            # execution.
            # TODO(tuan_luong): Manipulation with auth_ctx should be
            # out of db transaction scope.
            ctx = auth_ctx.MistralContext(
                user_id=None,
                project_id=execution.project_id,
                auth_token=None,
                is_admin=True
            )

            auth_ctx.set_ctx(ctx)

            LOG.debug(
                'DELETE execution id : %s from date : %s '
                'according to expiration policy',
                execution.id,
                execution.updated_at
            )

            db_api.delete_workflow_execution(execution.id)
        except Exception:
            # Note: traceback.format_exc() takes no exception argument;
            # it formats the exception currently being handled.
            msg = ("Failed to delete [execution_id=%s]\n %s"
                   % (execution.id, traceback.format_exc()))
            LOG.warning(msg)
        finally:
            auth_ctx.set_ctx(None)

def run_delayed_calls(self, ctx=None):
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in a transaction to
    # guarantee that calls will be processed just once.
    # Do the delete query to DB first to force hanging up all
    # parallel transactions.
    # It should work on isolation level 'READ-COMMITTED',
    # 'REPEATABLE-READ' and above.
    #
    # 'REPEATABLE-READ' is the default in MySQL and
    # 'READ-COMMITTED' is the default in PostgreSQL.
    delayed_calls = []

    with db_api.transaction():
        for call in db_api.get_delayed_calls_to_start(time_filter):
            # Delete this delayed call from DB before making the call in
            # order to prevent calling it from a parallel transaction.
            db_api.delete_delayed_call(call.id)

            LOG.debug('Processing next delayed call: %s', call)

            context.set_ctx(context.MistralContext(call.auth_context))

            if call.factory_method_path:
                factory = importutils.import_class(call.factory_method_path)

                target_method = getattr(factory(), call.target_method_name)
            else:
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.copy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments.
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name]
                    )

                    method_args[arg_name] = deserialized

            delayed_calls.append((target_method, method_args))

    # TODO(m4dcoder): Troubleshoot deadlocks with PostgreSQL and MySQL.
    # The queries in the target method such as
    # mistral.engine.task_handler.run_action can deadlock
    # with delete_delayed_call. Please keep the scope of the
    # transaction short.
    for (target_method, method_args) in delayed_calls:
        with db_api.transaction():
            try:
                # Call the method.
                target_method(**method_args)
            except Exception as e:
                LOG.debug(
                    "Delayed call failed [call=%s, exception=%s]",
                    call,
                    e
                )

def setup(action_execution_reporter):
    interval = CONF.action_heartbeat.check_interval
    max_missed = CONF.action_heartbeat.max_missed_heartbeats
    enabled = interval and max_missed

    if not enabled:
        LOG.info("Action heartbeat reporting disabled.")

        return None

    tg = threadgroup.ThreadGroup()

    ctx = auth_ctx.MistralContext(
        user=None,
        tenant=None,
        auth_token=None,
        is_admin=True
    )

    tg.add_dynamic_timer(
        action_execution_reporter.run_periodic_tasks,
        initial_delay=None,
        periodic_interval_max=1,
        context=ctx
    )

    return tg

def _switch_context(project_id, is_admin):
    _ctx = ctx.MistralContext(
        user_id=None,
        project_id=project_id,
        auth_token=None,
        is_admin=is_admin
    )

    ctx.set_ctx(_ctx)

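# A hedged sketch of how _switch_context() might be used (the helper below,
# _delete_for_project, is hypothetical and not part of the source): switching
# to an admin context scoped to the owning project lets _secure_query-based
# deletion see the right project_id, and the context is reset to None
# afterwards in the same way _delete() and run_execution_expiration_policy()
# do in this section.
def _delete_for_project(project_id, execution_id):
    _switch_context(project_id, True)

    try:
        db_api.delete_workflow_execution(execution_id)
    finally:
        ctx.set_ctx(None)
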
def run_execution_expiration_policy(self, ctx):
    LOG.debug("Starting expiration policy task.")

    older_than = CONF.execution_expiration_policy.older_than
    exp_time = (datetime.datetime.now()
                - datetime.timedelta(minutes=older_than))

    with db_api.transaction():
        # TODO(gpaz): In the future this should use a generic method with
        # filter params instead of a specific method that filters by time.
        for execution in db_api.get_expired_executions(exp_time):
            try:
                # Set up project_id so that _secure_query can delete the
                # execution.
                ctx = auth_ctx.MistralContext(
                    user_id=None,
                    project_id=execution.project_id,
                    auth_token=None,
                    is_admin=True
                )

                auth_ctx.set_ctx(ctx)

                LOG.debug(
                    'DELETE execution id : %s from date : %s '
                    'according to expiration policy',
                    execution.id,
                    execution.updated_at
                )

                db_api.delete_workflow_execution(execution.id)
            except Exception:
                # Note: traceback.format_exc() takes no exception argument;
                # it formats the exception currently being handled.
                msg = ("Failed to delete [execution_id=%s]\n %s"
                       % (execution.id, traceback.format_exc()))
                LOG.warning(msg)
            finally:
                auth_ctx.set_ctx(None)

def _set_auth_ctx(ctx):
    if not isinstance(ctx, dict):
        return

    context = auth_context.MistralContext(**ctx)
    auth_context.set_ctx(context)

    return context

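# A hedged example of how _set_auth_ctx() might be fed; all values below are
# made up for illustration. The dict keys mirror the MistralContext keyword
# arguments used elsewhere in this section, e.g. a serialized 'rpc_ctx'
# arriving with an RPC request.
serialized_ctx = {
    'user_id': '1-2-3-4',        # hypothetical id
    'project_id': '99-88-33',    # hypothetical id
    'auth_token': None,
    'is_admin': False,
}

restored = _set_auth_ctx(serialized_ctx)   # returns the context it just set
_set_auth_ctx(None)                        # non-dict input is ignored; returns None
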
def setup():
    tg = threadgroup.ThreadGroup()
    pt = ExecutionExpirationPolicy(CONF)

    ctx = auth_ctx.MistralContext(
        user_id=None,
        project_id=None,
        auth_token=None,
        is_admin=True
    )

    tg.add_dynamic_timer(
        pt.run_periodic_tasks,
        initial_delay=None,
        periodic_interval_max=1,
        context=ctx
    )

def setup():
    tg = threadgroup.ThreadGroup()
    pt = ExecutionExpirationPolicy(CONF)

    _check_ignored_states_config()

    ctx = auth_ctx.MistralContext(
        user=None,
        tenant=None,
        auth_token=None,
        is_admin=True
    )

    tg.add_dynamic_timer(
        pt.run_periodic_tasks,
        initial_delay=None,
        periodic_interval_max=1,
        context=ctx
    )

    return tg

def setup():
    tg = threadgroup.ThreadGroup()
    pt = MistralPeriodicTasks(CONF)

    ctx = auth_ctx.MistralContext(
        user=None,
        tenant=None,
        auth_token=None,
        is_admin=True
    )

    tg.add_dynamic_timer(
        pt.run_periodic_tasks,
        initial_delay=None,
        periodic_interval_max=1,
        context=ctx
    )

    _periodic_tasks[pt] = tg

    return tg

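# A hedged sketch of the receiving side of the setup() variants above, under
# the assumption (not shown in this section) that these periodic-task classes
# subclass oslo_service.periodic_task.PeriodicTasks. The 'context=ctx' kwarg
# given to tg.add_dynamic_timer() is forwarded to run_periodic_tasks(), which
# hands the admin MistralContext to each decorated task method, matching the
# run_execution_expiration_policy(self, ctx) signature earlier in this section.
from oslo_service import periodic_task


class ExecutionExpirationPolicy(periodic_task.PeriodicTasks):
    @periodic_task.periodic_task(spacing=1)
    def run_execution_expiration_policy(self, ctx):
        # 'ctx' is the admin context built in setup(); the real method body
        # re-scopes it per execution before deleting, as shown above.
        pass
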
def test_nova_action_config_endpoint(self, mock_novaclient,
                                     mock_nova_endpoint,
                                     mock_ks_endpoint_v2):
    # this is the default, but be explicit
    config.CONF.set_default('os_actions_endpoint_type', 'publicURL')

    test_ctx = ctx.MistralContext(
        user_id=None,
        project_id='1234',
        project_name='admin',
        auth_token=None,
        is_admin=False
    )
    ctx.set_ctx(test_ctx)

    # attributes mirror keystone Endpoint object exactly
    # (with endpoint type publicURL)
    keystone_attrs = {
        'url': 'http://192.0.2.1:5000/v2.0',
        'enabled': True,
        'id': 'b1ddf133fa6e491c8ee13701be97db2d',
        'interface': 'public',
        'links': {
            u'self': u'http://192.0.2.1:5000/v3/endpoints/'
                     'b1ddf133fa6e491c8ee13701be97db2d'
        },
        'region': 'regionOne',
        'region_id': 'regionOne',
        'service_id': '8f4afc75cd584d5cb381f68a9db80147',
    }
    keystone_endpoint = FakeEndpoint(**keystone_attrs)

    nova_attrs = {
        'url': 'http://192.0.2.1:8774/v2/%(tenant_id)s',
        'enabled': True,
        'id': '5bb51b33c9984513b52b6a3e85154305',
        'interface': 'public',
        'links': {
            u'self': u'http://192.0.2.1:5000/v3/endpoints/'
                     '5bb51b33c9984513b52b6a3e85154305'
        },
        'region': 'regionOne',
        'region_id': 'regionOne',
        'service_id': '1af46173f37848edb65bd4962ed2d09d',
    }
    nova_endpoint = FakeEndpoint(**nova_attrs)

    mock_ks_endpoint_v2.return_value(keystone_endpoint)
    mock_nova_endpoint.return_value(nova_endpoint)

    method_name = "servers.get"
    action_class = actions.NovaAction
    action_class.client_method_name = method_name
    params = {'server': '1234-abcd'}
    action = action_class(**params)

    action.run()

    mock_novaclient.Client.assert_called_once_with(
        2,
        username=None,
        api_key=None,
        endpoint_type='publicURL',
        service_type='compute',
        auth_token=test_ctx.auth_token,
        tenant_id=test_ctx.project_id,
        region_name=mock_ks_endpoint_v2().region,
        auth_url=mock_ks_endpoint_v2().url
    )

    self.assertTrue(mock_novaclient.Client().servers.get.called)
    mock_novaclient.Client().servers.get.assert_called_once_with(
        server="1234-abcd"
    )

def test_nova_action_config_endpoint(self, mock_novaclient,
                                     mock_nova_endpoint,
                                     mock_ks_endpoint_v2):
    # this is the default, but be explicit
    config.CONF.set_default('os_actions_endpoint_type', 'public')

    test_ctx = ctx.MistralContext(
        user_id=None,
        project_id='1234',
        project_name='admin',
        auth_token=None,
        is_admin=False,
        # set year to 3016 in order for token to always be valid
        expires_at='3016-07-13T18:34:22.000000Z'
    )
    ctx.set_ctx(test_ctx)

    # attributes mirror keystone Endpoint object exactly
    # (with endpoint type public)
    keystone_attrs = {
        'url': 'http://192.0.2.1:5000/v2.0',
        'enabled': True,
        'id': 'b1ddf133fa6e491c8ee13701be97db2d',
        'interface': 'public',
        'links': {
            u'self': u'http://192.0.2.1:5000/v3/endpoints/'
                     'b1ddf133fa6e491c8ee13701be97db2d'
        },
        'region': 'regionOne',
        'region_id': 'regionOne',
        'service_id': '8f4afc75cd584d5cb381f68a9db80147',
    }
    keystone_endpoint = FakeEndpoint(**keystone_attrs)

    nova_attrs = {
        'url': 'http://192.0.2.1:8774/v2/%(tenant_id)s',
        'enabled': True,
        'id': '5bb51b33c9984513b52b6a3e85154305',
        'interface': 'public',
        'links': {
            u'self': u'http://192.0.2.1:5000/v3/endpoints/'
                     '5bb51b33c9984513b52b6a3e85154305'
        },
        'region': 'regionOne',
        'region_id': 'regionOne',
        'service_id': '1af46173f37848edb65bd4962ed2d09d',
    }
    nova_endpoint = FakeEndpoint(**nova_attrs)

    mock_ks_endpoint_v2.return_value(keystone_endpoint)
    mock_nova_endpoint.return_value(nova_endpoint)

    method_name = "servers.get"
    action_class = actions.NovaAction
    action_class.client_method_name = method_name
    params = {'server': '1234-abcd'}
    action = action_class(**params)

    action.run()

    mock_novaclient.Client.assert_called_once_with(
        2,
        username=None,
        api_key=None,
        endpoint_type='public',
        service_type='compute',
        auth_token=test_ctx.auth_token,
        tenant_id=test_ctx.project_id,
        region_name=mock_ks_endpoint_v2().region,
        auth_url=mock_ks_endpoint_v2().url
    )

    self.assertTrue(mock_novaclient.Client().servers.get.called)
    mock_novaclient.Client().servers.get.assert_called_once_with(
        server="1234-abcd"
    )

    # Repeat test in order to validate cache.
    mock_novaclient.reset_mock()
    action.run()

    # TODO(d0ugal): Uncomment the following line when caching is fixed.
    # mock_novaclient.Client.assert_not_called()
    mock_novaclient.Client().servers.get.assert_called_with(
        server="1234-abcd"
    )

    # Repeat again with different context for cache testing.
    test_ctx.project_name = 'service'
    test_ctx.project_id = '1235'
    ctx.set_ctx(test_ctx)

    mock_novaclient.reset_mock()
    action.run()

    mock_novaclient.Client.assert_called_once_with(
        2,
        username=None,
        api_key=None,
        endpoint_type='public',
        service_type='compute',
        auth_token=test_ctx.auth_token,
        tenant_id=test_ctx.project_id,
        region_name=mock_ks_endpoint_v2().region,
        auth_url=mock_ks_endpoint_v2().url
    )

    self.assertTrue(mock_novaclient.Client().servers.get.called)
    mock_novaclient.Client().servers.get.assert_called_once_with(
        server="1234-abcd"
    )

def run_delayed_calls(self, ctx=None):
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in a transaction to
    # guarantee that calls will be processed just once.
    # Do the delete query to DB first to force hanging up all
    # parallel transactions.
    # It should work on isolation level 'READ-COMMITTED',
    # 'REPEATABLE-READ' and above.
    #
    # 'REPEATABLE-READ' is the default in MySQL and
    # 'READ-COMMITTED' is the default in PostgreSQL.
    delayed_calls = []

    with db_api.transaction():
        candidate_calls = db_api.get_delayed_calls_to_start(time_filter)
        calls_to_make = []

        for call in candidate_calls:
            # Mark this delayed call as being processed in order to
            # prevent calling it from a parallel transaction.
            result, number_of_updated = db_api.update_delayed_call(
                id=call.id,
                values={'processing': True},
                query_filter={"processing": False}
            )

            # If number_of_updated != 1, another scheduler has already
            # updated it.
            if number_of_updated == 1:
                calls_to_make.append(result)

    for call in calls_to_make:
        LOG.debug('Processing next delayed call: %s', call)

        target_auth_context = copy.deepcopy(call.auth_context)

        if call.factory_method_path:
            factory = importutils.import_class(call.factory_method_path)

            target_method = getattr(factory(), call.target_method_name)
        else:
            target_method = importutils.import_class(call.target_method_name)

        method_args = copy.deepcopy(call.method_arguments)

        if call.serializers:
            # Deserialize arguments.
            for arg_name, ser_path in call.serializers.items():
                serializer = importutils.import_class(ser_path)()

                deserialized = serializer.deserialize(method_args[arg_name])

                method_args[arg_name] = deserialized

        delayed_calls.append(
            (target_auth_context, target_method, method_args)
        )

    for (target_auth_context, target_method, method_args) in delayed_calls:
        try:
            # Set the correct context for the method.
            context.set_ctx(context.MistralContext(target_auth_context))

            # Call the method.
            target_method(**method_args)
        except Exception as e:
            LOG.exception(
                "Delayed call failed, method: %s, exception: %s",
                target_method,
                e
            )
        finally:
            # Remove context.
            context.set_ctx(None)

    with db_api.transaction():
        for call in calls_to_make:
            try:
                # Delete calls that were processed.
                db_api.delete_delayed_call(call.id)
            except Exception as e:
                LOG.error(
                    "Failed to delete call [call=%s, exception=%s]",
                    call,
                    e
                )